 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 * Based on code from the OggTheora software codec source code,
 * Copyright (C) 2002-2010 The Xiph.Org Foundation and contributors.
static int y4m_parse_tags(y4m_input *_y4m,char *_tags){
got_w=got_h=got_fps=got_interlace=got_par=got_chroma=0;
/*Skip any leading spaces.*/
/*If that's all we have, stop.*/
/*Find the end of this tag.*/
for(q=p+1;*q!='\0'&&*q!=' ';q++);
if(sscanf(p+1,"%d",&_y4m->pic_w)!=1)return -1;
if(sscanf(p+1,"%d",&_y4m->pic_h)!=1)return -1;
if(sscanf(p+1,"%d:%d",&_y4m->fps_n,&_y4m->fps_d)!=2){
if(sscanf(p+1,"%d:%d",&_y4m->par_n,&_y4m->par_d)!=2){
memcpy(_y4m->chroma_type,p+1,q-p-1);
_y4m->chroma_type[q-p-1]='\0';
/*Ignore unknown tags.*/
if(!got_w||!got_h||!got_fps)return -1;
if(!got_interlace)_y4m->interlace='?';
if(!got_par)_y4m->par_n=_y4m->par_d=0;
/*Chroma-type is not specified in older files, e.g., those generated by
if(!got_chroma)strcpy(_y4m->chroma_type,"420");
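/*For illustration (a hypothetical header line, not from any particular file):
  YUV4MPEG2 W352 H288 F30000:1001 Ip A1:1 C420jpeg
  would yield pic_w=352, pic_h=288, fps_n=30000, fps_d=1001, interlace='p',
  par_n=1, par_d=1, and chroma_type "420jpeg".*/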
/*All anti-aliasing filters in the following conversion functions are based on
one of two window functions:
The 6-tap Lanczos window (for down-sampling and shifts):
sinc(\pi*t)*sinc(\pi*t/3), |t|<3 (sinc(t)==sin(t)/t)
The 4-tap Mitchell window (for up-sampling):
7|t|^3-12|t|^2+16/3, |t|<1
-(7/3)|t|^3+12|t|^2-20|t|+32/3, |t|<2
The number of taps is intentionally kept small to reduce computational
overhead and limit ringing.
The taps from these filters are scaled so that their sum is 1, and the result
is scaled by 128 and rounded to integers to create a filter whose
intermediate values fit inside 16 bits.
Coefficients are rounded in such a way as to ensure their sum is still 128,
which is usually equivalent to normal rounding.
Conversions which require both horizontal and vertical filtering could
have these steps pipelined, for less memory consumption and better cache
performance, but we do them separately for simplicity.*/
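/*For example, the quarter-pel shift filter used below, [4 -17 114 35 -9 1]/128,
  keeps its coefficient sum at exactly 128 (4-17+114+35-9+1=128), and the
  "+64)>>7" in each filtering expression below adds half of 128 before the
  shift so that the division by 128 rounds to nearest.*/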
#define OC_MINI(_a,_b) ((_a)>(_b)?(_b):(_a))
#define OC_MAXI(_a,_b) ((_a)<(_b)?(_b):(_a))
#define OC_CLAMPI(_a,_b,_c) (OC_MAXI(_a,OC_MINI(_b,_c)))
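/*Note the argument order: OC_CLAMPI(_a,_b,_c) clamps the value _b to the range
  [_a,_c], so OC_CLAMPI(0,v,255) saturates v to an 8-bit range.
  The sketch below is purely illustrative and is not used by the converters in
  this file; it shows the generic form of the hand-unrolled 6-tap filtering
  loops that follow, with the same edge clamping and "+64)>>7" rounding.*/
static void example_filter_row_6tap(unsigned char *_dst,
 const unsigned char *_src,int _w,const int _taps[6]){
  int x;
  for(x=0;x<_w;x++){
    int sum;
    int k;
    /*Start with half of 128 so the final >>7 rounds to nearest.*/
    sum=64;
    for(k=0;k<6;k++){
      /*Taps are centered so that _taps[2] multiplies _src[x]; indices past
        either edge are clamped to the nearest valid sample.*/
      sum+=_taps[k]*_src[OC_CLAMPI(0,x+k-2,_w-1)];
    }
    _dst[x]=(unsigned char)OC_CLAMPI(0,sum>>7,255);
  }
}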
/*420jpeg chroma samples are sited like:
Y-------Y-------Y-------Y-------
Y-------Y-------Y-------Y-------
Y-------Y-------Y-------Y-------
Y-------Y-------Y-------Y-------
420mpeg2 chroma samples are sited like:
Y-------Y-------Y-------Y-------
Y-------Y-------Y-------Y-------
Y-------Y-------Y-------Y-------
Y-------Y-------Y-------Y-------
We use a resampling filter to shift the site locations one quarter pixel (at
the chroma plane's resolution) to the right.
The 4:2:2 modes look exactly the same, except there are twice as many chroma
lines, and they are vertically co-sited with the luma samples in both the
mpeg2 and jpeg cases (thus requiring no vertical resampling).*/
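/*(In 420mpeg2 each chroma sample is horizontally co-sited with a luma column,
  while in 420jpeg it lies midway between two luma columns; that half-luma-pixel
  offset is one quarter pixel at the chroma plane's resolution, which is why the
  shift below is a quarter pel.)*/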
static void y4m_42xmpeg2_42xjpeg_helper(unsigned char *_dst,
const unsigned char *_src,int _c_w,int _c_h){
/*Filter: [4 -17 114 35 -9 1]/128, derived from a 6-tap Lanczos window.*/
for(x=0;x<OC_MINI(_c_w,2);x++){
_dst[x]=(unsigned char)OC_CLAMPI(0,(4*_src[0]-17*_src[OC_MAXI(x-1,0)]+
114*_src[x]+35*_src[OC_MINI(x+1,_c_w-1)]-9*_src[OC_MINI(x+2,_c_w-1)]+
_src[OC_MINI(x+3,_c_w-1)]+64)>>7,255);
_dst[x]=(unsigned char)OC_CLAMPI(0,(4*_src[x-2]-17*_src[x-1]+
114*_src[x]+35*_src[x+1]-9*_src[x+2]+_src[x+3]+64)>>7,255);
_dst[x]=(unsigned char)OC_CLAMPI(0,(4*_src[x-2]-17*_src[x-1]+
114*_src[x]+35*_src[OC_MINI(x+1,_c_w-1)]-9*_src[OC_MINI(x+2,_c_w-1)]+
_src[_c_w-1]+64)>>7,255);
/*Handles conversion from both 422 and 420mpeg2 to 422jpeg and 420jpeg,
respectively.*/
static void y4m_convert_42xmpeg2_42xjpeg(y4m_input *_y4m,unsigned char *_dst,
unsigned char *_aux){
/*Skip past the luma data.*/
_dst+=_y4m->pic_w*_y4m->pic_h;
/*Compute the size of each chroma plane.*/
c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
for(pli=1;pli<3;pli++){
y4m_42xmpeg2_42xjpeg_helper(_dst,_aux,c_w,c_h);
/*This format is only used for interlaced content, but is included for
completeness.
420jpeg chroma samples are sited like:
Y-------Y-------Y-------Y-------
Y-------Y-------Y-------Y-------
Y-------Y-------Y-------Y-------
Y-------Y-------Y-------Y-------
420paldv chroma samples are sited like:
YR------Y-------YR------Y-------
YB------Y-------YB------Y-------
YR------Y-------YR------Y-------
YB------Y-------YB------Y-------
We use a resampling filter to shift the site locations one quarter pixel (at
the chroma plane's resolution) to the right.
Then we use another filter to move the C_r location down one quarter pixel,
and the C_b location up one quarter pixel.*/
static void y4m_convert_42xpaldv_42xjpeg(y4m_input *_y4m,unsigned char *_dst,
unsigned char *_aux){
/*Skip past the luma data.*/
_dst+=_y4m->pic_w*_y4m->pic_h;
/*Compute the size of each chroma plane.*/
c_w=(_y4m->pic_w+1)/2;
c_h=(_y4m->pic_h+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
for(pli=1;pli<3;pli++){
/*First do the horizontal re-sampling.
This is the same as the mpeg2 case, except that after the horizontal
pass we also need to apply a vertical filter.*/
y4m_42xmpeg2_42xjpeg_helper(tmp,_aux,c_w,c_h);
/*Slide C_b up a quarter-pel.
This is the same filter used above, but in the other order.*/
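/*(Written out, that reverse-order filter is [1 -9 35 114 -17 4]/128; its taps
  also sum to 128, as can be checked against the coefficients below.)*/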
for(y=0;y<OC_MINI(c_h,3);y++){
_dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(tmp[0]
-9*tmp[OC_MAXI(y-2,0)*c_w]+35*tmp[OC_MAXI(y-1,0)*c_w]
+114*tmp[y*c_w]-17*tmp[OC_MINI(y+1,c_h-1)*c_w]
+4*tmp[OC_MINI(y+2,c_h-1)*c_w]+64)>>7,255);
_dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(tmp[(y-3)*c_w]
-9*tmp[(y-2)*c_w]+35*tmp[(y-1)*c_w]+114*tmp[y*c_w]
-17*tmp[(y+1)*c_w]+4*tmp[(y+2)*c_w]+64)>>7,255);
_dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(tmp[(y-3)*c_w]
-9*tmp[(y-2)*c_w]+35*tmp[(y-1)*c_w]+114*tmp[y*c_w]
-17*tmp[OC_MINI(y+1,c_h-1)*c_w]+4*tmp[(c_h-1)*c_w]+64)>>7,255);
/*Slide C_r down a quarter-pel.
This is the same as the horizontal filter.*/
for(y=0;y<OC_MINI(c_h,2);y++){
_dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(4*tmp[0]
-17*tmp[OC_MAXI(y-1,0)*c_w]+114*tmp[y*c_w]
+35*tmp[OC_MINI(y+1,c_h-1)*c_w]-9*tmp[OC_MINI(y+2,c_h-1)*c_w]
+tmp[OC_MINI(y+3,c_h-1)*c_w]+64)>>7,255);
_dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(4*tmp[(y-2)*c_w]
-17*tmp[(y-1)*c_w]+114*tmp[y*c_w]+35*tmp[(y+1)*c_w]
-9*tmp[(y+2)*c_w]+tmp[(y+3)*c_w]+64)>>7,255);
_dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(4*tmp[(y-2)*c_w]
-17*tmp[(y-1)*c_w]+114*tmp[y*c_w]+35*tmp[OC_MINI(y+1,c_h-1)*c_w]
-9*tmp[OC_MINI(y+2,c_h-1)*c_w]+tmp[(c_h-1)*c_w]+64)>>7,255);
/*For actual interlaced material, this would have to be done separately on
each field, and the shift amounts would be different.
C_r moves down 1/8, C_b up 3/8 in the top field, and C_r moves down 3/8,
C_b up 1/8 in the bottom field.
The corresponding filters would be:
Down 1/8 (reverse order for up): [3 -11 125 15 -4 0]/128
Down 3/8 (reverse order for up): [4 -19 98 56 -13 2]/128*/
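/*(Both of those filters also keep the coefficient sum at 128:
  3-11+125+15-4+0=128 and 4-19+98+56-13+2=128.)*/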
/*Perform vertical filtering to reduce a single plane from 4:2:2 to 4:2:0.
This is used as a helper by several conversion routines.*/
static void y4m_422jpeg_420jpeg_helper(unsigned char *_dst,
const unsigned char *_src,int _c_w,int _c_h){
/*Filter: [3 -17 78 78 -17 3]/128, derived from a 6-tap Lanczos window.*/
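/*The taps are symmetric because each output row lies midway between two input
  rows, and they again sum to 128 (3-17+78+78-17+3=128).*/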
for(y=0;y<OC_MINI(_c_h,2);y+=2){
_dst[(y>>1)*_c_w]=OC_CLAMPI(0,(64*_src[0]
+78*_src[OC_MINI(1,_c_h-1)*_c_w]
-17*_src[OC_MINI(2,_c_h-1)*_c_w]
+3*_src[OC_MINI(3,_c_h-1)*_c_w]+64)>>7,255);
_dst[(y>>1)*_c_w]=OC_CLAMPI(0,(3*(_src[(y-2)*_c_w]+_src[(y+3)*_c_w])
-17*(_src[(y-1)*_c_w]+_src[(y+2)*_c_w])
+78*(_src[y*_c_w]+_src[(y+1)*_c_w])+64)>>7,255);
_dst[(y>>1)*_c_w]=OC_CLAMPI(0,(3*(_src[(y-2)*_c_w]
+_src[(_c_h-1)*_c_w])-17*(_src[(y-1)*_c_w]
+_src[OC_MINI(y+2,_c_h-1)*_c_w])
+78*(_src[y*_c_w]+_src[OC_MINI(y+1,_c_h-1)*_c_w])+64)>>7,255);
/*420jpeg chroma samples are sited like:
Y-------Y-------Y-------Y-------
Y-------Y-------Y-------Y-------
Y-------Y-------Y-------Y-------
Y-------Y-------Y-------Y-------
422jpeg chroma samples are sited like:
Y---BR--Y-------Y---BR--Y-------
Y---BR--Y-------Y---BR--Y-------
Y---BR--Y-------Y---BR--Y-------
Y---BR--Y-------Y---BR--Y-------
We use a resampling filter to decimate the chroma planes by two in the
vertical direction.*/
static void y4m_convert_422jpeg_420jpeg(y4m_input *_y4m,unsigned char *_dst,
unsigned char *_aux){
/*Skip past the luma data.*/
_dst+=_y4m->pic_w*_y4m->pic_h;
/*Compute the size of each chroma plane.*/
c_w=(_y4m->pic_w+_y4m->src_c_dec_h-1)/_y4m->src_c_dec_h;
dst_c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
dst_c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
dst_c_sz=dst_c_w*dst_c_h;
for(pli=1;pli<3;pli++){
y4m_422jpeg_420jpeg_helper(_dst,_aux,c_w,c_h);
/*420jpeg chroma samples are sited like:
Y-------Y-------Y-------Y-------
Y-------Y-------Y-------Y-------
Y-------Y-------Y-------Y-------
Y-------Y-------Y-------Y-------
422 chroma samples are sited like:
YBR-----Y-------YBR-----Y-------
YBR-----Y-------YBR-----Y-------
YBR-----Y-------YBR-----Y-------
YBR-----Y-------YBR-----Y-------
We use a resampling filter to shift the original site locations one quarter
pixel (at the original chroma resolution) to the right.
Then we use a second resampling filter to decimate the chroma planes by two
in the vertical direction.*/
static void y4m_convert_422_420jpeg(y4m_input *_y4m,unsigned char *_dst,
unsigned char *_aux){
/*Skip past the luma data.*/
_dst+=_y4m->pic_w*_y4m->pic_h;
/*Compute the size of each chroma plane.*/
c_w=(_y4m->pic_w+_y4m->src_c_dec_h-1)/_y4m->src_c_dec_h;
dst_c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
dst_c_sz=c_w*dst_c_h;
for(pli=1;pli<3;pli++){
/*In reality, the horizontal and vertical steps could be pipelined, for
less memory consumption and better cache performance, but we do them
separately for simplicity.*/
/*First do horizontal filtering (convert to 422jpeg).*/
y4m_42xmpeg2_42xjpeg_helper(tmp,_aux,c_w,c_h);
/*Now do the vertical filtering.*/
y4m_422jpeg_420jpeg_helper(_dst,tmp,c_w,c_h);
/*420jpeg chroma samples are sited like:
Y-------Y-------Y-------Y-------
Y-------Y-------Y-------Y-------
Y-------Y-------Y-------Y-------
Y-------Y-------Y-------Y-------
411 chroma samples are sited like:
YBR-----Y-------Y-------Y-------
YBR-----Y-------Y-------Y-------
YBR-----Y-------Y-------Y-------
YBR-----Y-------Y-------Y-------
We use a filter to resample at site locations one eighth pixel (at the source
chroma plane's horizontal resolution) and five eighths of a pixel to the right.
Then we use another filter to decimate the planes by 2 in the vertical
direction.*/
static void y4m_convert_411_420jpeg(y4m_input *_y4m,unsigned char *_dst,
unsigned char *_aux){
/*Skip past the luma data.*/
_dst+=_y4m->pic_w*_y4m->pic_h;
/*Compute the size of each chroma plane.*/
c_w=(_y4m->pic_w+_y4m->src_c_dec_h-1)/_y4m->src_c_dec_h;
dst_c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
dst_c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
dst_c_sz=dst_c_w*dst_c_h;
for(pli=1;pli<3;pli++){
/*In reality, the horizontal and vertical steps could be pipelined, for
less memory consumption and better cache performance, but we do them
separately for simplicity.*/
/*First do horizontal filtering (convert to 422jpeg).*/
/*Filters: [1 110 18 -1]/128 and [-3 50 86 -5]/128, both derived from a
4-tap Mitchell window.*/
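/*Both filters sum to 128 (1+110+18-1=128 and -3+50+86-5=128). In the
  left-edge cases below the out-of-range tap is clamped onto the first sample,
  which is where the merged constants 111 (=1+110) and 47 (=-3+50) come from.*/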
for(x=0;x<OC_MINI(c_w,1);x++){
tmp[x<<1]=(unsigned char)OC_CLAMPI(0,(111*_aux[0]
+18*_aux[OC_MINI(1,c_w-1)]-_aux[OC_MINI(2,c_w-1)]+64)>>7,255);
tmp[x<<1|1]=(unsigned char)OC_CLAMPI(0,(47*_aux[0]
+86*_aux[OC_MINI(1,c_w-1)]-5*_aux[OC_MINI(2,c_w-1)]+64)>>7,255);
tmp[x<<1]=(unsigned char)OC_CLAMPI(0,(_aux[x-1]+110*_aux[x]
+18*_aux[x+1]-_aux[x+2]+64)>>7,255);
tmp[x<<1|1]=(unsigned char)OC_CLAMPI(0,(-3*_aux[x-1]+50*_aux[x]
+86*_aux[x+1]-5*_aux[x+2]+64)>>7,255);
tmp[x<<1]=(unsigned char)OC_CLAMPI(0,(_aux[x-1]+110*_aux[x]
+18*_aux[OC_MINI(x+1,c_w-1)]-_aux[c_w-1]+64)>>7,255);
if((x<<1|1)<dst_c_w){
tmp[x<<1|1]=(unsigned char)OC_CLAMPI(0,(-3*_aux[x-1]+50*_aux[x]
+86*_aux[OC_MINI(x+1,c_w-1)]-5*_aux[c_w-1]+64)>>7,255);
/*Now do the vertical filtering.*/
y4m_422jpeg_420jpeg_helper(_dst,tmp,dst_c_w,c_h);
/*Convert 444 to 420jpeg.*/
static void y4m_convert_444_420jpeg(y4m_input *_y4m,unsigned char *_dst,
unsigned char *_aux){
/*Skip past the luma data.*/
_dst+=_y4m->pic_w*_y4m->pic_h;
/*Compute the size of each chroma plane.*/
c_w=(_y4m->pic_w+_y4m->src_c_dec_h-1)/_y4m->src_c_dec_h;
dst_c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
dst_c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
dst_c_sz=dst_c_w*dst_c_h;
for(pli=1;pli<3;pli++){
/*Filter: [3 -17 78 78 -17 3]/128, derived from a 6-tap Lanczos window.*/
for(x=0;x<OC_MINI(c_w,2);x+=2){
tmp[x>>1]=OC_CLAMPI(0,(64*_aux[0]+78*_aux[OC_MINI(1,c_w-1)]
-17*_aux[OC_MINI(2,c_w-1)]
+3*_aux[OC_MINI(3,c_w-1)]+64)>>7,255);
tmp[x>>1]=OC_CLAMPI(0,(3*(_aux[x-2]+_aux[x+3])
-17*(_aux[x-1]+_aux[x+2])+78*(_aux[x]+_aux[x+1])+64)>>7,255);
tmp[x>>1]=OC_CLAMPI(0,(3*(_aux[x-2]+_aux[c_w-1])-
17*(_aux[x-1]+_aux[OC_MINI(x+2,c_w-1)])+
78*(_aux[x]+_aux[OC_MINI(x+1,c_w-1)])+64)>>7,255);
/*Now do the vertical filtering.*/
y4m_422jpeg_420jpeg_helper(_dst,tmp,dst_c_w,c_h);
/*The image is padded with empty chroma components at 4:2:0.*/
static void y4m_convert_mono_420jpeg(y4m_input *_y4m,unsigned char *_dst,
unsigned char *_aux){
_dst+=_y4m->pic_w*_y4m->pic_h;
c_sz=((_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h)*
((_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v);
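/*A value of 128 represents zero Cb/Cr (neutral gray), so both chroma planes
  are filled with 128.*/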
memset(_dst,128,c_sz*2);
/*No conversion function needed.*/
static void y4m_convert_null(y4m_input *_y4m,unsigned char *_dst,
unsigned char *_aux){
int y4m_input_open(y4m_input *_y4m,FILE *_fin,char *_skip,int _nskip){
/*Read until newline, or 80 cols, whichever happens first.*/
ret=fread(buffer+i,1,1,_fin);
if(buffer[i]=='\n')break;
/*We skipped too much header data.*/
if(_nskip>0)return -1;
fprintf(stderr,"Error parsing header; not a YUV4MPEG2 file?\n");
if(memcmp(buffer,"YUV4MPEG",8)){
fprintf(stderr,"Incomplete magic for YUV4MPEG file.\n");
fprintf(stderr,"Incorrect YUV input file version; YUV4MPEG2 required.\n");
ret=y4m_parse_tags(_y4m,buffer+5);
fprintf(stderr,"Error parsing YUV4MPEG2 header.\n");
if(_y4m->interlace=='?'){
fprintf(stderr,"Warning: Input video interlacing format unknown; "
"assuming progressive scan.\n");
else if(_y4m->interlace!='p'){
fprintf(stderr,"Input video is interlaced; "
"only progressive scan is handled.\n");
if(strcmp(_y4m->chroma_type,"420")==0||
strcmp(_y4m->chroma_type,"420jpeg")==0){
_y4m->src_c_dec_h=_y4m->dst_c_dec_h=_y4m->src_c_dec_v=_y4m->dst_c_dec_v=2;
_y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h
+2*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
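/*For example (illustrative numbers only), a 352x288 4:2:0 input reads
  352*288+2*176*144=152064 bytes of picture data per frame.*/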
/*Natively supported: no conversion required.*/
_y4m->aux_buf_sz=_y4m->aux_buf_read_sz=0;
_y4m->convert=y4m_convert_null;
else if(strcmp(_y4m->chroma_type,"420mpeg2")==0){
_y4m->src_c_dec_h=_y4m->dst_c_dec_h=_y4m->src_c_dec_v=_y4m->dst_c_dec_v=2;
_y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
/*Chroma filter required: read into the aux buf first.*/
_y4m->aux_buf_sz=_y4m->aux_buf_read_sz=
2*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
_y4m->convert=y4m_convert_42xmpeg2_42xjpeg;
else if(strcmp(_y4m->chroma_type,"420paldv")==0){
_y4m->src_c_dec_h=_y4m->dst_c_dec_h=_y4m->src_c_dec_v=_y4m->dst_c_dec_v=2;
_y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
/*Chroma filter required: read into the aux buf first.
We need to make two filter passes, so we need some extra space in the
aux buf.*/
_y4m->aux_buf_sz=3*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
_y4m->aux_buf_read_sz=2*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
_y4m->convert=y4m_convert_42xpaldv_42xjpeg;
else if(strcmp(_y4m->chroma_type,"422jpeg")==0){
_y4m->src_c_dec_h=_y4m->dst_c_dec_h=2;
_y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
/*Chroma filter required: read into the aux buf first.*/
_y4m->aux_buf_sz=_y4m->aux_buf_read_sz=2*((_y4m->pic_w+1)/2)*_y4m->pic_h;
_y4m->convert=y4m_convert_422jpeg_420jpeg;
else if(strcmp(_y4m->chroma_type,"422")==0){
_y4m->src_c_dec_h=_y4m->dst_c_dec_h=2;
_y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
/*Chroma filter required: read into the aux buf first.
We need to make two filter passes, so we need some extra space in the
aux buf.*/
_y4m->aux_buf_read_sz=2*((_y4m->pic_w+1)/2)*_y4m->pic_h;
_y4m->aux_buf_sz=_y4m->aux_buf_read_sz+((_y4m->pic_w+1)/2)*_y4m->pic_h;
_y4m->convert=y4m_convert_422_420jpeg;
else if(strcmp(_y4m->chroma_type,"411")==0){
_y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
/*Chroma filter required: read into the aux buf first.
We need to make two filter passes, so we need some extra space in the
aux buf.*/
_y4m->aux_buf_read_sz=2*((_y4m->pic_w+3)/4)*_y4m->pic_h;
_y4m->aux_buf_sz=_y4m->aux_buf_read_sz+((_y4m->pic_w+1)/2)*_y4m->pic_h;
_y4m->convert=y4m_convert_411_420jpeg;
else if(strcmp(_y4m->chroma_type,"444")==0){
_y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
/*Chroma filter required: read into the aux buf first.
We need to make two filter passes, so we need some extra space in the
aux buf.*/
_y4m->aux_buf_read_sz=2*_y4m->pic_w*_y4m->pic_h;
_y4m->aux_buf_sz=_y4m->aux_buf_read_sz+((_y4m->pic_w+1)/2)*_y4m->pic_h;
_y4m->convert=y4m_convert_444_420jpeg;
else if(strcmp(_y4m->chroma_type,"444alpha")==0){
_y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
/*Chroma filter required: read into the aux buf first.
We need to make two filter passes, so we need some extra space in the
aux buf.
The extra plane also gets read into the aux buf.
It will be discarded.*/
_y4m->aux_buf_sz=_y4m->aux_buf_read_sz=3*_y4m->pic_w*_y4m->pic_h;
_y4m->convert=y4m_convert_444_420jpeg;
else if(strcmp(_y4m->chroma_type,"mono")==0){
_y4m->src_c_dec_h=_y4m->src_c_dec_v=0;
_y4m->dst_c_dec_h=_y4m->dst_c_dec_v=2;
_y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
/*No extra space required, but we need to clear the chroma planes.*/
_y4m->aux_buf_sz=_y4m->aux_buf_read_sz=0;
_y4m->convert=y4m_convert_mono_420jpeg;
fprintf(stderr,"Unknown chroma sampling type: %s\n",_y4m->chroma_type);
/*The size of the final frame buffers is always computed from the
destination chroma decimation type.*/
_y4m->dst_buf_sz=_y4m->pic_w*_y4m->pic_h
+2*((_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h)*
((_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v);
_y4m->dst_buf=(unsigned char *)malloc(_y4m->dst_buf_sz);
_y4m->aux_buf=(unsigned char *)malloc(_y4m->aux_buf_sz);
void y4m_input_close(y4m_input *_y4m){
int y4m_input_fetch_frame(y4m_input *_y4m,FILE *_fin,vpx_image_t *_img){
/*Read and skip the frame header.*/
ret=fread(frame,1,6,_fin);
if(memcmp(frame,"FRAME",5)){
fprintf(stderr,"Loss of framing in Y4M input data\n");
for(j=0;j<79&&fread(&c,1,1,_fin)&&c!='\n';j++);
fprintf(stderr,"Error parsing Y4M frame header\n");
/*Read the frame data that needs no conversion.*/
if(fread(_y4m->dst_buf,1,_y4m->dst_buf_read_sz,_fin)!=_y4m->dst_buf_read_sz){
fprintf(stderr,"Error reading Y4M frame data.\n");
/*Read the frame data that does need conversion.*/
if(fread(_y4m->aux_buf,1,_y4m->aux_buf_read_sz,_fin)!=_y4m->aux_buf_read_sz){
fprintf(stderr,"Error reading Y4M frame data.\n");
/*Now convert the frame we just read.*/
(*_y4m->convert)(_y4m,_y4m->dst_buf,_y4m->aux_buf);
/*Fill in the frame buffer pointers.
We don't use vpx_img_wrap() because it forces padding for odd picture
sizes, which would require a separate fread call for every row.*/
memset(_img,0,sizeof(*_img));
/*Y4M has the planes in Y'CbCr order, which libvpx calls Y, U, and V.*/
_img->fmt=IMG_FMT_I420;
_img->w=_img->d_w=_y4m->pic_w;
_img->h=_img->d_h=_y4m->pic_h;
/*This is hard-coded to 4:2:0 for now, as that's all VP8 supports.*/
_img->x_chroma_shift=1;
_img->y_chroma_shift=1;
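/*A chroma shift of 1 in each direction matches the 2x2 chroma decimation
  (dst_c_dec_h==dst_c_dec_v==2) that the format setup above always selects for
  the destination.*/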
/*Set up the buffer pointers.*/
pic_sz=_y4m->pic_w*_y4m->pic_h;
c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
_img->stride[PLANE_Y]=_y4m->pic_w;
_img->stride[PLANE_U]=_img->stride[PLANE_V]=c_w;
_img->planes[PLANE_Y]=_y4m->dst_buf;
_img->planes[PLANE_U]=_y4m->dst_buf+pic_sz;
_img->planes[PLANE_V]=_y4m->dst_buf+pic_sz+c_sz;
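/*Illustrative usage sketch only; it is not part of the original converter.
  It assumes the following return conventions, which are assumptions rather
  than something shown in the code above: y4m_input_open() returns a negative
  value on failure, and y4m_input_fetch_frame() returns a positive value for
  each frame successfully read.  The function name example_dump_y4m() is
  hypothetical.*/
static int example_dump_y4m(const char *_path){
  FILE        *fin;
  y4m_input    y4m;
  vpx_image_t  img;
  fin=fopen(_path,"rb");
  if(fin==NULL)return -1;
  if(y4m_input_open(&y4m,fin,NULL,0)<0){
    fclose(fin);
    return -1;
  }
  while(y4m_input_fetch_frame(&y4m,fin,&img)>0){
    /*img.planes[PLANE_Y] now points at d_w x d_h luma samples with stride
      img.stride[PLANE_Y]; the U and V planes follow the same pattern.*/
  }
  y4m_input_close(&y4m);
  fclose(fin);
  return 0;
}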