1 /* libFLAC - Free Lossless Audio Codec library
2 * Copyright (C) 2000,2001,2002,2003,2004,2005,2006,2007,2008,2009 Josh Coalson
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
8 * - Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * - Neither the name of the Xiph.org Foundation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <math.h>

#include "FLAC/assert.h"
#include "FLAC/format.h"
#include "private/bitmath.h"
#include "private/lpc.h"
#if defined DEBUG || defined FLAC__OVERFLOW_DETECT || defined FLAC__OVERFLOW_DETECT_VERBOSE
#include <stdio.h>
#endif
45 /* OPT: #undef'ing this may improve the speed on some architectures */
46 #define FLAC__LPC_UNROLLED_FILTER_LOOPS
48 #ifndef FLAC__INTEGER_ONLY_LIBRARY
51 /* math.h in VC++ doesn't seem to have this (how Microsoft is that?) */
52 #define M_LN2 0.69314718055994530942
56 void FLAC__lpc_window_data(const FLAC__int32 in[], const FLAC__real window[], FLAC__real out[], unsigned data_len)
59 for(i = 0; i < data_len; i++)
60 out[i] = in[i] * window[i];
63 void FLAC__lpc_compute_autocorrelation(const FLAC__real data[], unsigned data_len, unsigned lag, FLAC__real autoc[])
65 /* a readable, but slower, version */
70 FLAC__ASSERT(lag > 0);
71 FLAC__ASSERT(lag <= data_len);
74 * Technically we should subtract the mean first like so:
75 * for(i = 0; i < data_len; i++)
77 * but it appears not to make enough of a difference to matter, and
78 * most signals are already closely centered around zero
81 for(i = lag, d = 0.0; i < data_len; i++)
82 d += data[i] * data[i - lag];
88 * this version tends to run faster because of better data locality
89 * ('data_len' is usually much larger than 'lag')
92 unsigned sample, coeff;
93 const unsigned limit = data_len - lag;
95 FLAC__ASSERT(lag > 0);
96 FLAC__ASSERT(lag <= data_len);
98 for(coeff = 0; coeff < lag; coeff++)
100 for(sample = 0; sample <= limit; sample++) {
102 for(coeff = 0; coeff < lag; coeff++)
103 autoc[coeff] += d * data[sample+coeff];
105 for(; sample < data_len; sample++) {
107 for(coeff = 0; coeff < data_len - sample; coeff++)
108 autoc[coeff] += d * data[sample+coeff];
112 void FLAC__lpc_compute_lp_coefficients(const FLAC__real autoc[], unsigned *max_order, FLAC__real lp_coeff[][FLAC__MAX_LPC_ORDER], FLAC__double error[])
115 FLAC__double r, err, lpc[FLAC__MAX_LPC_ORDER];
117 FLAC__ASSERT(0 != max_order);
118 FLAC__ASSERT(0 < *max_order);
119 FLAC__ASSERT(*max_order <= FLAC__MAX_LPC_ORDER);
120 FLAC__ASSERT(autoc[0] != 0.0);
124 for(i = 0; i < *max_order; i++) {
125 /* Sum up this iteration's reflection coefficient. */
127 for(j = 0; j < i; j++)
128 r -= lpc[j] * autoc[i-j];
130 /* Update LPC coefficients and total error. */
132 for(j = 0; j < (i>>1); j++) {
133 FLAC__double tmp = lpc[j];
134 lpc[j] += r * lpc[i-1-j];
135 lpc[i-1-j] += r * tmp;
138 lpc[j] += lpc[j] * r;
140 err *= (1.0 - r * r);
142 /* save this order */
143 for(j = 0; j <= i; j++)
144 lp_coeff[i][j] = (FLAC__real)(-lpc[j]); /* negate FIR filter coeff to get predictor coeff */
147 /* see SF bug #1601812 http://sourceforge.net/tracker/index.php?func=detail&aid=1601812&group_id=13478&atid=113478 */
155 int FLAC__lpc_quantize_coefficients(const FLAC__real lp_coeff[], unsigned order, unsigned precision, FLAC__int32 qlp_coeff[], int *shift)
159 FLAC__int32 qmax, qmin;
161 FLAC__ASSERT(precision > 0);
162 FLAC__ASSERT(precision >= FLAC__MIN_QLP_COEFF_PRECISION);
164 /* drop one bit for the sign; from here on out we consider only |lp_coeff[i]| */
166 qmax = 1 << precision;
170 /* calc cmax = max( |lp_coeff[i]| ) */
172 for(i = 0; i < order; i++) {
173 const FLAC__double d = fabs(lp_coeff[i]);
179 /* => coefficients are all 0, which means our constant-detect didn't work */
183 const int max_shiftlimit = (1 << (FLAC__SUBFRAME_LPC_QLP_SHIFT_LEN-1)) - 1;
184 const int min_shiftlimit = -max_shiftlimit - 1;
187 (void)frexp(cmax, &log2cmax);
189 *shift = (int)precision - log2cmax - 1;
191 if(*shift > max_shiftlimit)
192 *shift = max_shiftlimit;
193 else if(*shift < min_shiftlimit)
198 FLAC__double error = 0.0;
200 for(i = 0; i < order; i++) {
201 error += lp_coeff[i] * (1 << *shift);
202 #if 1 /* unfortunately lround() is C99 */
204 q = (FLAC__int32)(error + 0.5);
206 q = (FLAC__int32)(error - 0.5);
210 #ifdef FLAC__OVERFLOW_DETECT
211 if(q > qmax+1) /* we expect q==qmax+1 occasionally due to rounding */
212 fprintf(stderr,"FLAC__lpc_quantize_coefficients: quantizer overflow: q>qmax %d>%d shift=%d cmax=%f precision=%u lpc[%u]=%f\n",q,qmax,*shift,cmax,precision+1,i,lp_coeff[i]);
214 fprintf(stderr,"FLAC__lpc_quantize_coefficients: quantizer overflow: q<qmin %d<%d shift=%d cmax=%f precision=%u lpc[%u]=%f\n",q,qmin,*shift,cmax,precision+1,i,lp_coeff[i]);
224 /* negative shift is very rare but due to design flaw, negative shift is
225 * a NOP in the decoder, so it must be handled specially by scaling down
229 const int nshift = -(*shift);
230 FLAC__double error = 0.0;
233 fprintf(stderr,"FLAC__lpc_quantize_coefficients: negative shift=%d order=%u cmax=%f\n", *shift, order, cmax);
235 for(i = 0; i < order; i++) {
236 error += lp_coeff[i] / (1 << nshift);
237 #if 1 /* unfortunately lround() is C99 */
239 q = (FLAC__int32)(error + 0.5);
241 q = (FLAC__int32)(error - 0.5);
245 #ifdef FLAC__OVERFLOW_DETECT
246 if(q > qmax+1) /* we expect q==qmax+1 occasionally due to rounding */
247 fprintf(stderr,"FLAC__lpc_quantize_coefficients: quantizer overflow: q>qmax %d>%d shift=%d cmax=%f precision=%u lpc[%u]=%f\n",q,qmax,*shift,cmax,precision+1,i,lp_coeff[i]);
249 fprintf(stderr,"FLAC__lpc_quantize_coefficients: quantizer overflow: q<qmin %d<%d shift=%d cmax=%f precision=%u lpc[%u]=%f\n",q,qmin,*shift,cmax,precision+1,i,lp_coeff[i]);
264 void FLAC__lpc_compute_residual_from_qlp_coefficients(const FLAC__int32 *data, unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 residual[])
265 #if defined(FLAC__OVERFLOW_DETECT) || !defined(FLAC__LPC_UNROLLED_FILTER_LOOPS)
270 const FLAC__int32 *history;
272 #ifdef FLAC__OVERFLOW_DETECT_VERBOSE
273 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients: data_len=%d, order=%u, lpq=%d",data_len,order,lp_quantization);
275 fprintf(stderr,", q[%u]=%d",i,qlp_coeff[i]);
276 fprintf(stderr,"\n");
278 FLAC__ASSERT(order > 0);
280 for(i = 0; i < data_len; i++) {
284 for(j = 0; j < order; j++) {
285 sum += qlp_coeff[j] * (*(--history));
286 sumo += (FLAC__int64)qlp_coeff[j] * (FLAC__int64)(*history);
288 if(sumo > 2147483647I64 || sumo < -2147483648I64)
289 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients: OVERFLOW, i=%u, j=%u, c=%d, d=%d, sumo=%I64d\n",i,j,qlp_coeff[j],*history,sumo);
291 if(sumo > 2147483647ll || sumo < -2147483648ll)
292 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients: OVERFLOW, i=%u, j=%u, c=%d, d=%d, sumo=%lld\n",i,j,qlp_coeff[j],*history,(long long)sumo);
295 *(residual++) = *(data++) - (sum >> lp_quantization);
298 /* Here's a slower but clearer version:
299 for(i = 0; i < data_len; i++) {
301 for(j = 0; j < order; j++)
302 sum += qlp_coeff[j] * data[i-j-1];
303 residual[i] = data[i] - (sum >> lp_quantization);
307 #else /* fully unrolled version for normal use */
312 FLAC__ASSERT(order > 0);
313 FLAC__ASSERT(order <= 32);
316 * We do unique versions up to 12th order since that's the subset limit.
317 * Also they are roughly ordered to match frequency of occurrence to
318 * minimize branching.
324 for(i = 0; i < (int)data_len; i++) {
326 sum += qlp_coeff[11] * data[i-12];
327 sum += qlp_coeff[10] * data[i-11];
328 sum += qlp_coeff[9] * data[i-10];
329 sum += qlp_coeff[8] * data[i-9];
330 sum += qlp_coeff[7] * data[i-8];
331 sum += qlp_coeff[6] * data[i-7];
332 sum += qlp_coeff[5] * data[i-6];
333 sum += qlp_coeff[4] * data[i-5];
334 sum += qlp_coeff[3] * data[i-4];
335 sum += qlp_coeff[2] * data[i-3];
336 sum += qlp_coeff[1] * data[i-2];
337 sum += qlp_coeff[0] * data[i-1];
338 residual[i] = data[i] - (sum >> lp_quantization);
341 else { /* order == 11 */
342 for(i = 0; i < (int)data_len; i++) {
344 sum += qlp_coeff[10] * data[i-11];
345 sum += qlp_coeff[9] * data[i-10];
346 sum += qlp_coeff[8] * data[i-9];
347 sum += qlp_coeff[7] * data[i-8];
348 sum += qlp_coeff[6] * data[i-7];
349 sum += qlp_coeff[5] * data[i-6];
350 sum += qlp_coeff[4] * data[i-5];
351 sum += qlp_coeff[3] * data[i-4];
352 sum += qlp_coeff[2] * data[i-3];
353 sum += qlp_coeff[1] * data[i-2];
354 sum += qlp_coeff[0] * data[i-1];
355 residual[i] = data[i] - (sum >> lp_quantization);
361 for(i = 0; i < (int)data_len; i++) {
363 sum += qlp_coeff[9] * data[i-10];
364 sum += qlp_coeff[8] * data[i-9];
365 sum += qlp_coeff[7] * data[i-8];
366 sum += qlp_coeff[6] * data[i-7];
367 sum += qlp_coeff[5] * data[i-6];
368 sum += qlp_coeff[4] * data[i-5];
369 sum += qlp_coeff[3] * data[i-4];
370 sum += qlp_coeff[2] * data[i-3];
371 sum += qlp_coeff[1] * data[i-2];
372 sum += qlp_coeff[0] * data[i-1];
373 residual[i] = data[i] - (sum >> lp_quantization);
376 else { /* order == 9 */
377 for(i = 0; i < (int)data_len; i++) {
379 sum += qlp_coeff[8] * data[i-9];
380 sum += qlp_coeff[7] * data[i-8];
381 sum += qlp_coeff[6] * data[i-7];
382 sum += qlp_coeff[5] * data[i-6];
383 sum += qlp_coeff[4] * data[i-5];
384 sum += qlp_coeff[3] * data[i-4];
385 sum += qlp_coeff[2] * data[i-3];
386 sum += qlp_coeff[1] * data[i-2];
387 sum += qlp_coeff[0] * data[i-1];
388 residual[i] = data[i] - (sum >> lp_quantization);
396 for(i = 0; i < (int)data_len; i++) {
398 sum += qlp_coeff[7] * data[i-8];
399 sum += qlp_coeff[6] * data[i-7];
400 sum += qlp_coeff[5] * data[i-6];
401 sum += qlp_coeff[4] * data[i-5];
402 sum += qlp_coeff[3] * data[i-4];
403 sum += qlp_coeff[2] * data[i-3];
404 sum += qlp_coeff[1] * data[i-2];
405 sum += qlp_coeff[0] * data[i-1];
406 residual[i] = data[i] - (sum >> lp_quantization);
409 else { /* order == 7 */
410 for(i = 0; i < (int)data_len; i++) {
412 sum += qlp_coeff[6] * data[i-7];
413 sum += qlp_coeff[5] * data[i-6];
414 sum += qlp_coeff[4] * data[i-5];
415 sum += qlp_coeff[3] * data[i-4];
416 sum += qlp_coeff[2] * data[i-3];
417 sum += qlp_coeff[1] * data[i-2];
418 sum += qlp_coeff[0] * data[i-1];
419 residual[i] = data[i] - (sum >> lp_quantization);
425 for(i = 0; i < (int)data_len; i++) {
427 sum += qlp_coeff[5] * data[i-6];
428 sum += qlp_coeff[4] * data[i-5];
429 sum += qlp_coeff[3] * data[i-4];
430 sum += qlp_coeff[2] * data[i-3];
431 sum += qlp_coeff[1] * data[i-2];
432 sum += qlp_coeff[0] * data[i-1];
433 residual[i] = data[i] - (sum >> lp_quantization);
436 else { /* order == 5 */
437 for(i = 0; i < (int)data_len; i++) {
439 sum += qlp_coeff[4] * data[i-5];
440 sum += qlp_coeff[3] * data[i-4];
441 sum += qlp_coeff[2] * data[i-3];
442 sum += qlp_coeff[1] * data[i-2];
443 sum += qlp_coeff[0] * data[i-1];
444 residual[i] = data[i] - (sum >> lp_quantization);
452 for(i = 0; i < (int)data_len; i++) {
454 sum += qlp_coeff[3] * data[i-4];
455 sum += qlp_coeff[2] * data[i-3];
456 sum += qlp_coeff[1] * data[i-2];
457 sum += qlp_coeff[0] * data[i-1];
458 residual[i] = data[i] - (sum >> lp_quantization);
461 else { /* order == 3 */
462 for(i = 0; i < (int)data_len; i++) {
464 sum += qlp_coeff[2] * data[i-3];
465 sum += qlp_coeff[1] * data[i-2];
466 sum += qlp_coeff[0] * data[i-1];
467 residual[i] = data[i] - (sum >> lp_quantization);
473 for(i = 0; i < (int)data_len; i++) {
475 sum += qlp_coeff[1] * data[i-2];
476 sum += qlp_coeff[0] * data[i-1];
477 residual[i] = data[i] - (sum >> lp_quantization);
480 else { /* order == 1 */
481 for(i = 0; i < (int)data_len; i++)
482 residual[i] = data[i] - ((qlp_coeff[0] * data[i-1]) >> lp_quantization);
487 else { /* order > 12 */
488 for(i = 0; i < (int)data_len; i++) {
491 case 32: sum += qlp_coeff[31] * data[i-32];
492 case 31: sum += qlp_coeff[30] * data[i-31];
493 case 30: sum += qlp_coeff[29] * data[i-30];
494 case 29: sum += qlp_coeff[28] * data[i-29];
495 case 28: sum += qlp_coeff[27] * data[i-28];
496 case 27: sum += qlp_coeff[26] * data[i-27];
497 case 26: sum += qlp_coeff[25] * data[i-26];
498 case 25: sum += qlp_coeff[24] * data[i-25];
499 case 24: sum += qlp_coeff[23] * data[i-24];
500 case 23: sum += qlp_coeff[22] * data[i-23];
501 case 22: sum += qlp_coeff[21] * data[i-22];
502 case 21: sum += qlp_coeff[20] * data[i-21];
503 case 20: sum += qlp_coeff[19] * data[i-20];
504 case 19: sum += qlp_coeff[18] * data[i-19];
505 case 18: sum += qlp_coeff[17] * data[i-18];
506 case 17: sum += qlp_coeff[16] * data[i-17];
507 case 16: sum += qlp_coeff[15] * data[i-16];
508 case 15: sum += qlp_coeff[14] * data[i-15];
509 case 14: sum += qlp_coeff[13] * data[i-14];
510 case 13: sum += qlp_coeff[12] * data[i-13];
511 sum += qlp_coeff[11] * data[i-12];
512 sum += qlp_coeff[10] * data[i-11];
513 sum += qlp_coeff[ 9] * data[i-10];
514 sum += qlp_coeff[ 8] * data[i- 9];
515 sum += qlp_coeff[ 7] * data[i- 8];
516 sum += qlp_coeff[ 6] * data[i- 7];
517 sum += qlp_coeff[ 5] * data[i- 6];
518 sum += qlp_coeff[ 4] * data[i- 5];
519 sum += qlp_coeff[ 3] * data[i- 4];
520 sum += qlp_coeff[ 2] * data[i- 3];
521 sum += qlp_coeff[ 1] * data[i- 2];
522 sum += qlp_coeff[ 0] * data[i- 1];
524 residual[i] = data[i] - (sum >> lp_quantization);
530 void FLAC__lpc_compute_residual_from_qlp_coefficients_wide(const FLAC__int32 *data, unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 residual[])
531 #if defined(FLAC__OVERFLOW_DETECT) || !defined(FLAC__LPC_UNROLLED_FILTER_LOOPS)
535 const FLAC__int32 *history;
537 #ifdef FLAC__OVERFLOW_DETECT_VERBOSE
538 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients_wide: data_len=%d, order=%u, lpq=%d",data_len,order,lp_quantization);
540 fprintf(stderr,", q[%u]=%d",i,qlp_coeff[i]);
541 fprintf(stderr,"\n");
543 FLAC__ASSERT(order > 0);
545 for(i = 0; i < data_len; i++) {
548 for(j = 0; j < order; j++)
549 sum += (FLAC__int64)qlp_coeff[j] * (FLAC__int64)(*(--history));
550 if(FLAC__bitmath_silog2_wide(sum >> lp_quantization) > 32) {
552 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients_wide: OVERFLOW, i=%u, sum=%I64d\n", i, sum >> lp_quantization);
554 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients_wide: OVERFLOW, i=%u, sum=%lld\n", i, (long long)(sum >> lp_quantization));
558 if(FLAC__bitmath_silog2_wide((FLAC__int64)(*data) - (sum >> lp_quantization)) > 32) {
560 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients_wide: OVERFLOW, i=%u, data=%d, sum=%I64d, residual=%I64d\n", i, *data, sum >> lp_quantization, (FLAC__int64)(*data) - (sum >> lp_quantization));
562 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients_wide: OVERFLOW, i=%u, data=%d, sum=%lld, residual=%lld\n", i, *data, (long long)(sum >> lp_quantization), (long long)((FLAC__int64)(*data) - (sum >> lp_quantization)));
566 *(residual++) = *(data++) - (FLAC__int32)(sum >> lp_quantization);
569 #else /* fully unrolled version for normal use */
574 FLAC__ASSERT(order > 0);
575 FLAC__ASSERT(order <= 32);
578 * We do unique versions up to 12th order since that's the subset limit.
579 * Also they are roughly ordered to match frequency of occurrence to
580 * minimize branching.
586 for(i = 0; i < (int)data_len; i++) {
588 sum += qlp_coeff[11] * (FLAC__int64)data[i-12];
589 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
590 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
591 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
592 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
593 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
594 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
595 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
596 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
597 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
598 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
599 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
600 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
603 else { /* order == 11 */
604 for(i = 0; i < (int)data_len; i++) {
606 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
607 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
608 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
609 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
610 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
611 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
612 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
613 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
614 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
615 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
616 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
617 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
623 for(i = 0; i < (int)data_len; i++) {
625 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
626 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
627 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
628 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
629 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
630 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
631 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
632 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
633 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
634 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
635 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
638 else { /* order == 9 */
639 for(i = 0; i < (int)data_len; i++) {
641 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
642 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
643 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
644 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
645 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
646 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
647 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
648 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
649 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
650 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
658 for(i = 0; i < (int)data_len; i++) {
660 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
661 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
662 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
663 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
664 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
665 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
666 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
667 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
668 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
671 else { /* order == 7 */
672 for(i = 0; i < (int)data_len; i++) {
674 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
675 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
676 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
677 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
678 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
679 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
680 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
681 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
687 for(i = 0; i < (int)data_len; i++) {
689 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
690 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
691 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
692 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
693 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
694 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
695 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
698 else { /* order == 5 */
699 for(i = 0; i < (int)data_len; i++) {
701 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
702 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
703 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
704 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
705 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
706 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
714 for(i = 0; i < (int)data_len; i++) {
716 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
717 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
718 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
719 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
720 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
723 else { /* order == 3 */
724 for(i = 0; i < (int)data_len; i++) {
726 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
727 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
728 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
729 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
735 for(i = 0; i < (int)data_len; i++) {
737 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
738 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
739 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
742 else { /* order == 1 */
743 for(i = 0; i < (int)data_len; i++)
744 residual[i] = data[i] - (FLAC__int32)((qlp_coeff[0] * (FLAC__int64)data[i-1]) >> lp_quantization);
749 else { /* order > 12 */
750 for(i = 0; i < (int)data_len; i++) {
753 case 32: sum += qlp_coeff[31] * (FLAC__int64)data[i-32];
754 case 31: sum += qlp_coeff[30] * (FLAC__int64)data[i-31];
755 case 30: sum += qlp_coeff[29] * (FLAC__int64)data[i-30];
756 case 29: sum += qlp_coeff[28] * (FLAC__int64)data[i-29];
757 case 28: sum += qlp_coeff[27] * (FLAC__int64)data[i-28];
758 case 27: sum += qlp_coeff[26] * (FLAC__int64)data[i-27];
759 case 26: sum += qlp_coeff[25] * (FLAC__int64)data[i-26];
760 case 25: sum += qlp_coeff[24] * (FLAC__int64)data[i-25];
761 case 24: sum += qlp_coeff[23] * (FLAC__int64)data[i-24];
762 case 23: sum += qlp_coeff[22] * (FLAC__int64)data[i-23];
763 case 22: sum += qlp_coeff[21] * (FLAC__int64)data[i-22];
764 case 21: sum += qlp_coeff[20] * (FLAC__int64)data[i-21];
765 case 20: sum += qlp_coeff[19] * (FLAC__int64)data[i-20];
766 case 19: sum += qlp_coeff[18] * (FLAC__int64)data[i-19];
767 case 18: sum += qlp_coeff[17] * (FLAC__int64)data[i-18];
768 case 17: sum += qlp_coeff[16] * (FLAC__int64)data[i-17];
769 case 16: sum += qlp_coeff[15] * (FLAC__int64)data[i-16];
770 case 15: sum += qlp_coeff[14] * (FLAC__int64)data[i-15];
771 case 14: sum += qlp_coeff[13] * (FLAC__int64)data[i-14];
772 case 13: sum += qlp_coeff[12] * (FLAC__int64)data[i-13];
773 sum += qlp_coeff[11] * (FLAC__int64)data[i-12];
774 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
775 sum += qlp_coeff[ 9] * (FLAC__int64)data[i-10];
776 sum += qlp_coeff[ 8] * (FLAC__int64)data[i- 9];
777 sum += qlp_coeff[ 7] * (FLAC__int64)data[i- 8];
778 sum += qlp_coeff[ 6] * (FLAC__int64)data[i- 7];
779 sum += qlp_coeff[ 5] * (FLAC__int64)data[i- 6];
780 sum += qlp_coeff[ 4] * (FLAC__int64)data[i- 5];
781 sum += qlp_coeff[ 3] * (FLAC__int64)data[i- 4];
782 sum += qlp_coeff[ 2] * (FLAC__int64)data[i- 3];
783 sum += qlp_coeff[ 1] * (FLAC__int64)data[i- 2];
784 sum += qlp_coeff[ 0] * (FLAC__int64)data[i- 1];
786 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
792 #endif /* !defined FLAC__INTEGER_ONLY_LIBRARY */
794 void FLAC__lpc_restore_signal(const FLAC__int32 residual[], unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 data[])
795 #if defined(FLAC__OVERFLOW_DETECT) || !defined(FLAC__LPC_UNROLLED_FILTER_LOOPS)
800 const FLAC__int32 *r = residual, *history;
802 #ifdef FLAC__OVERFLOW_DETECT_VERBOSE
803 fprintf(stderr,"FLAC__lpc_restore_signal: data_len=%d, order=%u, lpq=%d",data_len,order,lp_quantization);
805 fprintf(stderr,", q[%u]=%d",i,qlp_coeff[i]);
806 fprintf(stderr,"\n");
808 FLAC__ASSERT(order > 0);
810 for(i = 0; i < data_len; i++) {
814 for(j = 0; j < order; j++) {
815 sum += qlp_coeff[j] * (*(--history));
816 sumo += (FLAC__int64)qlp_coeff[j] * (FLAC__int64)(*history);
818 if(sumo > 2147483647I64 || sumo < -2147483648I64)
819 fprintf(stderr,"FLAC__lpc_restore_signal: OVERFLOW, i=%u, j=%u, c=%d, d=%d, sumo=%I64d\n",i,j,qlp_coeff[j],*history,sumo);
821 if(sumo > 2147483647ll || sumo < -2147483648ll)
822 fprintf(stderr,"FLAC__lpc_restore_signal: OVERFLOW, i=%u, j=%u, c=%d, d=%d, sumo=%lld\n",i,j,qlp_coeff[j],*history,(long long)sumo);
825 *(data++) = *(r++) + (sum >> lp_quantization);
828 /* Here's a slower but clearer version:
829 for(i = 0; i < data_len; i++) {
831 for(j = 0; j < order; j++)
832 sum += qlp_coeff[j] * data[i-j-1];
833 data[i] = residual[i] + (sum >> lp_quantization);
837 #else /* fully unrolled version for normal use */
842 FLAC__ASSERT(order > 0);
843 FLAC__ASSERT(order <= 32);
846 * We do unique versions up to 12th order since that's the subset limit.
847 * Also they are roughly ordered to match frequency of occurrence to
848 * minimize branching.
854 for(i = 0; i < (int)data_len; i++) {
856 sum += qlp_coeff[11] * data[i-12];
857 sum += qlp_coeff[10] * data[i-11];
858 sum += qlp_coeff[9] * data[i-10];
859 sum += qlp_coeff[8] * data[i-9];
860 sum += qlp_coeff[7] * data[i-8];
861 sum += qlp_coeff[6] * data[i-7];
862 sum += qlp_coeff[5] * data[i-6];
863 sum += qlp_coeff[4] * data[i-5];
864 sum += qlp_coeff[3] * data[i-4];
865 sum += qlp_coeff[2] * data[i-3];
866 sum += qlp_coeff[1] * data[i-2];
867 sum += qlp_coeff[0] * data[i-1];
868 data[i] = residual[i] + (sum >> lp_quantization);
871 else { /* order == 11 */
872 for(i = 0; i < (int)data_len; i++) {
874 sum += qlp_coeff[10] * data[i-11];
875 sum += qlp_coeff[9] * data[i-10];
876 sum += qlp_coeff[8] * data[i-9];
877 sum += qlp_coeff[7] * data[i-8];
878 sum += qlp_coeff[6] * data[i-7];
879 sum += qlp_coeff[5] * data[i-6];
880 sum += qlp_coeff[4] * data[i-5];
881 sum += qlp_coeff[3] * data[i-4];
882 sum += qlp_coeff[2] * data[i-3];
883 sum += qlp_coeff[1] * data[i-2];
884 sum += qlp_coeff[0] * data[i-1];
885 data[i] = residual[i] + (sum >> lp_quantization);
891 for(i = 0; i < (int)data_len; i++) {
893 sum += qlp_coeff[9] * data[i-10];
894 sum += qlp_coeff[8] * data[i-9];
895 sum += qlp_coeff[7] * data[i-8];
896 sum += qlp_coeff[6] * data[i-7];
897 sum += qlp_coeff[5] * data[i-6];
898 sum += qlp_coeff[4] * data[i-5];
899 sum += qlp_coeff[3] * data[i-4];
900 sum += qlp_coeff[2] * data[i-3];
901 sum += qlp_coeff[1] * data[i-2];
902 sum += qlp_coeff[0] * data[i-1];
903 data[i] = residual[i] + (sum >> lp_quantization);
906 else { /* order == 9 */
907 for(i = 0; i < (int)data_len; i++) {
909 sum += qlp_coeff[8] * data[i-9];
910 sum += qlp_coeff[7] * data[i-8];
911 sum += qlp_coeff[6] * data[i-7];
912 sum += qlp_coeff[5] * data[i-6];
913 sum += qlp_coeff[4] * data[i-5];
914 sum += qlp_coeff[3] * data[i-4];
915 sum += qlp_coeff[2] * data[i-3];
916 sum += qlp_coeff[1] * data[i-2];
917 sum += qlp_coeff[0] * data[i-1];
918 data[i] = residual[i] + (sum >> lp_quantization);
926 for(i = 0; i < (int)data_len; i++) {
928 sum += qlp_coeff[7] * data[i-8];
929 sum += qlp_coeff[6] * data[i-7];
930 sum += qlp_coeff[5] * data[i-6];
931 sum += qlp_coeff[4] * data[i-5];
932 sum += qlp_coeff[3] * data[i-4];
933 sum += qlp_coeff[2] * data[i-3];
934 sum += qlp_coeff[1] * data[i-2];
935 sum += qlp_coeff[0] * data[i-1];
936 data[i] = residual[i] + (sum >> lp_quantization);
939 else { /* order == 7 */
940 for(i = 0; i < (int)data_len; i++) {
942 sum += qlp_coeff[6] * data[i-7];
943 sum += qlp_coeff[5] * data[i-6];
944 sum += qlp_coeff[4] * data[i-5];
945 sum += qlp_coeff[3] * data[i-4];
946 sum += qlp_coeff[2] * data[i-3];
947 sum += qlp_coeff[1] * data[i-2];
948 sum += qlp_coeff[0] * data[i-1];
949 data[i] = residual[i] + (sum >> lp_quantization);
955 for(i = 0; i < (int)data_len; i++) {
957 sum += qlp_coeff[5] * data[i-6];
958 sum += qlp_coeff[4] * data[i-5];
959 sum += qlp_coeff[3] * data[i-4];
960 sum += qlp_coeff[2] * data[i-3];
961 sum += qlp_coeff[1] * data[i-2];
962 sum += qlp_coeff[0] * data[i-1];
963 data[i] = residual[i] + (sum >> lp_quantization);
966 else { /* order == 5 */
967 for(i = 0; i < (int)data_len; i++) {
969 sum += qlp_coeff[4] * data[i-5];
970 sum += qlp_coeff[3] * data[i-4];
971 sum += qlp_coeff[2] * data[i-3];
972 sum += qlp_coeff[1] * data[i-2];
973 sum += qlp_coeff[0] * data[i-1];
974 data[i] = residual[i] + (sum >> lp_quantization);
982 for(i = 0; i < (int)data_len; i++) {
984 sum += qlp_coeff[3] * data[i-4];
985 sum += qlp_coeff[2] * data[i-3];
986 sum += qlp_coeff[1] * data[i-2];
987 sum += qlp_coeff[0] * data[i-1];
988 data[i] = residual[i] + (sum >> lp_quantization);
991 else { /* order == 3 */
992 for(i = 0; i < (int)data_len; i++) {
994 sum += qlp_coeff[2] * data[i-3];
995 sum += qlp_coeff[1] * data[i-2];
996 sum += qlp_coeff[0] * data[i-1];
997 data[i] = residual[i] + (sum >> lp_quantization);
1003 for(i = 0; i < (int)data_len; i++) {
1005 sum += qlp_coeff[1] * data[i-2];
1006 sum += qlp_coeff[0] * data[i-1];
1007 data[i] = residual[i] + (sum >> lp_quantization);
1010 else { /* order == 1 */
1011 for(i = 0; i < (int)data_len; i++)
1012 data[i] = residual[i] + ((qlp_coeff[0] * data[i-1]) >> lp_quantization);
1017 else { /* order > 12 */
1018 for(i = 0; i < (int)data_len; i++) {
1021 case 32: sum += qlp_coeff[31] * data[i-32];
1022 case 31: sum += qlp_coeff[30] * data[i-31];
1023 case 30: sum += qlp_coeff[29] * data[i-30];
1024 case 29: sum += qlp_coeff[28] * data[i-29];
1025 case 28: sum += qlp_coeff[27] * data[i-28];
1026 case 27: sum += qlp_coeff[26] * data[i-27];
1027 case 26: sum += qlp_coeff[25] * data[i-26];
1028 case 25: sum += qlp_coeff[24] * data[i-25];
1029 case 24: sum += qlp_coeff[23] * data[i-24];
1030 case 23: sum += qlp_coeff[22] * data[i-23];
1031 case 22: sum += qlp_coeff[21] * data[i-22];
1032 case 21: sum += qlp_coeff[20] * data[i-21];
1033 case 20: sum += qlp_coeff[19] * data[i-20];
1034 case 19: sum += qlp_coeff[18] * data[i-19];
1035 case 18: sum += qlp_coeff[17] * data[i-18];
1036 case 17: sum += qlp_coeff[16] * data[i-17];
1037 case 16: sum += qlp_coeff[15] * data[i-16];
1038 case 15: sum += qlp_coeff[14] * data[i-15];
1039 case 14: sum += qlp_coeff[13] * data[i-14];
1040 case 13: sum += qlp_coeff[12] * data[i-13];
1041 sum += qlp_coeff[11] * data[i-12];
1042 sum += qlp_coeff[10] * data[i-11];
1043 sum += qlp_coeff[ 9] * data[i-10];
1044 sum += qlp_coeff[ 8] * data[i- 9];
1045 sum += qlp_coeff[ 7] * data[i- 8];
1046 sum += qlp_coeff[ 6] * data[i- 7];
1047 sum += qlp_coeff[ 5] * data[i- 6];
1048 sum += qlp_coeff[ 4] * data[i- 5];
1049 sum += qlp_coeff[ 3] * data[i- 4];
1050 sum += qlp_coeff[ 2] * data[i- 3];
1051 sum += qlp_coeff[ 1] * data[i- 2];
1052 sum += qlp_coeff[ 0] * data[i- 1];
1054 data[i] = residual[i] + (sum >> lp_quantization);
/*
 * Reconstruct the original signal from an LPC residual using a 64-bit
 * ("wide") accumulator: data[i] = residual[i] + (sum of qlp_coeff[j] *
 * data[i-j-1], j = 0..order-1) >> lp_quantization.  Each product is
 * computed as FLAC__int64 so the prediction sum cannot overflow 32 bits.
 *
 * residual[]       - data_len residual (error) samples to decode
 * data_len         - number of output samples to produce
 * qlp_coeff[]      - 'order' quantized LP coefficients
 * order            - predictor order; asserted 0 < order <= 32
 * lp_quantization  - right-shift applied to the prediction sum
 * data[]           - output; the predictor reads data[i-order .. i-1],
 *                    so the caller must supply 'order' warm-up samples
 *                    immediately before data[0]
 *
 * Two implementations are selected at compile time: a generic loop with
 * optional overflow diagnostics, and a fully unrolled version keyed on
 * 'order' for normal use.
 */
1060 void FLAC__lpc_restore_signal_wide(const FLAC__int32 residual[], unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 data[])
/* Generic (non-unrolled) path; also used when overflow detection is on. */
1061 #if defined(FLAC__OVERFLOW_DETECT) || !defined(FLAC__LPC_UNROLLED_FILTER_LOOPS)
1065 const FLAC__int32 *r = residual, *history;
/* Optional verbose trace of the call parameters and coefficients. */
1067 #ifdef FLAC__OVERFLOW_DETECT_VERBOSE
1068 fprintf(stderr,"FLAC__lpc_restore_signal_wide: data_len=%d, order=%u, lpq=%d",data_len,order,lp_quantization);
1069 for(i=0;i<order;i++)
1070 fprintf(stderr,", q[%u]=%d",i,qlp_coeff[i]);
1071 fprintf(stderr,"\n");
1073 FLAC__ASSERT(order > 0);
1075 for(i = 0; i < data_len; i++) {
/* Walk the history backwards: j=0 pairs qlp_coeff[0] with data[i-1]. */
1078 for(j = 0; j < order; j++)
1079 sum += (FLAC__int64)qlp_coeff[j] * (FLAC__int64)(*(--history));
/* Diagnostic: warn if the shifted prediction no longer fits in 32 bits.
 * %I64d is the MSVC spelling of the 64-bit format; %lld elsewhere. */
1080 if(FLAC__bitmath_silog2_wide(sum >> lp_quantization) > 32) {
1082 fprintf(stderr,"FLAC__lpc_restore_signal_wide: OVERFLOW, i=%u, sum=%I64d\n", i, sum >> lp_quantization);
1084 fprintf(stderr,"FLAC__lpc_restore_signal_wide: OVERFLOW, i=%u, sum=%lld\n", i, (long long)(sum >> lp_quantization));
/* Same check on the final restored sample (residual + prediction). */
1088 if(FLAC__bitmath_silog2_wide((FLAC__int64)(*r) + (sum >> lp_quantization)) > 32) {
1090 fprintf(stderr,"FLAC__lpc_restore_signal_wide: OVERFLOW, i=%u, residual=%d, sum=%I64d, data=%I64d\n", i, *r, sum >> lp_quantization, (FLAC__int64)(*r) + (sum >> lp_quantization));
1092 fprintf(stderr,"FLAC__lpc_restore_signal_wide: OVERFLOW, i=%u, residual=%d, sum=%lld, data=%lld\n", i, *r, (long long)(sum >> lp_quantization), (long long)((FLAC__int64)(*r) + (sum >> lp_quantization)));
1096 *(data++) = *(r++) + (FLAC__int32)(sum >> lp_quantization);
1099 #else /* fully unrolled version for normal use */
1104 FLAC__ASSERT(order > 0);
1105 FLAC__ASSERT(order <= 32);
1108 * We do unique versions up to 12th order since that's the subset limit.
1109 * Also they are roughly ordered to match frequency of occurrence to
1110 * minimize branching.
/* order == 12: all twelve taps written out explicitly. */
1116 for(i = 0; i < (int)data_len; i++) {
1118 sum += qlp_coeff[11] * (FLAC__int64)data[i-12];
1119 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
1120 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
1121 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
1122 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
1123 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
1124 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1125 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1126 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1127 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1128 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1129 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1130 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1133 else { /* order == 11 */
1134 for(i = 0; i < (int)data_len; i++) {
1136 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
1137 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
1138 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
1139 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
1140 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
1141 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1142 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1143 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1144 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1145 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1146 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1147 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
/* order == 10 */
1153 for(i = 0; i < (int)data_len; i++) {
1155 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
1156 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
1157 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
1158 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
1159 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1160 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1161 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1162 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1163 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1164 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1165 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1168 else { /* order == 9 */
1169 for(i = 0; i < (int)data_len; i++) {
1171 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
1172 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
1173 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
1174 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1175 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1176 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1177 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1178 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1179 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1180 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1185 else if(order > 4) {
/* order == 8 */
1188 for(i = 0; i < (int)data_len; i++) {
1190 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
1191 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
1192 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1193 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1194 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1195 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1196 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1197 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1198 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1201 else { /* order == 7 */
1202 for(i = 0; i < (int)data_len; i++) {
1204 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
1205 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1206 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1207 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1208 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1209 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1210 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1211 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
/* order == 6 */
1217 for(i = 0; i < (int)data_len; i++) {
1219 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1220 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1221 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1222 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1223 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1224 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1225 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1228 else { /* order == 5 */
1229 for(i = 0; i < (int)data_len; i++) {
1231 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1232 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1233 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1234 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1235 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1236 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
/* order == 4 */
1244 for(i = 0; i < (int)data_len; i++) {
1246 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1247 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1248 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1249 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1250 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1253 else { /* order == 3 */
1254 for(i = 0; i < (int)data_len; i++) {
1256 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1257 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1258 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1259 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
/* order == 2 */
1265 for(i = 0; i < (int)data_len; i++) {
1267 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1268 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1269 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1272 else { /* order == 1 */
1273 for(i = 0; i < (int)data_len; i++)
1274 data[i] = residual[i] + (FLAC__int32)((qlp_coeff[0] * (FLAC__int64)data[i-1]) >> lp_quantization);
1279 else { /* order > 12 */
1280 for(i = 0; i < (int)data_len; i++) {
/* Duff's-device-style switch: each case deliberately falls through to
 * the next, so entering at 'case order' accumulates taps order..13,
 * then the unconditional statements below add taps 12..1. */
1283 case 32: sum += qlp_coeff[31] * (FLAC__int64)data[i-32];
1284 case 31: sum += qlp_coeff[30] * (FLAC__int64)data[i-31];
1285 case 30: sum += qlp_coeff[29] * (FLAC__int64)data[i-30];
1286 case 29: sum += qlp_coeff[28] * (FLAC__int64)data[i-29];
1287 case 28: sum += qlp_coeff[27] * (FLAC__int64)data[i-28];
1288 case 27: sum += qlp_coeff[26] * (FLAC__int64)data[i-27];
1289 case 26: sum += qlp_coeff[25] * (FLAC__int64)data[i-26];
1290 case 25: sum += qlp_coeff[24] * (FLAC__int64)data[i-25];
1291 case 24: sum += qlp_coeff[23] * (FLAC__int64)data[i-24];
1292 case 23: sum += qlp_coeff[22] * (FLAC__int64)data[i-23];
1293 case 22: sum += qlp_coeff[21] * (FLAC__int64)data[i-22];
1294 case 21: sum += qlp_coeff[20] * (FLAC__int64)data[i-21];
1295 case 20: sum += qlp_coeff[19] * (FLAC__int64)data[i-20];
1296 case 19: sum += qlp_coeff[18] * (FLAC__int64)data[i-19];
1297 case 18: sum += qlp_coeff[17] * (FLAC__int64)data[i-18];
1298 case 17: sum += qlp_coeff[16] * (FLAC__int64)data[i-17];
1299 case 16: sum += qlp_coeff[15] * (FLAC__int64)data[i-16];
1300 case 15: sum += qlp_coeff[14] * (FLAC__int64)data[i-15];
1301 case 14: sum += qlp_coeff[13] * (FLAC__int64)data[i-14];
1302 case 13: sum += qlp_coeff[12] * (FLAC__int64)data[i-13];
/* Taps 12..1, common to every order in this branch. */
1303 sum += qlp_coeff[11] * (FLAC__int64)data[i-12];
1304 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
1305 sum += qlp_coeff[ 9] * (FLAC__int64)data[i-10];
1306 sum += qlp_coeff[ 8] * (FLAC__int64)data[i- 9];
1307 sum += qlp_coeff[ 7] * (FLAC__int64)data[i- 8];
1308 sum += qlp_coeff[ 6] * (FLAC__int64)data[i- 7];
1309 sum += qlp_coeff[ 5] * (FLAC__int64)data[i- 6];
1310 sum += qlp_coeff[ 4] * (FLAC__int64)data[i- 5];
1311 sum += qlp_coeff[ 3] * (FLAC__int64)data[i- 4];
1312 sum += qlp_coeff[ 2] * (FLAC__int64)data[i- 3];
1313 sum += qlp_coeff[ 1] * (FLAC__int64)data[i- 2];
1314 sum += qlp_coeff[ 0] * (FLAC__int64)data[i- 1];
1316 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1322 #ifndef FLAC__INTEGER_ONLY_LIBRARY
/*
 * Estimate the expected number of bits per residual sample for a
 * predictor with the given prediction error, over total_samples samples.
 *
 * lpc_error     - error (residual energy) returned by the LPC analysis
 * total_samples - number of samples the error was accumulated over;
 *                 asserted > 0 (used as a divisor below)
 *
 * Computes error_scale = (ln 2)^2 / (2 * total_samples) and delegates to
 * the _with_error_scale() variant for the actual bits estimate.
 */
1324 FLAC__double FLAC__lpc_compute_expected_bits_per_residual_sample(FLAC__double lpc_error, unsigned total_samples)
1326 FLAC__double error_scale;
1328 FLAC__ASSERT(total_samples > 0);
/* 0.5 * M_LN2 * M_LN2 == (ln 2)^2 / 2; dividing by total_samples
 * normalizes the total error to a per-sample scale. */
1330 error_scale = 0.5 * M_LN2 * M_LN2 / (FLAC__double)total_samples;
1332 return FLAC__lpc_compute_expected_bits_per_residual_sample_with_error_scale(lpc_error, error_scale);
/*
 * Core of the bits-per-sample estimate: given an already-normalized
 * error_scale, return 0.5 * log2(error_scale * lpc_error) for positive
 * error (log()/M_LN2 converts the natural log to base 2).  Non-positive
 * lpc_error values are handled separately below, since the logarithm is
 * undefined there.
 */
1335 FLAC__double FLAC__lpc_compute_expected_bits_per_residual_sample_with_error_scale(FLAC__double lpc_error, FLAC__double error_scale)
1337 if(lpc_error > 0.0) {
/* bps = (1/2) * log2(error_scale * lpc_error) */
1338 FLAC__double bps = (FLAC__double)0.5 * log(error_scale * lpc_error) / M_LN2;
1344 else if(lpc_error < 0.0) { /* error should not be negative but can happen due to inadequate floating-point resolution */
/*
 * Choose the LPC order that minimizes the estimated total encoded size.
 *
 * lpc_error[]             - per-order prediction errors; lpc_error[k] is
 *                           for order k+1
 * max_order               - number of candidate orders; asserted > 0
 * total_samples           - samples in the frame; asserted > 0
 * overhead_bits_per_order - header cost added per coefficient (each extra
 *                           order costs this many additional bits)
 *
 * Returns the best order (1-based), i.e. best_index + 1.
 */
1352 unsigned FLAC__lpc_compute_best_order(const FLAC__double lpc_error[], unsigned max_order, unsigned total_samples, unsigned overhead_bits_per_order)
1354 unsigned order, index, best_index; /* 'index' the index into lpc_error; index==order-1 since lpc_error[0] is for order==1, lpc_error[1] is for order==2, etc */
1355 FLAC__double bits, best_bits, error_scale;
1357 FLAC__ASSERT(max_order > 0);
1358 FLAC__ASSERT(total_samples > 0);
/* Same normalization as FLAC__lpc_compute_expected_bits_per_residual_sample(). */
1360 error_scale = 0.5 * M_LN2 * M_LN2 / (FLAC__double)total_samples;
/* Sentinel "infinity": UINT_MAX converted to double, so the first
 * candidate order always wins the initial comparison. */
1363 best_bits = (unsigned)(-1);
1365 for(index = 0, order = 1; index < max_order; index++, order++) {
/* Estimated total = per-sample bits * residual samples (the first
 * 'order' samples are warm-up, hence total_samples - order) plus the
 * per-order header overhead. */
1366 bits = FLAC__lpc_compute_expected_bits_per_residual_sample_with_error_scale(lpc_error[index], error_scale) * (FLAC__double)(total_samples - order) + (FLAC__double)(order * overhead_bits_per_order);
1367 if(bits < best_bits) {
1373 return best_index+1; /* +1 since index of lpc_error[] is order-1 */
1376 #endif /* !defined FLAC__INTEGER_ONLY_LIBRARY */