1 /* libFLAC - Free Lossless Audio Codec library
2 * Copyright (C) 2000,2001,2002,2003,2004,2005,2006,2007,2008,2009 Josh Coalson
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
8 * - Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * - Neither the name of the Xiph.org Foundation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 #include "FLAC/assert.h"
39 #include "FLAC/format.h"
40 #include "private/bitmath.h"
41 #include "private/lpc.h"
42 #include "private/macros.h"
43 #if defined DEBUG || defined FLAC__OVERFLOW_DETECT || defined FLAC__OVERFLOW_DETECT_VERBOSE
47 /* OPT: #undef'ing this may improve the speed on some architectures */
48 #define FLAC__LPC_UNROLLED_FILTER_LOOPS
50 #ifndef FLAC__INTEGER_ONLY_LIBRARY
53 /* math.h in VC++ doesn't seem to have this (how Microsoft is that?) */
54 #define M_LN2 0.69314718055994530942
57 #if !defined(HAVE_LROUND)
60 #define copysign _copysign
61 #elif defined(__GNUC__)
62 #define copysign __builtin_copysign
/* Fallback lround() for toolchains whose C runtime lacks it (guarded by the
 * !defined(HAVE_LROUND) block above): rounds half away from zero by adding
 * copysign(0.5, x) before truncating to long.
 * NOTE(review): the closing brace of this function is not visible in this
 * chunk -- verify against the full source. */
64 static inline long int lround(double x) {
65 return (long)(x + copysign (0.5, x));
67 // If this fails, we are in the presence of a mid-90's compiler... move along...
70 void FLAC__lpc_window_data(const FLAC__int32 in[], const FLAC__real window[], FLAC__real out[], unsigned data_len)
73 for(i = 0; i < data_len; i++)
74 out[i] = in[i] * window[i];
77 void FLAC__lpc_compute_autocorrelation(const FLAC__real data[], unsigned data_len, unsigned lag, FLAC__real autoc[])
/* Computes the autocorrelation of data[]: autoc[l] accumulates
 * data[i] * data[i-l] (equivalently data[sample] * data[sample+l]) for
 * l = 0..lag-1.  autoc[] must hold at least 'lag' elements.
 * NOTE(review): this chunk is truncated -- the braces, the declarations of
 * 'i'/'d', the #if selecting between the two versions below, and the
 * autoc[] store of the readable version are not visible.  Verify against
 * the full source before relying on this listing. */
79 /* a readable, but slower, version */
84 FLAC__ASSERT(lag > 0);
85 FLAC__ASSERT(lag <= data_len);
88 * Technically we should subtract the mean first like so:
89 * for(i = 0; i < data_len; i++)
91 * but it appears not to make enough of a difference to matter, and
92 * most signals are already closely centered around zero
/* inner product of data[] with itself delayed by 'lag' samples */
95 for(i = lag, d = 0.0; i < data_len; i++)
96 d += data[i] * data[i - lag];
102 * this version tends to run faster because of better data locality
103 * ('data_len' is usually much larger than 'lag')
106 unsigned sample, coeff;
107 const unsigned limit = data_len - lag;
109 FLAC__ASSERT(lag > 0);
110 FLAC__ASSERT(lag <= data_len);
/* presumably zeroes autoc[] here -- the assignment line is not visible */
112 for(coeff = 0; coeff < lag; coeff++)
/* main loop: each sample ('d' appears to hold data[sample]) contributes
 * to all 'lag' coefficients while sample+coeff stays in range */
114 for(sample = 0; sample <= limit; sample++) {
116 for(coeff = 0; coeff < lag; coeff++)
117 autoc[coeff] += d * data[sample+coeff];
/* tail: near the end of the buffer only the first data_len-sample
 * lags still have a partner sample inside the buffer */
119 for(; sample < data_len; sample++) {
121 for(coeff = 0; coeff < data_len - sample; coeff++)
122 autoc[coeff] += d * data[sample+coeff];
126 void FLAC__lpc_compute_lp_coefficients(const FLAC__real autoc[], unsigned *max_order, FLAC__real lp_coeff[][FLAC__MAX_LPC_ORDER], FLAC__double error[])
/* Levinson-Durbin recursion: converts autocorrelation coefficients into LP
 * coefficients for every order from 1 up to *max_order, storing the
 * coefficients for order i+1 in lp_coeff[i][] and (presumably) the
 * prediction error in error[] -- the error[] store is not visible in this
 * truncated chunk.
 * NOTE(review): braces, the declarations of 'i'/'j', the initialization of
 * 'err'/'r', and several other lines are missing from this listing --
 * verify against the full source. */
129 FLAC__double r, err, lpc[FLAC__MAX_LPC_ORDER];
131 FLAC__ASSERT(0 != max_order);
132 FLAC__ASSERT(0 < *max_order);
133 FLAC__ASSERT(*max_order <= FLAC__MAX_LPC_ORDER);
134 FLAC__ASSERT(autoc[0] != 0.0);
138 for(i = 0; i < *max_order; i++) {
139 /* Sum up this iteration's reflection coefficient. */
141 for(j = 0; j < i; j++)
142 r -= lpc[j] * autoc[i-j];
145 /* Update LPC coefficients and total error. */
/* symmetric in-place update: pair lpc[j] with lpc[i-1-j] */
147 for(j = 0; j < (i>>1); j++) {
148 FLAC__double tmp = lpc[j];
149 lpc[j] += r * lpc[i-1-j];
150 lpc[i-1-j] += r * tmp;
/* odd order: the middle coefficient pairs with itself */
153 lpc[j] += lpc[j] * r;
155 err *= (1.0 - r * r);
157 /* save this order */
158 for(j = 0; j <= i; j++)
159 lp_coeff[i][j] = (FLAC__real)(-lpc[j]); /* negate FIR filter coeff to get predictor coeff */
162 /* see SF bug #1601812 http://sourceforge.net/tracker/index.php?func=detail&aid=1601812&group_id=13478&atid=113478 */
170 int FLAC__lpc_quantize_coefficients(const FLAC__real lp_coeff[], unsigned order, unsigned precision, FLAC__int32 qlp_coeff[], int *shift)
/* Quantizes 'order' real LP coefficients to integers of the given bit
 * 'precision', choosing a common binary shift (*shift) so that the
 * coefficients use the available range.  Outputs go to qlp_coeff[] and
 * *shift; returns nonzero on error (the return statements are not visible
 * in this truncated chunk).
 * NOTE(review): braces, variable declarations ('i', 'cmax', 'log2cmax',
 * 'q'), the cmax-update inside the max loop, the rounding/clamping of each
 * coefficient, and the shift==0 handling are all missing from this listing
 * -- verify against the full source. */
174 FLAC__int32 qmax, qmin;
176 FLAC__ASSERT(precision > 0);
177 FLAC__ASSERT(precision >= FLAC__MIN_QLP_COEFF_PRECISION);
179 /* drop one bit for the sign; from here on out we consider only |lp_coeff[i]| */
181 qmax = 1 << precision;
185 /* calc cmax = max( |lp_coeff[i]| ) */
187 for(i = 0; i < order; i++) {
188 const FLAC__double d = fabs(lp_coeff[i]);
194 /* => coefficients are all 0, which means our constant-detect didn't work */
/* shift limits come from the bit width of the shift field in the stream */
198 const int max_shiftlimit = (1 << (FLAC__SUBFRAME_LPC_QLP_SHIFT_LEN-1)) - 1;
199 const int min_shiftlimit = -max_shiftlimit - 1;
/* frexp() yields the binary exponent of cmax; used to size the shift */
202 (void)frexp(cmax, &log2cmax);
204 *shift = (int)precision - log2cmax - 1;
206 if(*shift > max_shiftlimit)
207 *shift = max_shiftlimit;
208 else if(*shift < min_shiftlimit)
/* non-negative shift: scale coefficients UP by 2^*shift, carrying the
 * accumulated rounding error forward so quantization error stays bounded */
213 FLAC__double error = 0.0;
215 for(i = 0; i < order; i++) {
216 error += lp_coeff[i] * (1 << *shift);
219 #ifdef FLAC__OVERFLOW_DETECT
220 if(q > qmax+1) /* we expect q==qmax+1 occasionally due to rounding */
221 fprintf(stderr,"FLAC__lpc_quantize_coefficients: quantizer overflow: q>qmax %d>%d shift=%d cmax=%f precision=%u lpc[%u]=%f\n",q,qmax,*shift,cmax,precision+1,i,lp_coeff[i]);
223 fprintf(stderr,"FLAC__lpc_quantize_coefficients: quantizer overflow: q<qmin %d<%d shift=%d cmax=%f precision=%u lpc[%u]=%f\n",q,qmin,*shift,cmax,precision+1,i,lp_coeff[i]);
233 /* negative shift is very rare but due to design flaw, negative shift is
234 * a NOP in the decoder, so it must be handled specially by scaling down
238 const int nshift = -(*shift);
239 FLAC__double error = 0.0;
242 fprintf(stderr,"FLAC__lpc_quantize_coefficients: negative shift=%d order=%u cmax=%f\n", *shift, order, cmax);
/* scale DOWN by 2^nshift instead, again carrying the rounding error */
244 for(i = 0; i < order; i++) {
245 error += lp_coeff[i] / (1 << nshift);
247 #ifdef FLAC__OVERFLOW_DETECT
248 if(q > qmax+1) /* we expect q==qmax+1 occasionally due to rounding */
249 fprintf(stderr,"FLAC__lpc_quantize_coefficients: quantizer overflow: q>qmax %d>%d shift=%d cmax=%f precision=%u lpc[%u]=%f\n",q,qmax,*shift,cmax,precision+1,i,lp_coeff[i]);
251 fprintf(stderr,"FLAC__lpc_quantize_coefficients: quantizer overflow: q<qmin %d<%d shift=%d cmax=%f precision=%u lpc[%u]=%f\n",q,qmin,*shift,cmax,precision+1,i,lp_coeff[i]);
266 void FLAC__lpc_compute_residual_from_qlp_coefficients(const FLAC__int32 *data, unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 residual[])
/* Computes the LPC residual using 32-bit accumulation:
 *   residual[i] = data[i] - (sum_{j<order} qlp_coeff[j]*data[i-j-1]) >> lp_quantization
 * data[] must have 'order' warm-up samples BEFORE the pointer passed in.
 * Two builds: a generic loop (with optional overflow diagnostics) and a
 * fully unrolled version selected by the #if below.
 * NOTE(review): this chunk is truncated -- braces, 'sum = 0;' resets, the
 * if(order == N) selector lines, and variable declarations are missing.
 * Verify against the full source before relying on this listing. */
267 #if defined(FLAC__OVERFLOW_DETECT) || !defined(FLAC__LPC_UNROLLED_FILTER_LOOPS)
272 const FLAC__int32 *history;
274 #ifdef FLAC__OVERFLOW_DETECT_VERBOSE
275 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients: data_len=%d, order=%u, lpq=%d",data_len,order,lp_quantization);
277 fprintf(stderr,", q[%u]=%d",i,qlp_coeff[i]);
278 fprintf(stderr,"\n");
280 FLAC__ASSERT(order > 0);
282 for(i = 0; i < data_len; i++) {
/* walk the history pointer backwards over the preceding 'order' samples;
 * 'sumo' is a 64-bit shadow accumulator used only for overflow detection */
286 for(j = 0; j < order; j++) {
287 sum += qlp_coeff[j] * (*(--history));
288 sumo += (FLAC__int64)qlp_coeff[j] * (FLAC__int64)(*history);
289 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients: OVERFLOW, i=%u, j=%u, c=%d, d=%d, sumo=%" PRId64 "\n",i,j,qlp_coeff[j],*history,sumo);
291 *(residual++) = *(data++) - (sum >> lp_quantization);
294 /* Here's a slower but clearer version:
295 for(i = 0; i < data_len; i++) {
297 for(j = 0; j < order; j++)
298 sum += qlp_coeff[j] * data[i-j-1];
299 residual[i] = data[i] - (sum >> lp_quantization);
303 #else /* fully unrolled version for normal use */
308 FLAC__ASSERT(order > 0);
309 FLAC__ASSERT(order <= 32);
312 * We do unique versions up to 12th order since that's the subset limit.
313 * Also they are roughly ordered to match frequency of occurrence to
314 * minimize branching.
320 for(i = 0; i < (int)data_len; i++) {
322 sum += qlp_coeff[11] * data[i-12];
323 sum += qlp_coeff[10] * data[i-11];
324 sum += qlp_coeff[9] * data[i-10];
325 sum += qlp_coeff[8] * data[i-9];
326 sum += qlp_coeff[7] * data[i-8];
327 sum += qlp_coeff[6] * data[i-7];
328 sum += qlp_coeff[5] * data[i-6];
329 sum += qlp_coeff[4] * data[i-5];
330 sum += qlp_coeff[3] * data[i-4];
331 sum += qlp_coeff[2] * data[i-3];
332 sum += qlp_coeff[1] * data[i-2];
333 sum += qlp_coeff[0] * data[i-1];
334 residual[i] = data[i] - (sum >> lp_quantization);
337 else { /* order == 11 */
338 for(i = 0; i < (int)data_len; i++) {
340 sum += qlp_coeff[10] * data[i-11];
341 sum += qlp_coeff[9] * data[i-10];
342 sum += qlp_coeff[8] * data[i-9];
343 sum += qlp_coeff[7] * data[i-8];
344 sum += qlp_coeff[6] * data[i-7];
345 sum += qlp_coeff[5] * data[i-6];
346 sum += qlp_coeff[4] * data[i-5];
347 sum += qlp_coeff[3] * data[i-4];
348 sum += qlp_coeff[2] * data[i-3];
349 sum += qlp_coeff[1] * data[i-2];
350 sum += qlp_coeff[0] * data[i-1];
351 residual[i] = data[i] - (sum >> lp_quantization);
357 for(i = 0; i < (int)data_len; i++) {
359 sum += qlp_coeff[9] * data[i-10];
360 sum += qlp_coeff[8] * data[i-9];
361 sum += qlp_coeff[7] * data[i-8];
362 sum += qlp_coeff[6] * data[i-7];
363 sum += qlp_coeff[5] * data[i-6];
364 sum += qlp_coeff[4] * data[i-5];
365 sum += qlp_coeff[3] * data[i-4];
366 sum += qlp_coeff[2] * data[i-3];
367 sum += qlp_coeff[1] * data[i-2];
368 sum += qlp_coeff[0] * data[i-1];
369 residual[i] = data[i] - (sum >> lp_quantization);
372 else { /* order == 9 */
373 for(i = 0; i < (int)data_len; i++) {
375 sum += qlp_coeff[8] * data[i-9];
376 sum += qlp_coeff[7] * data[i-8];
377 sum += qlp_coeff[6] * data[i-7];
378 sum += qlp_coeff[5] * data[i-6];
379 sum += qlp_coeff[4] * data[i-5];
380 sum += qlp_coeff[3] * data[i-4];
381 sum += qlp_coeff[2] * data[i-3];
382 sum += qlp_coeff[1] * data[i-2];
383 sum += qlp_coeff[0] * data[i-1];
384 residual[i] = data[i] - (sum >> lp_quantization);
392 for(i = 0; i < (int)data_len; i++) {
394 sum += qlp_coeff[7] * data[i-8];
395 sum += qlp_coeff[6] * data[i-7];
396 sum += qlp_coeff[5] * data[i-6];
397 sum += qlp_coeff[4] * data[i-5];
398 sum += qlp_coeff[3] * data[i-4];
399 sum += qlp_coeff[2] * data[i-3];
400 sum += qlp_coeff[1] * data[i-2];
401 sum += qlp_coeff[0] * data[i-1];
402 residual[i] = data[i] - (sum >> lp_quantization);
405 else { /* order == 7 */
406 for(i = 0; i < (int)data_len; i++) {
408 sum += qlp_coeff[6] * data[i-7];
409 sum += qlp_coeff[5] * data[i-6];
410 sum += qlp_coeff[4] * data[i-5];
411 sum += qlp_coeff[3] * data[i-4];
412 sum += qlp_coeff[2] * data[i-3];
413 sum += qlp_coeff[1] * data[i-2];
414 sum += qlp_coeff[0] * data[i-1];
415 residual[i] = data[i] - (sum >> lp_quantization);
421 for(i = 0; i < (int)data_len; i++) {
423 sum += qlp_coeff[5] * data[i-6];
424 sum += qlp_coeff[4] * data[i-5];
425 sum += qlp_coeff[3] * data[i-4];
426 sum += qlp_coeff[2] * data[i-3];
427 sum += qlp_coeff[1] * data[i-2];
428 sum += qlp_coeff[0] * data[i-1];
429 residual[i] = data[i] - (sum >> lp_quantization);
432 else { /* order == 5 */
433 for(i = 0; i < (int)data_len; i++) {
435 sum += qlp_coeff[4] * data[i-5];
436 sum += qlp_coeff[3] * data[i-4];
437 sum += qlp_coeff[2] * data[i-3];
438 sum += qlp_coeff[1] * data[i-2];
439 sum += qlp_coeff[0] * data[i-1];
440 residual[i] = data[i] - (sum >> lp_quantization);
448 for(i = 0; i < (int)data_len; i++) {
450 sum += qlp_coeff[3] * data[i-4];
451 sum += qlp_coeff[2] * data[i-3];
452 sum += qlp_coeff[1] * data[i-2];
453 sum += qlp_coeff[0] * data[i-1];
454 residual[i] = data[i] - (sum >> lp_quantization);
457 else { /* order == 3 */
458 for(i = 0; i < (int)data_len; i++) {
460 sum += qlp_coeff[2] * data[i-3];
461 sum += qlp_coeff[1] * data[i-2];
462 sum += qlp_coeff[0] * data[i-1];
463 residual[i] = data[i] - (sum >> lp_quantization);
469 for(i = 0; i < (int)data_len; i++) {
471 sum += qlp_coeff[1] * data[i-2];
472 sum += qlp_coeff[0] * data[i-1];
473 residual[i] = data[i] - (sum >> lp_quantization);
476 else { /* order == 1 */
477 for(i = 0; i < (int)data_len; i++)
478 residual[i] = data[i] - ((qlp_coeff[0] * data[i-1]) >> lp_quantization);
483 else { /* order > 12 */
/* orders 13..32: a switch on 'order' whose cases fall through on purpose,
 * so entering at case N accumulates taps N-1 down through 12 before the
 * unconditional taps 11..0 below */
484 for(i = 0; i < (int)data_len; i++) {
487 case 32: sum += qlp_coeff[31] * data[i-32];
488 case 31: sum += qlp_coeff[30] * data[i-31];
489 case 30: sum += qlp_coeff[29] * data[i-30];
490 case 29: sum += qlp_coeff[28] * data[i-29];
491 case 28: sum += qlp_coeff[27] * data[i-28];
492 case 27: sum += qlp_coeff[26] * data[i-27];
493 case 26: sum += qlp_coeff[25] * data[i-26];
494 case 25: sum += qlp_coeff[24] * data[i-25];
495 case 24: sum += qlp_coeff[23] * data[i-24];
496 case 23: sum += qlp_coeff[22] * data[i-23];
497 case 22: sum += qlp_coeff[21] * data[i-22];
498 case 21: sum += qlp_coeff[20] * data[i-21];
499 case 20: sum += qlp_coeff[19] * data[i-20];
500 case 19: sum += qlp_coeff[18] * data[i-19];
501 case 18: sum += qlp_coeff[17] * data[i-18];
502 case 17: sum += qlp_coeff[16] * data[i-17];
503 case 16: sum += qlp_coeff[15] * data[i-16];
504 case 15: sum += qlp_coeff[14] * data[i-15];
505 case 14: sum += qlp_coeff[13] * data[i-14];
506 case 13: sum += qlp_coeff[12] * data[i-13];
507 sum += qlp_coeff[11] * data[i-12];
508 sum += qlp_coeff[10] * data[i-11];
509 sum += qlp_coeff[ 9] * data[i-10];
510 sum += qlp_coeff[ 8] * data[i- 9];
511 sum += qlp_coeff[ 7] * data[i- 8];
512 sum += qlp_coeff[ 6] * data[i- 7];
513 sum += qlp_coeff[ 5] * data[i- 6];
514 sum += qlp_coeff[ 4] * data[i- 5];
515 sum += qlp_coeff[ 3] * data[i- 4];
516 sum += qlp_coeff[ 2] * data[i- 3];
517 sum += qlp_coeff[ 1] * data[i- 2];
518 sum += qlp_coeff[ 0] * data[i- 1];
520 residual[i] = data[i] - (sum >> lp_quantization);
526 void FLAC__lpc_compute_residual_from_qlp_coefficients_wide(const FLAC__int32 *data, unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 residual[])
/* Same residual computation as FLAC__lpc_compute_residual_from_qlp_coefficients
 * but with 64-bit accumulation ('wide'), for cases where the 32-bit sum
 * could overflow (high bit depths / large coefficients).
 * NOTE(review): this chunk is truncated -- braces, 'sum = 0;' resets, the
 * if(order == N) selector lines, and variable declarations are missing.
 * Verify against the full source before relying on this listing. */
527 #if defined(FLAC__OVERFLOW_DETECT) || !defined(FLAC__LPC_UNROLLED_FILTER_LOOPS)
531 const FLAC__int32 *history;
533 #ifdef FLAC__OVERFLOW_DETECT_VERBOSE
534 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients_wide: data_len=%d, order=%u, lpq=%d",data_len,order,lp_quantization);
536 fprintf(stderr,", q[%u]=%d",i,qlp_coeff[i]);
537 fprintf(stderr,"\n");
539 FLAC__ASSERT(order > 0);
541 for(i = 0; i < data_len; i++) {
544 for(j = 0; j < order; j++)
545 sum += (FLAC__int64)qlp_coeff[j] * (FLAC__int64)(*(--history));
/* diagnostics: warn when the shifted sum or the residual itself does not
 * fit back into 32 bits */
546 if(FLAC__bitmath_silog2_wide(sum >> lp_quantization) > 32) {
547 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients_wide: OVERFLOW, i=%u, sum=%" PRId64 "\n", i, (sum >> lp_quantization));
550 if(FLAC__bitmath_silog2_wide((FLAC__int64)(*data) - (sum >> lp_quantization)) > 32) {
551 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients_wide: OVERFLOW, i=%u, data=%d, sum=%" PRId64 ", residual=%" PRId64 "\n", i, *data, (long long)(sum >> lp_quantization), ((FLAC__int64)(*data) - (sum >> lp_quantization)));
554 *(residual++) = *(data++) - (FLAC__int32)(sum >> lp_quantization);
557 #else /* fully unrolled version for normal use */
562 FLAC__ASSERT(order > 0);
563 FLAC__ASSERT(order <= 32);
566 * We do unique versions up to 12th order since that's the subset limit.
567 * Also they are roughly ordered to match frequency of occurrence to
568 * minimize branching.
574 for(i = 0; i < (int)data_len; i++) {
576 sum += qlp_coeff[11] * (FLAC__int64)data[i-12];
577 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
578 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
579 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
580 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
581 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
582 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
583 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
584 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
585 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
586 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
587 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
588 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
591 else { /* order == 11 */
592 for(i = 0; i < (int)data_len; i++) {
594 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
595 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
596 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
597 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
598 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
599 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
600 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
601 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
602 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
603 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
604 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
605 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
611 for(i = 0; i < (int)data_len; i++) {
613 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
614 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
615 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
616 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
617 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
618 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
619 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
620 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
621 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
622 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
623 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
626 else { /* order == 9 */
627 for(i = 0; i < (int)data_len; i++) {
629 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
630 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
631 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
632 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
633 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
634 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
635 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
636 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
637 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
638 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
646 for(i = 0; i < (int)data_len; i++) {
648 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
649 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
650 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
651 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
652 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
653 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
654 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
655 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
656 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
659 else { /* order == 7 */
660 for(i = 0; i < (int)data_len; i++) {
662 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
663 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
664 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
665 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
666 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
667 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
668 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
669 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
675 for(i = 0; i < (int)data_len; i++) {
677 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
678 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
679 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
680 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
681 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
682 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
683 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
686 else { /* order == 5 */
687 for(i = 0; i < (int)data_len; i++) {
689 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
690 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
691 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
692 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
693 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
694 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
702 for(i = 0; i < (int)data_len; i++) {
704 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
705 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
706 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
707 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
708 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
711 else { /* order == 3 */
712 for(i = 0; i < (int)data_len; i++) {
714 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
715 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
716 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
717 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
723 for(i = 0; i < (int)data_len; i++) {
725 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
726 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
727 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
730 else { /* order == 1 */
731 for(i = 0; i < (int)data_len; i++)
732 residual[i] = data[i] - (FLAC__int32)((qlp_coeff[0] * (FLAC__int64)data[i-1]) >> lp_quantization);
737 else { /* order > 12 */
/* orders 13..32: intentional switch fallthrough accumulates the top taps,
 * then the unconditional taps 11..0 below finish the sum */
738 for(i = 0; i < (int)data_len; i++) {
741 case 32: sum += qlp_coeff[31] * (FLAC__int64)data[i-32];
742 case 31: sum += qlp_coeff[30] * (FLAC__int64)data[i-31];
743 case 30: sum += qlp_coeff[29] * (FLAC__int64)data[i-30];
744 case 29: sum += qlp_coeff[28] * (FLAC__int64)data[i-29];
745 case 28: sum += qlp_coeff[27] * (FLAC__int64)data[i-28];
746 case 27: sum += qlp_coeff[26] * (FLAC__int64)data[i-27];
747 case 26: sum += qlp_coeff[25] * (FLAC__int64)data[i-26];
748 case 25: sum += qlp_coeff[24] * (FLAC__int64)data[i-25];
749 case 24: sum += qlp_coeff[23] * (FLAC__int64)data[i-24];
750 case 23: sum += qlp_coeff[22] * (FLAC__int64)data[i-23];
751 case 22: sum += qlp_coeff[21] * (FLAC__int64)data[i-22];
752 case 21: sum += qlp_coeff[20] * (FLAC__int64)data[i-21];
753 case 20: sum += qlp_coeff[19] * (FLAC__int64)data[i-20];
754 case 19: sum += qlp_coeff[18] * (FLAC__int64)data[i-19];
755 case 18: sum += qlp_coeff[17] * (FLAC__int64)data[i-18];
756 case 17: sum += qlp_coeff[16] * (FLAC__int64)data[i-17];
757 case 16: sum += qlp_coeff[15] * (FLAC__int64)data[i-16];
758 case 15: sum += qlp_coeff[14] * (FLAC__int64)data[i-15];
759 case 14: sum += qlp_coeff[13] * (FLAC__int64)data[i-14];
760 case 13: sum += qlp_coeff[12] * (FLAC__int64)data[i-13];
761 sum += qlp_coeff[11] * (FLAC__int64)data[i-12];
762 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
763 sum += qlp_coeff[ 9] * (FLAC__int64)data[i-10];
764 sum += qlp_coeff[ 8] * (FLAC__int64)data[i- 9];
765 sum += qlp_coeff[ 7] * (FLAC__int64)data[i- 8];
766 sum += qlp_coeff[ 6] * (FLAC__int64)data[i- 7];
767 sum += qlp_coeff[ 5] * (FLAC__int64)data[i- 6];
768 sum += qlp_coeff[ 4] * (FLAC__int64)data[i- 5];
769 sum += qlp_coeff[ 3] * (FLAC__int64)data[i- 4];
770 sum += qlp_coeff[ 2] * (FLAC__int64)data[i- 3];
771 sum += qlp_coeff[ 1] * (FLAC__int64)data[i- 2];
772 sum += qlp_coeff[ 0] * (FLAC__int64)data[i- 1];
774 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
782 void FLAC__lpc_restore_signal(const FLAC__int32 residual[], unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 data[])
/* Decoder-side inverse of FLAC__lpc_compute_residual_from_qlp_coefficients:
 *   data[i] = residual[i] + (sum_{j<order} qlp_coeff[j]*data[i-j-1]) >> lp_quantization
 * data[] must have 'order' warm-up samples BEFORE the pointer passed in;
 * the reconstruction is sequential since each output feeds later sums.
 * NOTE(review): this chunk is truncated -- braces, 'sum = 0;' resets, the
 * if(order == N) selector lines, and variable declarations are missing.
 * Verify against the full source before relying on this listing. */
783 #if defined(FLAC__OVERFLOW_DETECT) || !defined(FLAC__LPC_UNROLLED_FILTER_LOOPS)
788 const FLAC__int32 *r = residual, *history;
790 #ifdef FLAC__OVERFLOW_DETECT_VERBOSE
791 fprintf(stderr,"FLAC__lpc_restore_signal: data_len=%d, order=%u, lpq=%d",data_len,order,lp_quantization);
793 fprintf(stderr,", q[%u]=%d",i,qlp_coeff[i]);
794 fprintf(stderr,"\n");
796 FLAC__ASSERT(order > 0);
798 for(i = 0; i < data_len; i++) {
/* 'sumo' is a 64-bit shadow accumulator used only to detect 32-bit overflow */
802 for(j = 0; j < order; j++) {
803 sum += qlp_coeff[j] * (*(--history));
804 sumo += (FLAC__int64)qlp_coeff[j] * (FLAC__int64)(*history);
805 if(sumo > 2147483647ll || sumo < -2147483648ll)
806 fprintf(stderr,"FLAC__lpc_restore_signal: OVERFLOW, i=%u, j=%u, c=%d, d=%d, sumo=%" PRId64 "\n",i,j,qlp_coeff[j],*history,sumo);
808 *(data++) = *(r++) + (sum >> lp_quantization);
811 /* Here's a slower but clearer version:
812 for(i = 0; i < data_len; i++) {
814 for(j = 0; j < order; j++)
815 sum += qlp_coeff[j] * data[i-j-1];
816 data[i] = residual[i] + (sum >> lp_quantization);
820 #else /* fully unrolled version for normal use */
825 FLAC__ASSERT(order > 0);
826 FLAC__ASSERT(order <= 32);
829 * We do unique versions up to 12th order since that's the subset limit.
830 * Also they are roughly ordered to match frequency of occurrence to
831 * minimize branching.
837 for(i = 0; i < (int)data_len; i++) {
839 sum += qlp_coeff[11] * data[i-12];
840 sum += qlp_coeff[10] * data[i-11];
841 sum += qlp_coeff[9] * data[i-10];
842 sum += qlp_coeff[8] * data[i-9];
843 sum += qlp_coeff[7] * data[i-8];
844 sum += qlp_coeff[6] * data[i-7];
845 sum += qlp_coeff[5] * data[i-6];
846 sum += qlp_coeff[4] * data[i-5];
847 sum += qlp_coeff[3] * data[i-4];
848 sum += qlp_coeff[2] * data[i-3];
849 sum += qlp_coeff[1] * data[i-2];
850 sum += qlp_coeff[0] * data[i-1];
851 data[i] = residual[i] + (sum >> lp_quantization);
854 else { /* order == 11 */
855 for(i = 0; i < (int)data_len; i++) {
857 sum += qlp_coeff[10] * data[i-11];
858 sum += qlp_coeff[9] * data[i-10];
859 sum += qlp_coeff[8] * data[i-9];
860 sum += qlp_coeff[7] * data[i-8];
861 sum += qlp_coeff[6] * data[i-7];
862 sum += qlp_coeff[5] * data[i-6];
863 sum += qlp_coeff[4] * data[i-5];
864 sum += qlp_coeff[3] * data[i-4];
865 sum += qlp_coeff[2] * data[i-3];
866 sum += qlp_coeff[1] * data[i-2];
867 sum += qlp_coeff[0] * data[i-1];
868 data[i] = residual[i] + (sum >> lp_quantization);
874 for(i = 0; i < (int)data_len; i++) {
876 sum += qlp_coeff[9] * data[i-10];
877 sum += qlp_coeff[8] * data[i-9];
878 sum += qlp_coeff[7] * data[i-8];
879 sum += qlp_coeff[6] * data[i-7];
880 sum += qlp_coeff[5] * data[i-6];
881 sum += qlp_coeff[4] * data[i-5];
882 sum += qlp_coeff[3] * data[i-4];
883 sum += qlp_coeff[2] * data[i-3];
884 sum += qlp_coeff[1] * data[i-2];
885 sum += qlp_coeff[0] * data[i-1];
886 data[i] = residual[i] + (sum >> lp_quantization);
889 else { /* order == 9 */
890 for(i = 0; i < (int)data_len; i++) {
892 sum += qlp_coeff[8] * data[i-9];
893 sum += qlp_coeff[7] * data[i-8];
894 sum += qlp_coeff[6] * data[i-7];
895 sum += qlp_coeff[5] * data[i-6];
896 sum += qlp_coeff[4] * data[i-5];
897 sum += qlp_coeff[3] * data[i-4];
898 sum += qlp_coeff[2] * data[i-3];
899 sum += qlp_coeff[1] * data[i-2];
900 sum += qlp_coeff[0] * data[i-1];
901 data[i] = residual[i] + (sum >> lp_quantization);
909 for(i = 0; i < (int)data_len; i++) {
911 sum += qlp_coeff[7] * data[i-8];
912 sum += qlp_coeff[6] * data[i-7];
913 sum += qlp_coeff[5] * data[i-6];
914 sum += qlp_coeff[4] * data[i-5];
915 sum += qlp_coeff[3] * data[i-4];
916 sum += qlp_coeff[2] * data[i-3];
917 sum += qlp_coeff[1] * data[i-2];
918 sum += qlp_coeff[0] * data[i-1];
919 data[i] = residual[i] + (sum >> lp_quantization);
922 else { /* order == 7 */
923 for(i = 0; i < (int)data_len; i++) {
925 sum += qlp_coeff[6] * data[i-7];
926 sum += qlp_coeff[5] * data[i-6];
927 sum += qlp_coeff[4] * data[i-5];
928 sum += qlp_coeff[3] * data[i-4];
929 sum += qlp_coeff[2] * data[i-3];
930 sum += qlp_coeff[1] * data[i-2];
931 sum += qlp_coeff[0] * data[i-1];
932 data[i] = residual[i] + (sum >> lp_quantization);
938 for(i = 0; i < (int)data_len; i++) {
940 sum += qlp_coeff[5] * data[i-6];
941 sum += qlp_coeff[4] * data[i-5];
942 sum += qlp_coeff[3] * data[i-4];
943 sum += qlp_coeff[2] * data[i-3];
944 sum += qlp_coeff[1] * data[i-2];
945 sum += qlp_coeff[0] * data[i-1];
946 data[i] = residual[i] + (sum >> lp_quantization);
949 else { /* order == 5 */
950 for(i = 0; i < (int)data_len; i++) {
952 sum += qlp_coeff[4] * data[i-5];
953 sum += qlp_coeff[3] * data[i-4];
954 sum += qlp_coeff[2] * data[i-3];
955 sum += qlp_coeff[1] * data[i-2];
956 sum += qlp_coeff[0] * data[i-1];
957 data[i] = residual[i] + (sum >> lp_quantization);
965 for(i = 0; i < (int)data_len; i++) {
967 sum += qlp_coeff[3] * data[i-4];
968 sum += qlp_coeff[2] * data[i-3];
969 sum += qlp_coeff[1] * data[i-2];
970 sum += qlp_coeff[0] * data[i-1];
971 data[i] = residual[i] + (sum >> lp_quantization);
974 else { /* order == 3 */
975 for(i = 0; i < (int)data_len; i++) {
977 sum += qlp_coeff[2] * data[i-3];
978 sum += qlp_coeff[1] * data[i-2];
979 sum += qlp_coeff[0] * data[i-1];
980 data[i] = residual[i] + (sum >> lp_quantization);
986 for(i = 0; i < (int)data_len; i++) {
988 sum += qlp_coeff[1] * data[i-2];
989 sum += qlp_coeff[0] * data[i-1];
990 data[i] = residual[i] + (sum >> lp_quantization);
993 else { /* order == 1 */
994 for(i = 0; i < (int)data_len; i++)
995 data[i] = residual[i] + ((qlp_coeff[0] * data[i-1]) >> lp_quantization);
1000 else { /* order > 12 */
/* orders 13..32: intentional switch fallthrough accumulates the top taps,
 * then the unconditional taps 11..0 below finish the sum */
1001 for(i = 0; i < (int)data_len; i++) {
1004 case 32: sum += qlp_coeff[31] * data[i-32];
1005 case 31: sum += qlp_coeff[30] * data[i-31];
1006 case 30: sum += qlp_coeff[29] * data[i-30];
1007 case 29: sum += qlp_coeff[28] * data[i-29];
1008 case 28: sum += qlp_coeff[27] * data[i-28];
1009 case 27: sum += qlp_coeff[26] * data[i-27];
1010 case 26: sum += qlp_coeff[25] * data[i-26];
1011 case 25: sum += qlp_coeff[24] * data[i-25];
1012 case 24: sum += qlp_coeff[23] * data[i-24];
1013 case 23: sum += qlp_coeff[22] * data[i-23];
1014 case 22: sum += qlp_coeff[21] * data[i-22];
1015 case 21: sum += qlp_coeff[20] * data[i-21];
1016 case 20: sum += qlp_coeff[19] * data[i-20];
1017 case 19: sum += qlp_coeff[18] * data[i-19];
1018 case 18: sum += qlp_coeff[17] * data[i-18];
1019 case 17: sum += qlp_coeff[16] * data[i-17];
1020 case 16: sum += qlp_coeff[15] * data[i-16];
1021 case 15: sum += qlp_coeff[14] * data[i-15];
1022 case 14: sum += qlp_coeff[13] * data[i-14];
1023 case 13: sum += qlp_coeff[12] * data[i-13];
1024 sum += qlp_coeff[11] * data[i-12];
1025 sum += qlp_coeff[10] * data[i-11];
1026 sum += qlp_coeff[ 9] * data[i-10];
1027 sum += qlp_coeff[ 8] * data[i- 9];
1028 sum += qlp_coeff[ 7] * data[i- 8];
1029 sum += qlp_coeff[ 6] * data[i- 7];
1030 sum += qlp_coeff[ 5] * data[i- 6];
1031 sum += qlp_coeff[ 4] * data[i- 5];
1032 sum += qlp_coeff[ 3] * data[i- 4];
1033 sum += qlp_coeff[ 2] * data[i- 3];
1034 sum += qlp_coeff[ 1] * data[i- 2];
1035 sum += qlp_coeff[ 0] * data[i- 1];
1037 data[i] = residual[i] + (sum >> lp_quantization);
/*
 * Restore the original signal from an LPC residual using a 64-bit ("wide")
 * accumulator, for cases where qlp_coeff[]*data[] products can exceed what a
 * 32-bit sum can hold:
 *
 *   data[i] = residual[i] + ((sum over j of qlp_coeff[j]*data[i-j-1]) >> lp_quantization)
 *
 * data[] must be preceded by 'order' warm-up samples: the code reads back to
 * data[i-order] starting at i == 0.
 */
void FLAC__lpc_restore_signal_wide(const FLAC__int32 residual[], unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 data[])
/* Generic one-loop version; also selected whenever overflow detection is compiled in. */
#if defined(FLAC__OVERFLOW_DETECT) || !defined(FLAC__LPC_UNROLLED_FILTER_LOOPS)
	const FLAC__int32 *r = residual, *history;
#ifdef FLAC__OVERFLOW_DETECT_VERBOSE
	/* NOTE(review): data_len is unsigned, so %u would be the exact specifier
	 * instead of %d — debug-only diagnostic path. */
	fprintf(stderr,"FLAC__lpc_restore_signal_wide: data_len=%d, order=%u, lpq=%d",data_len,order,lp_quantization);
	for(i=0;i<order;i++)
		fprintf(stderr,", q[%u]=%d",i,qlp_coeff[i]);
	fprintf(stderr,"\n");
	FLAC__ASSERT(order > 0);
	for(i = 0; i < data_len; i++) {
		/* walk the history pointer backwards over the 'order' previous samples,
		 * widening each product to 64 bits before accumulating */
		for(j = 0; j < order; j++)
			sum += (FLAC__int64)qlp_coeff[j] * (FLAC__int64)(*(--history));
		/* restored samples must fit in 32 bits: report if the shifted prediction alone overflows... */
		if(FLAC__bitmath_silog2_wide(sum >> lp_quantization) > 32) {
			fprintf(stderr,"FLAC__lpc_restore_signal_wide: OVERFLOW, i=%u, sum=%" PRId64 "\n", i, (sum >> lp_quantization));
		/* ...and if residual + prediction overflows */
		if(FLAC__bitmath_silog2_wide((FLAC__int64)(*r) + (sum >> lp_quantization)) > 32) {
			fprintf(stderr,"FLAC__lpc_restore_signal_wide: OVERFLOW, i=%u, residual=%d, sum=%" PRId64 ", data=%" PRId64 "\n", i, *r, (sum >> lp_quantization), ((FLAC__int64)(*r) + (sum >> lp_quantization)));
		*(data++) = *(r++) + (FLAC__int32)(sum >> lp_quantization);
#else /* fully unrolled version for normal use */
	FLAC__ASSERT(order > 0);
	FLAC__ASSERT(order <= 32);
	 * We do unique versions up to 12th order since that's the subset limit.
	 * Also they are roughly ordered to match frequency of occurrence to
	 * minimize branching.
				/* order == 12 */
				for(i = 0; i < (int)data_len; i++) {
					sum += qlp_coeff[11] * (FLAC__int64)data[i-12];
					sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
					sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
					sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
					sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
					sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
					sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
					sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
					sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
					sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
					sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
					sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
					data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
				else { /* order == 11 */
				for(i = 0; i < (int)data_len; i++) {
					sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
					sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
					sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
					sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
					sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
					sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
					sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
					sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
					sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
					sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
					sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
					data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
			/* order == 10 */
			for(i = 0; i < (int)data_len; i++) {
				sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
				sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
				sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
				sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
				sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
				sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
				sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
				sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
				sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
				sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
				data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
			else { /* order == 9 */
			for(i = 0; i < (int)data_len; i++) {
				sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
				sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
				sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
				sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
				sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
				sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
				sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
				sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
				sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
				data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
		else if(order > 4) {
				/* order == 8 */
				for(i = 0; i < (int)data_len; i++) {
					sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
					sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
					sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
					sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
					sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
					sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
					sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
					sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
					data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
				else { /* order == 7 */
				for(i = 0; i < (int)data_len; i++) {
					sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
					sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
					sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
					sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
					sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
					sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
					sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
					data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
			/* order == 6 */
			for(i = 0; i < (int)data_len; i++) {
				sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
				sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
				sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
				sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
				sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
				sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
				data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
			else { /* order == 5 */
			for(i = 0; i < (int)data_len; i++) {
				sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
				sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
				sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
				sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
				sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
				data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
				/* order == 4 */
				for(i = 0; i < (int)data_len; i++) {
					sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
					sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
					sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
					sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
					data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
				else { /* order == 3 */
				for(i = 0; i < (int)data_len; i++) {
					sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
					sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
					sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
					data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
				/* order == 2 */
				for(i = 0; i < (int)data_len; i++) {
					sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
					sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
					data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
				else { /* order == 1 */
				for(i = 0; i < (int)data_len; i++)
					data[i] = residual[i] + (FLAC__int32)((qlp_coeff[0] * (FLAC__int64)data[i-1]) >> lp_quantization);
	else { /* order > 12 */
		for(i = 0; i < (int)data_len; i++) {
			/* Intentional case fall-through (Duff-style): entering at 'order'
			 * accumulates that tap and every lower one — no 'break's. */
			case 32: sum += qlp_coeff[31] * (FLAC__int64)data[i-32];
			case 31: sum += qlp_coeff[30] * (FLAC__int64)data[i-31];
			case 30: sum += qlp_coeff[29] * (FLAC__int64)data[i-30];
			case 29: sum += qlp_coeff[28] * (FLAC__int64)data[i-29];
			case 28: sum += qlp_coeff[27] * (FLAC__int64)data[i-28];
			case 27: sum += qlp_coeff[26] * (FLAC__int64)data[i-27];
			case 26: sum += qlp_coeff[25] * (FLAC__int64)data[i-26];
			case 25: sum += qlp_coeff[24] * (FLAC__int64)data[i-25];
			case 24: sum += qlp_coeff[23] * (FLAC__int64)data[i-24];
			case 23: sum += qlp_coeff[22] * (FLAC__int64)data[i-23];
			case 22: sum += qlp_coeff[21] * (FLAC__int64)data[i-22];
			case 21: sum += qlp_coeff[20] * (FLAC__int64)data[i-21];
			case 20: sum += qlp_coeff[19] * (FLAC__int64)data[i-20];
			case 19: sum += qlp_coeff[18] * (FLAC__int64)data[i-19];
			case 18: sum += qlp_coeff[17] * (FLAC__int64)data[i-18];
			case 17: sum += qlp_coeff[16] * (FLAC__int64)data[i-17];
			case 16: sum += qlp_coeff[15] * (FLAC__int64)data[i-16];
			case 15: sum += qlp_coeff[14] * (FLAC__int64)data[i-15];
			case 14: sum += qlp_coeff[13] * (FLAC__int64)data[i-14];
			case 13: sum += qlp_coeff[12] * (FLAC__int64)data[i-13];
			         sum += qlp_coeff[11] * (FLAC__int64)data[i-12];
			         sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
			         sum += qlp_coeff[ 9] * (FLAC__int64)data[i-10];
			         sum += qlp_coeff[ 8] * (FLAC__int64)data[i- 9];
			         sum += qlp_coeff[ 7] * (FLAC__int64)data[i- 8];
			         sum += qlp_coeff[ 6] * (FLAC__int64)data[i- 7];
			         sum += qlp_coeff[ 5] * (FLAC__int64)data[i- 6];
			         sum += qlp_coeff[ 4] * (FLAC__int64)data[i- 5];
			         sum += qlp_coeff[ 3] * (FLAC__int64)data[i- 4];
			         sum += qlp_coeff[ 2] * (FLAC__int64)data[i- 3];
			         sum += qlp_coeff[ 1] * (FLAC__int64)data[i- 2];
			         sum += qlp_coeff[ 0] * (FLAC__int64)data[i- 1];
			data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1297 #ifndef FLAC__INTEGER_ONLY_LIBRARY
1299 FLAC__double FLAC__lpc_compute_expected_bits_per_residual_sample(FLAC__double lpc_error, unsigned total_samples)
1301 FLAC__double error_scale;
1303 FLAC__ASSERT(total_samples > 0);
1305 error_scale = 0.5 * M_LN2 * M_LN2 / (FLAC__double)total_samples;
1307 return FLAC__lpc_compute_expected_bits_per_residual_sample_with_error_scale(lpc_error, error_scale);
/*
 * Convert a pre-scaled LPC prediction error into an estimate of the bits
 * needed per residual sample: 0.5 * log2(error_scale * lpc_error) when the
 * error is positive; non-positive errors are handled specially below.
 */
FLAC__double FLAC__lpc_compute_expected_bits_per_residual_sample_with_error_scale(FLAC__double lpc_error, FLAC__double error_scale)
	if(lpc_error > 0.0) {
		/* log(x)/M_LN2 converts the natural log to base 2 */
		FLAC__double bps = (FLAC__double)0.5 * log(error_scale * lpc_error) / M_LN2;
	else if(lpc_error < 0.0) { /* error should not be negative but can happen due to inadequate floating-point resolution */
/*
 * Choose the LPC order that minimizes the estimated total encoded size:
 * expected bits/sample times the number of residual samples, plus the
 * per-order overhead.  Returns the best order (always >= 1).
 */
unsigned FLAC__lpc_compute_best_order(const FLAC__double lpc_error[], unsigned max_order, unsigned total_samples, unsigned overhead_bits_per_order)
	unsigned order, index, best_index; /* 'index' the index into lpc_error; index==order-1 since lpc_error[0] is for order==1, lpc_error[1] is for order==2, etc */
	FLAC__double bits, best_bits, error_scale;

	FLAC__ASSERT(max_order > 0);
	FLAC__ASSERT(total_samples > 0);

	/* same scale factor used by FLAC__lpc_compute_expected_bits_per_residual_sample() */
	error_scale = 0.5 * M_LN2 * M_LN2 / (FLAC__double)total_samples;

	/* UINT_MAX converted to double acts as an "infinity" sentinel; any real estimate beats it */
	best_bits = (unsigned)(-1);

	for(index = 0, order = 1; index < max_order; index++, order++) {
		/* estimated cost = bits/sample * residual count + per-order overhead */
		bits = FLAC__lpc_compute_expected_bits_per_residual_sample_with_error_scale(lpc_error[index], error_scale) * (FLAC__double)(total_samples - order) + (FLAC__double)(order * overhead_bits_per_order);
		if(bits < best_bits) {
	return best_index+1; /* +1 since index of lpc_error[] is order-1 */
1351 #endif /* !defined FLAC__INTEGER_ONLY_LIBRARY */