1 /* libFLAC - Free Lossless Audio Codec library
2 * Copyright (C) 2000,2001,2002,2003,2004,2005,2006,2007,2008,2009 Josh Coalson
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
8 * - Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * - Neither the name of the Xiph.org Foundation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 #include "FLAC/assert.h"
38 #include "FLAC/format.h"
39 #include "private/bitmath.h"
40 #include "private/lpc.h"
41 #if defined DEBUG || defined FLAC__OVERFLOW_DETECT || defined FLAC__OVERFLOW_DETECT_VERBOSE
45 /* OPT: #undef'ing this may improve the speed on some architectures */
46 #define FLAC__LPC_UNROLLED_FILTER_LOOPS
48 #ifndef FLAC__INTEGER_ONLY_LIBRARY
51 /* math.h in VC++ doesn't seem to have this (how Microsoft is that?) */
52 #define M_LN2 0.69314718055994530942
/*
 * Applies an analysis window to integer samples, producing real-valued
 * windowed output: out[i] = in[i] * window[i] for i in [0, data_len).
 * NOTE(review): this excerpt elides some original lines (function braces
 * and the declaration of 'i'); only comments were added here.
 */
56 void FLAC__lpc_window_data(const FLAC__int32 in[], const FLAC__real window[], FLAC__real out[], unsigned data_len)
59 for(i = 0; i < data_len; i++)
60 out[i] = in[i] * window[i];
/*
 * Computes the autocorrelation of 'data' for lags 0..lag-1 into autoc[].
 * Two variants appear here (selected by preprocessor conditionals that
 * are partly elided in this excerpt): a readable lag-major version, and
 * a sample-major version with better data locality since data_len is
 * usually much larger than lag. The second variant accumulates all lag
 * products of each sample in the inner loop, then handles the tail
 * (the last 'lag' samples, where fewer products remain) separately.
 */
63 void FLAC__lpc_compute_autocorrelation(const FLAC__real data[], unsigned data_len, unsigned lag, FLAC__real autoc[])
65 /* a readable, but slower, version */
70 FLAC__ASSERT(lag > 0);
71 FLAC__ASSERT(lag <= data_len);
74 * Technically we should subtract the mean first like so:
75 * for(i = 0; i < data_len; i++)
77 * but it appears not to make enough of a difference to matter, and
78 * most signals are already closely centered around zero
81 for(i = lag, d = 0.0; i < data_len; i++)
82 d += data[i] * data[i - lag];
88 * this version tends to run faster because of better data locality
89 * ('data_len' is usually much larger than 'lag')
92 unsigned sample, coeff;
93 const unsigned limit = data_len - lag;
95 FLAC__ASSERT(lag > 0);
96 FLAC__ASSERT(lag <= data_len);
/* main part: every sample up to 'limit' contributes to all 'lag' bins */
98 for(coeff = 0; coeff < lag; coeff++)
100 for(sample = 0; sample <= limit; sample++) {
102 for(coeff = 0; coeff < lag; coeff++)
103 autoc[coeff] += d * data[sample+coeff];
/* tail: past 'limit' only data_len - sample products remain in range */
105 for(; sample < data_len; sample++) {
107 for(coeff = 0; coeff < data_len - sample; coeff++)
108 autoc[coeff] += d * data[sample+coeff];
/*
 * Levinson-Durbin recursion: converts autocorrelation values autoc[]
 * into LP coefficients, saving the coefficient set for EVERY order from
 * 1 up to *max_order into lp_coeff[order-1][]. 'r' is the reflection
 * coefficient of the current iteration; 'err' is the prediction error,
 * reduced by the factor (1 - r*r) each order. The inner symmetric
 * update walks lpc[] from both ends toward the middle, which is why the
 * loop bound is (i>>1) with a separate middle-element update for odd i.
 * NOTE(review): several original lines (declarations of i/j, the
 * initializations of err and r, and closing braces) are elided in this
 * excerpt; only comments were added.
 */
112 void FLAC__lpc_compute_lp_coefficients(const FLAC__real autoc[], unsigned *max_order, FLAC__real lp_coeff[][FLAC__MAX_LPC_ORDER], FLAC__double error[])
115 FLAC__double r, err, lpc[FLAC__MAX_LPC_ORDER];
117 FLAC__ASSERT(0 != max_order);
118 FLAC__ASSERT(0 < *max_order);
119 FLAC__ASSERT(*max_order <= FLAC__MAX_LPC_ORDER);
120 FLAC__ASSERT(autoc[0] != 0.0);
124 for(i = 0; i < *max_order; i++) {
125 /* Sum up this iteration's reflection coefficient. */
127 for(j = 0; j < i; j++)
128 r -= lpc[j] * autoc[i-j];
130 /* Update LPC coefficients and total error. */
132 for(j = 0; j < (i>>1); j++) {
133 FLAC__double tmp = lpc[j];
134 lpc[j] += r * lpc[i-1-j];
135 lpc[i-1-j] += r * tmp;
/* middle element when i is odd */
138 lpc[j] += lpc[j] * r;
140 err *= (1.0 - r * r);
142 /* save this order */
143 for(j = 0; j <= i; j++)
144 lp_coeff[i][j] = (FLAC__real)(-lpc[j]); /* negate FIR filter coeff to get predictor coeff */
147 /* see SF bug #1601812 http://sourceforge.net/tracker/index.php?func=detail&aid=1601812&group_id=13478&atid=113478 */
/*
 * Quantizes real-valued LP coefficients to integers qlp_coeff[] using
 * 'precision' bits plus a common right-shift '*shift' chosen from the
 * magnitude of the largest coefficient (via frexp on cmax). Rounding
 * error is carried from one coefficient to the next in the 'error'
 * accumulator so quantization noise does not accumulate in one
 * direction. A negative computed shift cannot be encoded usefully
 * (it is a NOP in the decoder), so the coefficients are scaled DOWN by
 * dividing instead.
 * NOTE(review): this excerpt elides several original lines (the
 * declarations of i/cmax/log2cmax/q, clamping of q to [qmin,qmax],
 * #else/#endif lines, and the return statements); only comments were
 * added — confirm against the full source.
 */
155 int FLAC__lpc_quantize_coefficients(const FLAC__real lp_coeff[], unsigned order, unsigned precision, FLAC__int32 qlp_coeff[], int *shift)
159 FLAC__int32 qmax, qmin;
161 FLAC__ASSERT(precision > 0);
162 FLAC__ASSERT(precision >= FLAC__MIN_QLP_COEFF_PRECISION);
164 /* drop one bit for the sign; from here on out we consider only |lp_coeff[i]| */
166 qmax = 1 << precision;
170 /* calc cmax = max( |lp_coeff[i]| ) */
172 for(i = 0; i < order; i++) {
173 const FLAC__double d = fabs(lp_coeff[i]);
179 /* => coefficients are all 0, which means our constant-detect didn't work */
/* shift limits come from the bit width of the QLP shift field in the stream */
183 const int max_shiftlimit = (1 << (FLAC__SUBFRAME_LPC_QLP_SHIFT_LEN-1)) - 1;
184 const int min_shiftlimit = -max_shiftlimit - 1;
/* frexp gives the binary exponent of cmax; its mantissa is discarded */
187 (void)frexp(cmax, &log2cmax);
189 *shift = (int)precision - log2cmax - 1;
191 if(*shift > max_shiftlimit)
192 *shift = max_shiftlimit;
193 else if(*shift < min_shiftlimit)
/* non-negative shift: scale coefficients UP by 2^shift, with error feedback */
198 FLAC__double error = 0.0;
200 for(i = 0; i < order; i++) {
201 error += lp_coeff[i] * (1 << *shift);
202 #if 1 /* unfortunately lround() is C99 */
204 q = (FLAC__int32)(error + 0.5);
206 q = (FLAC__int32)(error - 0.5);
210 #ifdef FLAC__OVERFLOW_DETECT
211 if(q > qmax+1) /* we expect q==qmax+1 occasionally due to rounding */
212 fprintf(stderr,"FLAC__lpc_quantize_coefficients: quantizer overflow: q>qmax %d>%d shift=%d cmax=%f precision=%u lpc[%u]=%f\n",q,qmax,*shift,cmax,precision+1,i,lp_coeff[i]);
214 fprintf(stderr,"FLAC__lpc_quantize_coefficients: quantizer overflow: q<qmin %d<%d shift=%d cmax=%f precision=%u lpc[%u]=%f\n",q,qmin,*shift,cmax,precision+1,i,lp_coeff[i]);
224 /* negative shift is very rare but due to design flaw, negative shift is
225 * a NOP in the decoder, so it must be handled specially by scaling down
229 const int nshift = -(*shift);
230 FLAC__double error = 0.0;
233 fprintf(stderr,"FLAC__lpc_quantize_coefficients: negative shift=%d order=%u cmax=%f\n", *shift, order, cmax);
235 for(i = 0; i < order; i++) {
236 error += lp_coeff[i] / (1 << nshift);
237 #if 1 /* unfortunately lround() is C99 */
239 q = (FLAC__int32)(error + 0.5);
241 q = (FLAC__int32)(error - 0.5);
245 #ifdef FLAC__OVERFLOW_DETECT
246 if(q > qmax+1) /* we expect q==qmax+1 occasionally due to rounding */
247 fprintf(stderr,"FLAC__lpc_quantize_coefficients: quantizer overflow: q>qmax %d>%d shift=%d cmax=%f precision=%u lpc[%u]=%f\n",q,qmax,*shift,cmax,precision+1,i,lp_coeff[i]);
249 fprintf(stderr,"FLAC__lpc_quantize_coefficients: quantizer overflow: q<qmin %d<%d shift=%d cmax=%f precision=%u lpc[%u]=%f\n",q,qmin,*shift,cmax,precision+1,i,lp_coeff[i]);
/*
 * Computes the prediction residual: residual[i] = data[i] - p[i], where
 * p[i] is a fixed-point FIR prediction over the previous 'order'
 * samples using quantized coefficients qlp_coeff[] and a final
 * arithmetic right-shift by lp_quantization. 'data' must have 'order'
 * valid history samples before index 0. Two builds are selected below:
 * a generic loop with optional overflow detection (32-bit accumulator),
 * and a fully unrolled version with a dedicated loop per order 1..12
 * (the FLAC subset limit) plus a fall-through switch for orders 13..32.
 * NOTE(review): this excerpt elides many original lines (braces, 'sum'
 * declarations/initializations, several if/else-if lines, the
 * 'switch(order)' line, and #endif's); only comments were added.
 */
264 void FLAC__lpc_compute_residual_from_qlp_coefficients(const FLAC__int32 *data, unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 residual[])
265 #if defined(FLAC__OVERFLOW_DETECT) || !defined(FLAC__LPC_UNROLLED_FILTER_LOOPS)
270 const FLAC__int32 *history;
272 #ifdef FLAC__OVERFLOW_DETECT_VERBOSE
273 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients: data_len=%d, order=%u, lpq=%d",data_len,order,lp_quantization);
275 fprintf(stderr,", q[%u]=%d",i,qlp_coeff[i]);
276 fprintf(stderr,"\n");
278 FLAC__ASSERT(order > 0);
280 for(i = 0; i < data_len; i++) {
/* walk history backwards; sumo mirrors sum in 64 bits to detect overflow */
284 for(j = 0; j < order; j++) {
285 sum += qlp_coeff[j] * (*(--history));
286 sumo += (FLAC__int64)qlp_coeff[j] * (FLAC__int64)(*history);
287 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients: OVERFLOW, i=%u, j=%u, c=%d, d=%d, sumo=%" PRId64 "\n",i,j,qlp_coeff[j],*history,sumo);
289 *(residual++) = *(data++) - (sum >> lp_quantization);
292 /* Here's a slower but clearer version:
293 for(i = 0; i < data_len; i++) {
295 for(j = 0; j < order; j++)
296 sum += qlp_coeff[j] * data[i-j-1];
297 residual[i] = data[i] - (sum >> lp_quantization);
301 #else /* fully unrolled version for normal use */
306 FLAC__ASSERT(order > 0);
307 FLAC__ASSERT(order <= 32);
310 * We do unique versions up to 12th order since that's the subset limit.
311 * Also they are roughly ordered to match frequency of occurrence to
312 * minimize branching.
/* order == 12 */
318 for(i = 0; i < (int)data_len; i++) {
320 sum += qlp_coeff[11] * data[i-12];
321 sum += qlp_coeff[10] * data[i-11];
322 sum += qlp_coeff[9] * data[i-10];
323 sum += qlp_coeff[8] * data[i-9];
324 sum += qlp_coeff[7] * data[i-8];
325 sum += qlp_coeff[6] * data[i-7];
326 sum += qlp_coeff[5] * data[i-6];
327 sum += qlp_coeff[4] * data[i-5];
328 sum += qlp_coeff[3] * data[i-4];
329 sum += qlp_coeff[2] * data[i-3];
330 sum += qlp_coeff[1] * data[i-2];
331 sum += qlp_coeff[0] * data[i-1];
332 residual[i] = data[i] - (sum >> lp_quantization);
335 else { /* order == 11 */
336 for(i = 0; i < (int)data_len; i++) {
338 sum += qlp_coeff[10] * data[i-11];
339 sum += qlp_coeff[9] * data[i-10];
340 sum += qlp_coeff[8] * data[i-9];
341 sum += qlp_coeff[7] * data[i-8];
342 sum += qlp_coeff[6] * data[i-7];
343 sum += qlp_coeff[5] * data[i-6];
344 sum += qlp_coeff[4] * data[i-5];
345 sum += qlp_coeff[3] * data[i-4];
346 sum += qlp_coeff[2] * data[i-3];
347 sum += qlp_coeff[1] * data[i-2];
348 sum += qlp_coeff[0] * data[i-1];
349 residual[i] = data[i] - (sum >> lp_quantization);
/* order == 10 */
355 for(i = 0; i < (int)data_len; i++) {
357 sum += qlp_coeff[9] * data[i-10];
358 sum += qlp_coeff[8] * data[i-9];
359 sum += qlp_coeff[7] * data[i-8];
360 sum += qlp_coeff[6] * data[i-7];
361 sum += qlp_coeff[5] * data[i-6];
362 sum += qlp_coeff[4] * data[i-5];
363 sum += qlp_coeff[3] * data[i-4];
364 sum += qlp_coeff[2] * data[i-3];
365 sum += qlp_coeff[1] * data[i-2];
366 sum += qlp_coeff[0] * data[i-1];
367 residual[i] = data[i] - (sum >> lp_quantization);
370 else { /* order == 9 */
371 for(i = 0; i < (int)data_len; i++) {
373 sum += qlp_coeff[8] * data[i-9];
374 sum += qlp_coeff[7] * data[i-8];
375 sum += qlp_coeff[6] * data[i-7];
376 sum += qlp_coeff[5] * data[i-6];
377 sum += qlp_coeff[4] * data[i-5];
378 sum += qlp_coeff[3] * data[i-4];
379 sum += qlp_coeff[2] * data[i-3];
380 sum += qlp_coeff[1] * data[i-2];
381 sum += qlp_coeff[0] * data[i-1];
382 residual[i] = data[i] - (sum >> lp_quantization);
/* order == 8 */
390 for(i = 0; i < (int)data_len; i++) {
392 sum += qlp_coeff[7] * data[i-8];
393 sum += qlp_coeff[6] * data[i-7];
394 sum += qlp_coeff[5] * data[i-6];
395 sum += qlp_coeff[4] * data[i-5];
396 sum += qlp_coeff[3] * data[i-4];
397 sum += qlp_coeff[2] * data[i-3];
398 sum += qlp_coeff[1] * data[i-2];
399 sum += qlp_coeff[0] * data[i-1];
400 residual[i] = data[i] - (sum >> lp_quantization);
403 else { /* order == 7 */
404 for(i = 0; i < (int)data_len; i++) {
406 sum += qlp_coeff[6] * data[i-7];
407 sum += qlp_coeff[5] * data[i-6];
408 sum += qlp_coeff[4] * data[i-5];
409 sum += qlp_coeff[3] * data[i-4];
410 sum += qlp_coeff[2] * data[i-3];
411 sum += qlp_coeff[1] * data[i-2];
412 sum += qlp_coeff[0] * data[i-1];
413 residual[i] = data[i] - (sum >> lp_quantization);
/* order == 6 */
419 for(i = 0; i < (int)data_len; i++) {
421 sum += qlp_coeff[5] * data[i-6];
422 sum += qlp_coeff[4] * data[i-5];
423 sum += qlp_coeff[3] * data[i-4];
424 sum += qlp_coeff[2] * data[i-3];
425 sum += qlp_coeff[1] * data[i-2];
426 sum += qlp_coeff[0] * data[i-1];
427 residual[i] = data[i] - (sum >> lp_quantization);
430 else { /* order == 5 */
431 for(i = 0; i < (int)data_len; i++) {
433 sum += qlp_coeff[4] * data[i-5];
434 sum += qlp_coeff[3] * data[i-4];
435 sum += qlp_coeff[2] * data[i-3];
436 sum += qlp_coeff[1] * data[i-2];
437 sum += qlp_coeff[0] * data[i-1];
438 residual[i] = data[i] - (sum >> lp_quantization);
/* order == 4 */
446 for(i = 0; i < (int)data_len; i++) {
448 sum += qlp_coeff[3] * data[i-4];
449 sum += qlp_coeff[2] * data[i-3];
450 sum += qlp_coeff[1] * data[i-2];
451 sum += qlp_coeff[0] * data[i-1];
452 residual[i] = data[i] - (sum >> lp_quantization);
455 else { /* order == 3 */
456 for(i = 0; i < (int)data_len; i++) {
458 sum += qlp_coeff[2] * data[i-3];
459 sum += qlp_coeff[1] * data[i-2];
460 sum += qlp_coeff[0] * data[i-1];
461 residual[i] = data[i] - (sum >> lp_quantization);
/* order == 2 */
467 for(i = 0; i < (int)data_len; i++) {
469 sum += qlp_coeff[1] * data[i-2];
470 sum += qlp_coeff[0] * data[i-1];
471 residual[i] = data[i] - (sum >> lp_quantization);
474 else { /* order == 1 */
475 for(i = 0; i < (int)data_len; i++)
476 residual[i] = data[i] - ((qlp_coeff[0] * data[i-1]) >> lp_quantization);
481 else { /* order > 12 */
482 for(i = 0; i < (int)data_len; i++) {
/* case ladder: fallthrough is intentional — each case adds one tap,
 * so entering at 'case order' accumulates taps order-1 down to 12 */
485 case 32: sum += qlp_coeff[31] * data[i-32];
486 case 31: sum += qlp_coeff[30] * data[i-31];
487 case 30: sum += qlp_coeff[29] * data[i-30];
488 case 29: sum += qlp_coeff[28] * data[i-29];
489 case 28: sum += qlp_coeff[27] * data[i-28];
490 case 27: sum += qlp_coeff[26] * data[i-27];
491 case 26: sum += qlp_coeff[25] * data[i-26];
492 case 25: sum += qlp_coeff[24] * data[i-25];
493 case 24: sum += qlp_coeff[23] * data[i-24];
494 case 23: sum += qlp_coeff[22] * data[i-23];
495 case 22: sum += qlp_coeff[21] * data[i-22];
496 case 21: sum += qlp_coeff[20] * data[i-21];
497 case 20: sum += qlp_coeff[19] * data[i-20];
498 case 19: sum += qlp_coeff[18] * data[i-19];
499 case 18: sum += qlp_coeff[17] * data[i-18];
500 case 17: sum += qlp_coeff[16] * data[i-17];
501 case 16: sum += qlp_coeff[15] * data[i-16];
502 case 15: sum += qlp_coeff[14] * data[i-15];
503 case 14: sum += qlp_coeff[13] * data[i-14];
504 case 13: sum += qlp_coeff[12] * data[i-13];
505 sum += qlp_coeff[11] * data[i-12];
506 sum += qlp_coeff[10] * data[i-11];
507 sum += qlp_coeff[ 9] * data[i-10];
508 sum += qlp_coeff[ 8] * data[i- 9];
509 sum += qlp_coeff[ 7] * data[i- 8];
510 sum += qlp_coeff[ 6] * data[i- 7];
511 sum += qlp_coeff[ 5] * data[i- 6];
512 sum += qlp_coeff[ 4] * data[i- 5];
513 sum += qlp_coeff[ 3] * data[i- 4];
514 sum += qlp_coeff[ 2] * data[i- 3];
515 sum += qlp_coeff[ 1] * data[i- 2];
516 sum += qlp_coeff[ 0] * data[i- 1];
518 residual[i] = data[i] - (sum >> lp_quantization);
/*
 * Same residual computation as FLAC__lpc_compute_residual_from_qlp_
 * coefficients, but with a 64-bit accumulator (each product is computed
 * as FLAC__int64), for streams where 32-bit accumulation could
 * overflow. The result is truncated back to FLAC__int32 after the
 * lp_quantization shift. Structure mirrors the narrow version: generic
 * loop with overflow detection, or per-order unrolled loops (1..12)
 * plus a fall-through switch for orders 13..32.
 * NOTE(review): this excerpt elides many original lines (braces, 'sum'
 * declarations, if/else-if lines, the 'switch(order)' line, #endif's);
 * only comments were added.
 */
524 void FLAC__lpc_compute_residual_from_qlp_coefficients_wide(const FLAC__int32 *data, unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 residual[])
525 #if defined(FLAC__OVERFLOW_DETECT) || !defined(FLAC__LPC_UNROLLED_FILTER_LOOPS)
529 const FLAC__int32 *history;
531 #ifdef FLAC__OVERFLOW_DETECT_VERBOSE
532 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients_wide: data_len=%d, order=%u, lpq=%d",data_len,order,lp_quantization);
534 fprintf(stderr,", q[%u]=%d",i,qlp_coeff[i]);
535 fprintf(stderr,"\n");
537 FLAC__ASSERT(order > 0);
539 for(i = 0; i < data_len; i++) {
542 for(j = 0; j < order; j++)
543 sum += (FLAC__int64)qlp_coeff[j] * (FLAC__int64)(*(--history));
/* flag any value that no longer fits in 32 bits after the shift */
544 if(FLAC__bitmath_silog2_wide(sum >> lp_quantization) > 32) {
545 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients_wide: OVERFLOW, i=%u, sum=%" PRId64 "\n", i, (sum >> lp_quantization));
548 if(FLAC__bitmath_silog2_wide((FLAC__int64)(*data) - (sum >> lp_quantization)) > 32) {
549 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients_wide: OVERFLOW, i=%u, data=%d, sum=%" PRId64 ", residual=%" PRId64 "\n", i, *data, (long long)(sum >> lp_quantization), ((FLAC__int64)(*data) - (sum >> lp_quantization)));
552 *(residual++) = *(data++) - (FLAC__int32)(sum >> lp_quantization);
555 #else /* fully unrolled version for normal use */
560 FLAC__ASSERT(order > 0);
561 FLAC__ASSERT(order <= 32);
564 * We do unique versions up to 12th order since that's the subset limit.
565 * Also they are roughly ordered to match frequency of occurrence to
566 * minimize branching.
/* order == 12 */
572 for(i = 0; i < (int)data_len; i++) {
574 sum += qlp_coeff[11] * (FLAC__int64)data[i-12];
575 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
576 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
577 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
578 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
579 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
580 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
581 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
582 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
583 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
584 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
585 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
586 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
589 else { /* order == 11 */
590 for(i = 0; i < (int)data_len; i++) {
592 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
593 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
594 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
595 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
596 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
597 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
598 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
599 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
600 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
601 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
602 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
603 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
/* order == 10 */
609 for(i = 0; i < (int)data_len; i++) {
611 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
612 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
613 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
614 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
615 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
616 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
617 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
618 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
619 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
620 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
621 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
624 else { /* order == 9 */
625 for(i = 0; i < (int)data_len; i++) {
627 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
628 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
629 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
630 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
631 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
632 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
633 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
634 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
635 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
636 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
/* order == 8 */
644 for(i = 0; i < (int)data_len; i++) {
646 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
647 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
648 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
649 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
650 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
651 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
652 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
653 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
654 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
657 else { /* order == 7 */
658 for(i = 0; i < (int)data_len; i++) {
660 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
661 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
662 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
663 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
664 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
665 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
666 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
667 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
/* order == 6 */
673 for(i = 0; i < (int)data_len; i++) {
675 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
676 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
677 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
678 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
679 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
680 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
681 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
684 else { /* order == 5 */
685 for(i = 0; i < (int)data_len; i++) {
687 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
688 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
689 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
690 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
691 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
692 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
/* order == 4 */
700 for(i = 0; i < (int)data_len; i++) {
702 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
703 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
704 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
705 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
706 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
709 else { /* order == 3 */
710 for(i = 0; i < (int)data_len; i++) {
712 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
713 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
714 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
715 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
/* order == 2 */
721 for(i = 0; i < (int)data_len; i++) {
723 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
724 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
725 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
728 else { /* order == 1 */
729 for(i = 0; i < (int)data_len; i++)
730 residual[i] = data[i] - (FLAC__int32)((qlp_coeff[0] * (FLAC__int64)data[i-1]) >> lp_quantization);
735 else { /* order > 12 */
736 for(i = 0; i < (int)data_len; i++) {
/* case ladder: fallthrough is intentional — each case adds one tap */
739 case 32: sum += qlp_coeff[31] * (FLAC__int64)data[i-32];
740 case 31: sum += qlp_coeff[30] * (FLAC__int64)data[i-31];
741 case 30: sum += qlp_coeff[29] * (FLAC__int64)data[i-30];
742 case 29: sum += qlp_coeff[28] * (FLAC__int64)data[i-29];
743 case 28: sum += qlp_coeff[27] * (FLAC__int64)data[i-28];
744 case 27: sum += qlp_coeff[26] * (FLAC__int64)data[i-27];
745 case 26: sum += qlp_coeff[25] * (FLAC__int64)data[i-26];
746 case 25: sum += qlp_coeff[24] * (FLAC__int64)data[i-25];
747 case 24: sum += qlp_coeff[23] * (FLAC__int64)data[i-24];
748 case 23: sum += qlp_coeff[22] * (FLAC__int64)data[i-23];
749 case 22: sum += qlp_coeff[21] * (FLAC__int64)data[i-22];
750 case 21: sum += qlp_coeff[20] * (FLAC__int64)data[i-21];
751 case 20: sum += qlp_coeff[19] * (FLAC__int64)data[i-20];
752 case 19: sum += qlp_coeff[18] * (FLAC__int64)data[i-19];
753 case 18: sum += qlp_coeff[17] * (FLAC__int64)data[i-18];
754 case 17: sum += qlp_coeff[16] * (FLAC__int64)data[i-17];
755 case 16: sum += qlp_coeff[15] * (FLAC__int64)data[i-16];
756 case 15: sum += qlp_coeff[14] * (FLAC__int64)data[i-15];
757 case 14: sum += qlp_coeff[13] * (FLAC__int64)data[i-14];
758 case 13: sum += qlp_coeff[12] * (FLAC__int64)data[i-13];
759 sum += qlp_coeff[11] * (FLAC__int64)data[i-12];
760 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
761 sum += qlp_coeff[ 9] * (FLAC__int64)data[i-10];
762 sum += qlp_coeff[ 8] * (FLAC__int64)data[i- 9];
763 sum += qlp_coeff[ 7] * (FLAC__int64)data[i- 8];
764 sum += qlp_coeff[ 6] * (FLAC__int64)data[i- 7];
765 sum += qlp_coeff[ 5] * (FLAC__int64)data[i- 6];
766 sum += qlp_coeff[ 4] * (FLAC__int64)data[i- 5];
767 sum += qlp_coeff[ 3] * (FLAC__int64)data[i- 4];
768 sum += qlp_coeff[ 2] * (FLAC__int64)data[i- 3];
769 sum += qlp_coeff[ 1] * (FLAC__int64)data[i- 2];
770 sum += qlp_coeff[ 0] * (FLAC__int64)data[i- 1];
772 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
778 #endif /* !defined FLAC__INTEGER_ONLY_LIBRARY */
/*
 * Decoder-side inverse of FLAC__lpc_compute_residual_from_qlp_
 * coefficients: reconstructs samples in place as
 * data[i] = residual[i] + (prediction >> lp_quantization), where the
 * prediction uses the 'order' previously-reconstructed samples (so
 * data[] must have 'order' valid warm-up samples before index 0).
 * Same two builds as the encoder side: a generic loop with optional
 * overflow detection, and per-order unrolled loops (1..12) plus a
 * fall-through switch for orders 13..32.
 * NOTE(review): this excerpt elides many original lines (braces, 'sum'
 * declarations, if/else-if lines, the 'switch(order)' line, #endif's);
 * only comments were added.
 */
780 void FLAC__lpc_restore_signal(const FLAC__int32 residual[], unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 data[])
781 #if defined(FLAC__OVERFLOW_DETECT) || !defined(FLAC__LPC_UNROLLED_FILTER_LOOPS)
786 const FLAC__int32 *r = residual, *history;
788 #ifdef FLAC__OVERFLOW_DETECT_VERBOSE
789 fprintf(stderr,"FLAC__lpc_restore_signal: data_len=%d, order=%u, lpq=%d",data_len,order,lp_quantization);
791 fprintf(stderr,", q[%u]=%d",i,qlp_coeff[i]);
792 fprintf(stderr,"\n");
794 FLAC__ASSERT(order > 0);
796 for(i = 0; i < data_len; i++) {
/* sumo mirrors sum in 64 bits purely to detect 32-bit overflow */
800 for(j = 0; j < order; j++) {
801 sum += qlp_coeff[j] * (*(--history));
802 sumo += (FLAC__int64)qlp_coeff[j] * (FLAC__int64)(*history);
803 if(sumo > 2147483647ll || sumo < -2147483648ll)
804 fprintf(stderr,"FLAC__lpc_restore_signal: OVERFLOW, i=%u, j=%u, c=%d, d=%d, sumo=%" PRId64 "\n",i,j,qlp_coeff[j],*history,sumo);
806 *(data++) = *(r++) + (sum >> lp_quantization);
809 /* Here's a slower but clearer version:
810 for(i = 0; i < data_len; i++) {
812 for(j = 0; j < order; j++)
813 sum += qlp_coeff[j] * data[i-j-1];
814 data[i] = residual[i] + (sum >> lp_quantization);
818 #else /* fully unrolled version for normal use */
823 FLAC__ASSERT(order > 0);
824 FLAC__ASSERT(order <= 32);
827 * We do unique versions up to 12th order since that's the subset limit.
828 * Also they are roughly ordered to match frequency of occurrence to
829 * minimize branching.
/* order == 12 */
835 for(i = 0; i < (int)data_len; i++) {
837 sum += qlp_coeff[11] * data[i-12];
838 sum += qlp_coeff[10] * data[i-11];
839 sum += qlp_coeff[9] * data[i-10];
840 sum += qlp_coeff[8] * data[i-9];
841 sum += qlp_coeff[7] * data[i-8];
842 sum += qlp_coeff[6] * data[i-7];
843 sum += qlp_coeff[5] * data[i-6];
844 sum += qlp_coeff[4] * data[i-5];
845 sum += qlp_coeff[3] * data[i-4];
846 sum += qlp_coeff[2] * data[i-3];
847 sum += qlp_coeff[1] * data[i-2];
848 sum += qlp_coeff[0] * data[i-1];
849 data[i] = residual[i] + (sum >> lp_quantization);
852 else { /* order == 11 */
853 for(i = 0; i < (int)data_len; i++) {
855 sum += qlp_coeff[10] * data[i-11];
856 sum += qlp_coeff[9] * data[i-10];
857 sum += qlp_coeff[8] * data[i-9];
858 sum += qlp_coeff[7] * data[i-8];
859 sum += qlp_coeff[6] * data[i-7];
860 sum += qlp_coeff[5] * data[i-6];
861 sum += qlp_coeff[4] * data[i-5];
862 sum += qlp_coeff[3] * data[i-4];
863 sum += qlp_coeff[2] * data[i-3];
864 sum += qlp_coeff[1] * data[i-2];
865 sum += qlp_coeff[0] * data[i-1];
866 data[i] = residual[i] + (sum >> lp_quantization);
/* order == 10 */
872 for(i = 0; i < (int)data_len; i++) {
874 sum += qlp_coeff[9] * data[i-10];
875 sum += qlp_coeff[8] * data[i-9];
876 sum += qlp_coeff[7] * data[i-8];
877 sum += qlp_coeff[6] * data[i-7];
878 sum += qlp_coeff[5] * data[i-6];
879 sum += qlp_coeff[4] * data[i-5];
880 sum += qlp_coeff[3] * data[i-4];
881 sum += qlp_coeff[2] * data[i-3];
882 sum += qlp_coeff[1] * data[i-2];
883 sum += qlp_coeff[0] * data[i-1];
884 data[i] = residual[i] + (sum >> lp_quantization);
887 else { /* order == 9 */
888 for(i = 0; i < (int)data_len; i++) {
890 sum += qlp_coeff[8] * data[i-9];
891 sum += qlp_coeff[7] * data[i-8];
892 sum += qlp_coeff[6] * data[i-7];
893 sum += qlp_coeff[5] * data[i-6];
894 sum += qlp_coeff[4] * data[i-5];
895 sum += qlp_coeff[3] * data[i-4];
896 sum += qlp_coeff[2] * data[i-3];
897 sum += qlp_coeff[1] * data[i-2];
898 sum += qlp_coeff[0] * data[i-1];
899 data[i] = residual[i] + (sum >> lp_quantization);
/* order == 8 */
907 for(i = 0; i < (int)data_len; i++) {
909 sum += qlp_coeff[7] * data[i-8];
910 sum += qlp_coeff[6] * data[i-7];
911 sum += qlp_coeff[5] * data[i-6];
912 sum += qlp_coeff[4] * data[i-5];
913 sum += qlp_coeff[3] * data[i-4];
914 sum += qlp_coeff[2] * data[i-3];
915 sum += qlp_coeff[1] * data[i-2];
916 sum += qlp_coeff[0] * data[i-1];
917 data[i] = residual[i] + (sum >> lp_quantization);
920 else { /* order == 7 */
921 for(i = 0; i < (int)data_len; i++) {
923 sum += qlp_coeff[6] * data[i-7];
924 sum += qlp_coeff[5] * data[i-6];
925 sum += qlp_coeff[4] * data[i-5];
926 sum += qlp_coeff[3] * data[i-4];
927 sum += qlp_coeff[2] * data[i-3];
928 sum += qlp_coeff[1] * data[i-2];
929 sum += qlp_coeff[0] * data[i-1];
930 data[i] = residual[i] + (sum >> lp_quantization);
/* order == 6 */
936 for(i = 0; i < (int)data_len; i++) {
938 sum += qlp_coeff[5] * data[i-6];
939 sum += qlp_coeff[4] * data[i-5];
940 sum += qlp_coeff[3] * data[i-4];
941 sum += qlp_coeff[2] * data[i-3];
942 sum += qlp_coeff[1] * data[i-2];
943 sum += qlp_coeff[0] * data[i-1];
944 data[i] = residual[i] + (sum >> lp_quantization);
947 else { /* order == 5 */
948 for(i = 0; i < (int)data_len; i++) {
950 sum += qlp_coeff[4] * data[i-5];
951 sum += qlp_coeff[3] * data[i-4];
952 sum += qlp_coeff[2] * data[i-3];
953 sum += qlp_coeff[1] * data[i-2];
954 sum += qlp_coeff[0] * data[i-1];
955 data[i] = residual[i] + (sum >> lp_quantization);
/* order == 4 */
963 for(i = 0; i < (int)data_len; i++) {
965 sum += qlp_coeff[3] * data[i-4];
966 sum += qlp_coeff[2] * data[i-3];
967 sum += qlp_coeff[1] * data[i-2];
968 sum += qlp_coeff[0] * data[i-1];
969 data[i] = residual[i] + (sum >> lp_quantization);
972 else { /* order == 3 */
973 for(i = 0; i < (int)data_len; i++) {
975 sum += qlp_coeff[2] * data[i-3];
976 sum += qlp_coeff[1] * data[i-2];
977 sum += qlp_coeff[0] * data[i-1];
978 data[i] = residual[i] + (sum >> lp_quantization);
/* order == 2 */
984 for(i = 0; i < (int)data_len; i++) {
986 sum += qlp_coeff[1] * data[i-2];
987 sum += qlp_coeff[0] * data[i-1];
988 data[i] = residual[i] + (sum >> lp_quantization);
991 else { /* order == 1 */
992 for(i = 0; i < (int)data_len; i++)
993 data[i] = residual[i] + ((qlp_coeff[0] * data[i-1]) >> lp_quantization);
998 else { /* order > 12 */
999 for(i = 0; i < (int)data_len; i++) {
/* case ladder: fallthrough is intentional — each case adds one tap */
1002 case 32: sum += qlp_coeff[31] * data[i-32];
1003 case 31: sum += qlp_coeff[30] * data[i-31];
1004 case 30: sum += qlp_coeff[29] * data[i-30];
1005 case 29: sum += qlp_coeff[28] * data[i-29];
1006 case 28: sum += qlp_coeff[27] * data[i-28];
1007 case 27: sum += qlp_coeff[26] * data[i-27];
1008 case 26: sum += qlp_coeff[25] * data[i-26];
1009 case 25: sum += qlp_coeff[24] * data[i-25];
1010 case 24: sum += qlp_coeff[23] * data[i-24];
1011 case 23: sum += qlp_coeff[22] * data[i-23];
1012 case 22: sum += qlp_coeff[21] * data[i-22];
1013 case 21: sum += qlp_coeff[20] * data[i-21];
1014 case 20: sum += qlp_coeff[19] * data[i-20];
1015 case 19: sum += qlp_coeff[18] * data[i-19];
1016 case 18: sum += qlp_coeff[17] * data[i-18];
1017 case 17: sum += qlp_coeff[16] * data[i-17];
1018 case 16: sum += qlp_coeff[15] * data[i-16];
1019 case 15: sum += qlp_coeff[14] * data[i-15];
1020 case 14: sum += qlp_coeff[13] * data[i-14];
1021 case 13: sum += qlp_coeff[12] * data[i-13];
1022 sum += qlp_coeff[11] * data[i-12];
1023 sum += qlp_coeff[10] * data[i-11];
1024 sum += qlp_coeff[ 9] * data[i-10];
1025 sum += qlp_coeff[ 8] * data[i- 9];
1026 sum += qlp_coeff[ 7] * data[i- 8];
1027 sum += qlp_coeff[ 6] * data[i- 7];
1028 sum += qlp_coeff[ 5] * data[i- 6];
1029 sum += qlp_coeff[ 4] * data[i- 5];
1030 sum += qlp_coeff[ 3] * data[i- 4];
1031 sum += qlp_coeff[ 2] * data[i- 3];
1032 sum += qlp_coeff[ 1] * data[i- 2];
1033 sum += qlp_coeff[ 0] * data[i- 1];
1035 data[i] = residual[i] + (sum >> lp_quantization);
1041 void FLAC__lpc_restore_signal_wide(const FLAC__int32 residual[], unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 data[])
1042 #if defined(FLAC__OVERFLOW_DETECT) || !defined(FLAC__LPC_UNROLLED_FILTER_LOOPS)
1046 const FLAC__int32 *r = residual, *history;
1048 #ifdef FLAC__OVERFLOW_DETECT_VERBOSE
1049 fprintf(stderr,"FLAC__lpc_restore_signal_wide: data_len=%d, order=%u, lpq=%d",data_len,order,lp_quantization);
1050 for(i=0;i<order;i++)
1051 fprintf(stderr,", q[%u]=%d",i,qlp_coeff[i]);
1052 fprintf(stderr,"\n");
1054 FLAC__ASSERT(order > 0);
1056 for(i = 0; i < data_len; i++) {
1059 for(j = 0; j < order; j++)
1060 sum += (FLAC__int64)qlp_coeff[j] * (FLAC__int64)(*(--history));
1061 if(FLAC__bitmath_silog2_wide(sum >> lp_quantization) > 32) {
1062 fprintf(stderr,"FLAC__lpc_restore_signal_wide: OVERFLOW, i=%u, sum=%" PRId64 "\n", i, (sum >> lp_quantization));
1065 if(FLAC__bitmath_silog2_wide((FLAC__int64)(*r) + (sum >> lp_quantization)) > 32) {
1066 fprintf(stderr,"FLAC__lpc_restore_signal_wide: OVERFLOW, i=%u, residual=%d, sum=%" PRId64 ", data=%" PRId64 "\n", i, *r, (sum >> lp_quantization), ((FLAC__int64)(*r) + (sum >> lp_quantization)));
1069 *(data++) = *(r++) + (FLAC__int32)(sum >> lp_quantization);
1072 #else /* fully unrolled version for normal use */
1077 FLAC__ASSERT(order > 0);
1078 FLAC__ASSERT(order <= 32);
1081 * We do unique versions up to 12th order since that's the subset limit.
1082 * Also they are roughly ordered to match frequency of occurrence to
1083 * minimize branching.
1089 for(i = 0; i < (int)data_len; i++) {
1091 sum += qlp_coeff[11] * (FLAC__int64)data[i-12];
1092 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
1093 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
1094 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
1095 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
1096 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
1097 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1098 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1099 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1100 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1101 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1102 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1103 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1106 else { /* order == 11 */
1107 for(i = 0; i < (int)data_len; i++) {
1109 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
1110 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
1111 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
1112 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
1113 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
1114 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1115 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1116 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1117 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1118 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1119 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1120 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1126 for(i = 0; i < (int)data_len; i++) {
1128 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
1129 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
1130 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
1131 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
1132 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1133 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1134 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1135 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1136 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1137 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1138 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1141 else { /* order == 9 */
1142 for(i = 0; i < (int)data_len; i++) {
1144 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
1145 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
1146 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
1147 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1148 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1149 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1150 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1151 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1152 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1153 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1158 else if(order > 4) {
1161 for(i = 0; i < (int)data_len; i++) {
1163 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
1164 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
1165 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1166 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1167 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1168 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1169 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1170 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1171 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1174 else { /* order == 7 */
1175 for(i = 0; i < (int)data_len; i++) {
1177 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
1178 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1179 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1180 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1181 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1182 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1183 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1184 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1190 for(i = 0; i < (int)data_len; i++) {
1192 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1193 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1194 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1195 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1196 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1197 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1198 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1201 else { /* order == 5 */
1202 for(i = 0; i < (int)data_len; i++) {
1204 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1205 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1206 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1207 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1208 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1209 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1217 for(i = 0; i < (int)data_len; i++) {
1219 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1220 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1221 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1222 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1223 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1226 else { /* order == 3 */
1227 for(i = 0; i < (int)data_len; i++) {
1229 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1230 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1231 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1232 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1238 for(i = 0; i < (int)data_len; i++) {
1240 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1241 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1242 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1245 else { /* order == 1 */
1246 for(i = 0; i < (int)data_len; i++)
1247 data[i] = residual[i] + (FLAC__int32)((qlp_coeff[0] * (FLAC__int64)data[i-1]) >> lp_quantization);
1252 else { /* order > 12 */
1253 for(i = 0; i < (int)data_len; i++) {
1256 case 32: sum += qlp_coeff[31] * (FLAC__int64)data[i-32];
1257 case 31: sum += qlp_coeff[30] * (FLAC__int64)data[i-31];
1258 case 30: sum += qlp_coeff[29] * (FLAC__int64)data[i-30];
1259 case 29: sum += qlp_coeff[28] * (FLAC__int64)data[i-29];
1260 case 28: sum += qlp_coeff[27] * (FLAC__int64)data[i-28];
1261 case 27: sum += qlp_coeff[26] * (FLAC__int64)data[i-27];
1262 case 26: sum += qlp_coeff[25] * (FLAC__int64)data[i-26];
1263 case 25: sum += qlp_coeff[24] * (FLAC__int64)data[i-25];
1264 case 24: sum += qlp_coeff[23] * (FLAC__int64)data[i-24];
1265 case 23: sum += qlp_coeff[22] * (FLAC__int64)data[i-23];
1266 case 22: sum += qlp_coeff[21] * (FLAC__int64)data[i-22];
1267 case 21: sum += qlp_coeff[20] * (FLAC__int64)data[i-21];
1268 case 20: sum += qlp_coeff[19] * (FLAC__int64)data[i-20];
1269 case 19: sum += qlp_coeff[18] * (FLAC__int64)data[i-19];
1270 case 18: sum += qlp_coeff[17] * (FLAC__int64)data[i-18];
1271 case 17: sum += qlp_coeff[16] * (FLAC__int64)data[i-17];
1272 case 16: sum += qlp_coeff[15] * (FLAC__int64)data[i-16];
1273 case 15: sum += qlp_coeff[14] * (FLAC__int64)data[i-15];
1274 case 14: sum += qlp_coeff[13] * (FLAC__int64)data[i-14];
1275 case 13: sum += qlp_coeff[12] * (FLAC__int64)data[i-13];
1276 sum += qlp_coeff[11] * (FLAC__int64)data[i-12];
1277 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
1278 sum += qlp_coeff[ 9] * (FLAC__int64)data[i-10];
1279 sum += qlp_coeff[ 8] * (FLAC__int64)data[i- 9];
1280 sum += qlp_coeff[ 7] * (FLAC__int64)data[i- 8];
1281 sum += qlp_coeff[ 6] * (FLAC__int64)data[i- 7];
1282 sum += qlp_coeff[ 5] * (FLAC__int64)data[i- 6];
1283 sum += qlp_coeff[ 4] * (FLAC__int64)data[i- 5];
1284 sum += qlp_coeff[ 3] * (FLAC__int64)data[i- 4];
1285 sum += qlp_coeff[ 2] * (FLAC__int64)data[i- 3];
1286 sum += qlp_coeff[ 1] * (FLAC__int64)data[i- 2];
1287 sum += qlp_coeff[ 0] * (FLAC__int64)data[i- 1];
1289 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1295 #ifndef FLAC__INTEGER_ONLY_LIBRARY
1297 FLAC__double FLAC__lpc_compute_expected_bits_per_residual_sample(FLAC__double lpc_error, unsigned total_samples)
1299 FLAC__double error_scale;
1301 FLAC__ASSERT(total_samples > 0);
1303 error_scale = 0.5 * M_LN2 * M_LN2 / (FLAC__double)total_samples;
1305 return FLAC__lpc_compute_expected_bits_per_residual_sample_with_error_scale(lpc_error, error_scale);
1308 FLAC__double FLAC__lpc_compute_expected_bits_per_residual_sample_with_error_scale(FLAC__double lpc_error, FLAC__double error_scale)
1310 if(lpc_error > 0.0) {
1311 FLAC__double bps = (FLAC__double)0.5 * log(error_scale * lpc_error) / M_LN2;
1317 else if(lpc_error < 0.0) { /* error should not be negative but can happen due to inadequate floating-point resolution */
1325 unsigned FLAC__lpc_compute_best_order(const FLAC__double lpc_error[], unsigned max_order, unsigned total_samples, unsigned overhead_bits_per_order)
1327 unsigned order, index, best_index; /* 'index' the index into lpc_error; index==order-1 since lpc_error[0] is for order==1, lpc_error[1] is for order==2, etc */
1328 FLAC__double bits, best_bits, error_scale;
1330 FLAC__ASSERT(max_order > 0);
1331 FLAC__ASSERT(total_samples > 0);
1333 error_scale = 0.5 * M_LN2 * M_LN2 / (FLAC__double)total_samples;
1336 best_bits = (unsigned)(-1);
1338 for(index = 0, order = 1; index < max_order; index++, order++) {
1339 bits = FLAC__lpc_compute_expected_bits_per_residual_sample_with_error_scale(lpc_error[index], error_scale) * (FLAC__double)(total_samples - order) + (FLAC__double)(order * overhead_bits_per_order);
1340 if(bits < best_bits) {
1346 return best_index+1; /* +1 since index of lpc_error[] is order-1 */
1349 #endif /* !defined FLAC__INTEGER_ONLY_LIBRARY */