error_3 = error_2 - last_error_2; total_error_3 += local_abs(error_3);
error_4 = error_3 - last_error_3; total_error_4 += local_abs(error_4);
- /* WATCHOUT - total_error_* has been known to overflow when encoding
- * erratic signals when the bits-per-sample is large. We avoid the
- * speed penalty of watching for overflow, and instead rely on the
- * encoder's evaluation of the subframe to catch these cases.
- */
+ last_error_0 = error_0;
+ last_error_1 = error_1;
+ last_error_2 = error_2;
+ last_error_3 = error_3;
+ }
+
+ if(total_error_0 < min(min(min(total_error_1, total_error_2), total_error_3), total_error_4))
+ order = 0;
+ else if(total_error_1 < min(min(total_error_2, total_error_3), total_error_4))
+ order = 1;
+ else if(total_error_2 < min(total_error_3, total_error_4))
+ order = 2;
+ else if(total_error_3 < total_error_4)
+ order = 3;
+ else
+ order = 4;
+
+ /* Estimate the expected number of bits per residual signal sample. */
+ /* 'total_error*' is linearly related to the variance of the residual */
+ /* signal, so we use it directly to compute E(|x|) */
+ residual_bits_per_sample[0] = (real)((data_len > 0) ? log(M_LN2 * (real)total_error_0 / (real) data_len) / M_LN2 : 0.0);
+ residual_bits_per_sample[1] = (real)((data_len > 0) ? log(M_LN2 * (real)total_error_1 / (real) data_len) / M_LN2 : 0.0);
+ residual_bits_per_sample[2] = (real)((data_len > 0) ? log(M_LN2 * (real)total_error_2 / (real) data_len) / M_LN2 : 0.0);
+ residual_bits_per_sample[3] = (real)((data_len > 0) ? log(M_LN2 * (real)total_error_3 / (real) data_len) / M_LN2 : 0.0);
+ residual_bits_per_sample[4] = (real)((data_len > 0) ? log(M_LN2 * (real)total_error_4 / (real) data_len) / M_LN2 : 0.0);
+
+ return order;
+}
+
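The log()/M_LN2 expressions above estimate the expected bits per residual sample as log2(ln(2) * E(|x|)), taking total_error_n / data_len as E(|x|). One way to read the constant: if the residual is modeled as a Laplacian source, the Rice parameter k that minimizes the expected code length satisfies roughly 2^k = ln(2) * E(|x|), so log2(ln(2) * E(|x|)) approximates the per-sample cost. A standalone numeric check of the formula, not part of the patch:

    /* Hypothetical check of the estimate used above: for a mean
     * absolute residual of 1000, log2(ln(2) * 1000) is about 9.44,
     * i.e. the encoder expects roughly 9.44 bits per residual sample.
     * Compile with -lm.
     */
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double mean_abs_error = 1000.0; /* stand-in for total_error_n / data_len */
        printf("%f\n", log(M_LN2 * mean_abs_error) / M_LN2); /* prints ~9.44 */
        return 0;
    }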
+unsigned FLAC__fixed_compute_best_predictor_slow(const int32 data[], unsigned data_len, real residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
+{
+ int32 last_error_0 = data[-1];
+ int32 last_error_1 = data[-1] - data[-2];
+ int32 last_error_2 = last_error_1 - (data[-2] - data[-3]);
+ int32 last_error_3 = last_error_2 - (data[-2] - 2*data[-3] + data[-4]);
+ int32 error_0, error_1, error_2, error_3, error_4;
+ /* total_error_* are 64 bits wide to avoid overflow when encoding
+ * erratic signals with a large bits-per-sample and blocksize.
+ */
+ uint64 total_error_0 = 0, total_error_1 = 0, total_error_2 = 0, total_error_3 = 0, total_error_4 = 0;
+ unsigned i, order;
+
+ for(i = 0; i < data_len; i++) {
+ error_0 = data[i] ; total_error_0 += local_abs(error_0);
+ error_1 = error_0 - last_error_0; total_error_1 += local_abs(error_1);
+ error_2 = error_1 - last_error_1; total_error_2 += local_abs(error_2);
+ error_3 = error_2 - last_error_2; total_error_3 += local_abs(error_3);
+ error_4 = error_3 - last_error_3; total_error_4 += local_abs(error_4);
+ last_error_0 = error_0;
+ last_error_1 = error_1;
+ last_error_2 = error_2;
+ last_error_3 = error_3;
+ }
+
+ if(total_error_0 < min(min(min(total_error_1, total_error_2), total_error_3), total_error_4))
+ order = 0;
+ else if(total_error_1 < min(min(total_error_2, total_error_3), total_error_4))
+ order = 1;
+ else if(total_error_2 < min(total_error_3, total_error_4))
+ order = 2;
+ else if(total_error_3 < total_error_4)
+ order = 3;
+ else
+ order = 4;
+
+ /* Estimate the expected number of bits per residual signal sample. */
+ /* 'total_error*' is linearly related to the variance of the residual */
+ /* signal, so we use it directly to compute E(|x|) */
+ residual_bits_per_sample[0] = (real)((data_len > 0) ? log(M_LN2 * (real)total_error_0 / (real) data_len) / M_LN2 : 0.0);
+ residual_bits_per_sample[1] = (real)((data_len > 0) ? log(M_LN2 * (real)total_error_1 / (real) data_len) / M_LN2 : 0.0);
+ residual_bits_per_sample[2] = (real)((data_len > 0) ? log(M_LN2 * (real)total_error_2 / (real) data_len) / M_LN2 : 0.0);
+ residual_bits_per_sample[3] = (real)((data_len > 0) ? log(M_LN2 * (real)total_error_3 / (real) data_len) / M_LN2 : 0.0);
+ residual_bits_per_sample[4] = (real)((data_len > 0) ? log(M_LN2 * (real)total_error_4 / (real) data_len) / M_LN2 : 0.0);
+
+ return order;
+}
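The uint64 accumulators above address a concrete worst case: the order-4 residual is data[i] - 4*data[i-1] + 6*data[i-2] - 4*data[i-3] + data[i-4], whose coefficient magnitudes sum to 16, so for b-bit input |error_4| can reach 16 * 2^(b-1) = 2^(b+3), and total_error_4 can approach 2^(b + 3 + log2(data_len)). A 32-bit total is therefore only guaranteed not to overflow while b + log2(blocksize) stays below about 29; the "> 30" threshold in the header comment below is the statistical version of that bound. A sketch of the corresponding safety test, with hypothetical names, not part of the patch:

    /* Conservative worst-case check for whether the 32-bit totals of
     * the fast version are guaranteed not to overflow.
     */
    static int fixed_totals_fit_in_32_bits(unsigned bits_per_sample, unsigned blocksize)
    {
        unsigned log2_blocksize = 0; /* floor(log2(blocksize)) */
        while(blocksize > 1) {
            blocksize >>= 1;
            log2_blocksize++;
        }
        return bits_per_sample + 3 + log2_blocksize < 32;
    }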
* FLAC__fixed_compute_best_predictor()
* --------------------------------------------------------------------
* Compute the best fixed predictor and the expected bits-per-sample
- * of the residual signal for each order.
+ * of the residual signal for each order. The _slow() version uses
+ * 64-bit integers, which is statistically necessary when bits-per-
+ * sample + log2(blocksize) > 30.
*
* IN data[0,data_len-1]
* IN data_len
* OUT residual_bits_per_sample[0,FLAC__MAX_FIXED_ORDER]
*/
unsigned FLAC__fixed_compute_best_predictor(const int32 data[], unsigned data_len, real residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1]);
+unsigned FLAC__fixed_compute_best_predictor_slow(const int32 data[], unsigned data_len, real residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1]);
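Given the threshold in the comment above, a caller would dispatch between the two variants on bits-per-sample + log2(blocksize). A usage sketch, not from the patch; bps, blocksize, data and rbps are hypothetical encoder-side variables, and ilog2() is a stand-in for a floor(log2(x)) helper:

    unsigned order;
    if(bps + ilog2(blocksize) > 30) /* per the comment above */
        order = FLAC__fixed_compute_best_predictor_slow(data, blocksize, rbps);
    else
        order = FLAC__fixed_compute_best_predictor(data, blocksize, rbps);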
/*
* FLAC__fixed_compute_residual()