1 /* libFLAC - Free Lossless Audio Codec library
2 * Copyright (C) 2000-2009 Josh Coalson
3 * Copyright (C) 2011-2022 Xiph.Org Foundation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * - Neither the name of the Xiph.org Foundation nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
24 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
27 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
28 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
29 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
30 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #include "share/compat.h"
40 #include "private/bitmath.h"
41 #include "private/fixed.h"
42 #include "private/macros.h"
43 #include "FLAC/assert.h"
/* Absolute value of a signed expression, as uint32_t.
 * NOTE(review): -(x) is signed-overflow UB when x == INT32_MIN; the
 * limit_residual paths below range-check residuals against INT32_MAX
 * for this reason -- confirm the non-limit paths cannot see INT32_MIN. */
#define local_abs(x) ((uint32_t)((x)<0? -(x) : (x)))
/* 64-bit counterpart of local_abs, for the wide (33-bit sample) paths. */
#define local_abs64(x) ((uint64_t)((x)<0? -(x) : (x)))
#ifdef FLAC__INTEGER_ONLY_LIBRARY
/* rbps stands for residual bits per sample
 *
 *                 err * ln(2)
 *     rbps = log (-----------)
 *               2(     n     )
 *
 * i.e. an estimate of the expected residual magnitude in bits, returned
 * as a FLAC__fixedpoint with 16 fractional bits.
 * NOTE(review): several statements of this function appear elided from
 * this view (opening brace, declaration of rbps, the fixed-point divide
 * err = (err << fracbits) / n, the >16-bit whittling branch, the
 * fracbits%4 adjustment using `f`, and the fracbits == 16 return path)
 * -- consult the full source before modifying. */
static FLAC__fixedpoint local__compute_rbps_integerized(FLAC__uint32 err, FLAC__uint32 n)
	uint32_t bits; /* the number of bits required to represent a number */
	int fracbits; /* the number of bits of rbps that comprise the fractional part */

	FLAC__ASSERT(sizeof(rbps) == sizeof(FLAC__fixedpoint));
	FLAC__ASSERT(err > 0);
	FLAC__ASSERT(n <= FLAC__MAX_BLOCK_SIZE);
	/*
	 * The above two things tell us 1) n fits in 16 bits; 2) err/n > 1.
	 * These allow us later to know we won't lose too much precision in the
	 * fixed-point division (err<<fracbits)/n.
	 */
	/* Use every bit left free by the magnitude of err for the fraction. */
	fracbits = (8*sizeof(err)) - (FLAC__bitmath_ilog2(err)+1);

	/* err now holds err/n with fracbits fractional bits */

	/*
	 * Whittle err down to 16 bits max. 16 significant bits is enough for
	 * the precision needed here.
	 */
	FLAC__ASSERT(err > 0);
	bits = FLAC__bitmath_ilog2(err)+1;
	fracbits -= (bits-16);
	rbps = (FLAC__uint32)err;

	/* Multiply by fixed-point version of ln(2), with 16 fractional bits */
	rbps *= FLAC__FP_LN2;
	FLAC__ASSERT(fracbits >= 0);

	/* FLAC__fixedpoint_log2 requires fracbits%4 to be 0 */
	const int f = fracbits & 3;
	rbps = FLAC__fixedpoint_log2(rbps, fracbits, (uint32_t)(-1));

	/*
	 * The return value must have 16 fractional bits. Since the whole part
	 * of the base-2 log of a 32 bit number must fit in 5 bits, and fracbits
	 * must be >= -3, these assertions allow us to shift rbps
	 * left if necessary to get 16 fracbits without losing any bits of the
	 * whole part of rbps.
	 *
	 * There is a slight chance due to accumulated error that the whole part
	 * will require 6 bits, so we use 6 in the assertion. Really though as
	 * long as it fits in 13 bits (32 - (16 - (-3))) we are fine.
	 */
	FLAC__ASSERT((int)FLAC__bitmath_ilog2(rbps)+1 <= fracbits + 6);
	FLAC__ASSERT(fracbits >= -3);

	/* now shift the decimal point into place */
	return rbps << (16-fracbits);
	else if(fracbits > 16)
		return rbps >> (fracbits-16);
/* 64-bit-error variant of local__compute_rbps_integerized, for the wide
 * accumulators used with large bits-per-sample/blocksize. Same contract:
 * returns the expected residual bits-per-sample as a FLAC__fixedpoint
 * with 16 fractional bits.
 * NOTE(review): like the 32-bit variant above, several interior lines
 * (braces, rbps declaration, the divide, the whittling branch, the
 * fracbits%4 adjustment and the fracbits == 16 return) appear elided
 * from this view. */
static FLAC__fixedpoint local__compute_rbps_wide_integerized(FLAC__uint64 err, FLAC__uint32 n)
	uint32_t bits; /* the number of bits required to represent a number */
	int fracbits; /* the number of bits of rbps that comprise the fractional part */

	FLAC__ASSERT(sizeof(rbps) == sizeof(FLAC__fixedpoint));
	FLAC__ASSERT(err > 0);
	FLAC__ASSERT(n <= FLAC__MAX_BLOCK_SIZE);
	/*
	 * The above two things tell us 1) n fits in 16 bits; 2) err/n > 1.
	 * These allow us later to know we won't lose too much precision in the
	 * fixed-point division (err<<fracbits)/n.
	 */
	/* Use every bit left free by the magnitude of err for the fraction. */
	fracbits = (8*sizeof(err)) - (FLAC__bitmath_ilog2_wide(err)+1);

	/* err now holds err/n with fracbits fractional bits */

	/*
	 * Whittle err down to 16 bits max. 16 significant bits is enough for
	 * the precision needed here.
	 */
	FLAC__ASSERT(err > 0);
	bits = FLAC__bitmath_ilog2_wide(err)+1;
	fracbits -= (bits-16);
	rbps = (FLAC__uint32)err;

	/* Multiply by fixed-point version of ln(2), with 16 fractional bits */
	rbps *= FLAC__FP_LN2;
	FLAC__ASSERT(fracbits >= 0);

	/* FLAC__fixedpoint_log2 requires fracbits%4 to be 0 */
	const int f = fracbits & 3;
	rbps = FLAC__fixedpoint_log2(rbps, fracbits, (uint32_t)(-1));

	/*
	 * The return value must have 16 fractional bits. Since the whole part
	 * of the base-2 log of a 32 bit number must fit in 5 bits, and fracbits
	 * must be >= -3, these assertions allow us to shift rbps
	 * left if necessary to get 16 fracbits without losing any bits of the
	 * whole part of rbps.
	 *
	 * There is a slight chance due to accumulated error that the whole part
	 * will require 6 bits, so we use 6 in the assertion. Really though as
	 * long as it fits in 13 bits (32 - (16 - (-3))) we are fine.
	 */
	FLAC__ASSERT((int)FLAC__bitmath_ilog2(rbps)+1 <= fracbits + 6);
	FLAC__ASSERT(fracbits >= -3);

	/* now shift the decimal point into place */
	return rbps << (16-fracbits);
	else if(fracbits > 16)
		return rbps >> (fracbits-16);
#ifndef FLAC__INTEGER_ONLY_LIBRARY
/* Picks the fixed-predictor order (0..4) with the smallest total absolute
 * error over data[0..data_len-1] and fills residual_bits_per_sample[0..4]
 * with an estimated cost, in bits per sample, for each order.
 * data[] must have at least FLAC__MAX_FIXED_ORDER warm-up samples before
 * index 0 (negative indices are read).
 * NOTE(review): this view is missing the #else between the two alternative
 * prototypes, the function braces, the preprocessor branch selecting
 * between the two accumulation loops, the `order = N;` assignments in the
 * selection chain, and the final `return order;`. */
uint32_t FLAC__fixed_compute_best_predictor(const FLAC__int32 data[], uint32_t data_len, float residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
uint32_t FLAC__fixed_compute_best_predictor(const FLAC__int32 data[], uint32_t data_len, FLAC__fixedpoint residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
	FLAC__uint32 total_error_0 = 0, total_error_1 = 0, total_error_2 = 0, total_error_3 = 0, total_error_4 = 0;
	/* This code has been around a long time, and was written when compilers weren't able
	 * to vectorize code. These days, compilers are better in optimizing the next block
	 * which is also much more readable
	 */
	FLAC__int32 last_error_0 = data[-1];
	FLAC__int32 last_error_1 = data[-1] - data[-2];
	FLAC__int32 last_error_2 = last_error_1 - (data[-2] - data[-3]);
	FLAC__int32 last_error_3 = last_error_2 - (data[-2] - 2*data[-3] + data[-4]);
	FLAC__int32 error, save;
	/* total_error_* are 64-bits to avoid overflow when encoding
	 * erratic signals when the bits-per-sample and blocksize are
	 * large.
	 * NOTE(review): in this (non-wide) function the totals are declared
	 * FLAC__uint32 above; this comment looks like it belongs to the
	 * _wide variant -- confirm against the full source. */
	/* Cascaded differencing: each subtraction turns the order-k error
	 * into the order-(k+1) error, so one sweep updates all five totals. */
	for(uint32_t i = 0; i < data_len; i++) {
		error = data[i] ; total_error_0 += local_abs(error); save = error;
		error -= last_error_0; total_error_1 += local_abs(error); last_error_0 = save; save = error;
		error -= last_error_1; total_error_2 += local_abs(error); last_error_1 = save; save = error;
		error -= last_error_2; total_error_3 += local_abs(error); last_error_2 = save; save = error;
		error -= last_error_3; total_error_4 += local_abs(error); last_error_3 = save;
	/* Alternative, readable/auto-vectorizable accumulation; the
	 * preprocessor lines selecting between the two loops are elided here. */
	for(int i = 0; i < (int)data_len; i++) {
		total_error_0 += local_abs(data[i]);
		total_error_1 += local_abs(data[i] - data[i-1]);
		total_error_2 += local_abs(data[i] - 2 * data[i-1] + data[i-2]);
		total_error_3 += local_abs(data[i] - 3 * data[i-1] + 3 * data[i-2] - data[i-3]);
		total_error_4 += local_abs(data[i] - 4 * data[i-1] + 6 * data[i-2] - 4 * data[i-3] + data[i-4]);

	/* prefer lower order */
	if(total_error_0 <= flac_min(flac_min(flac_min(total_error_1, total_error_2), total_error_3), total_error_4))
	else if(total_error_1 <= flac_min(flac_min(total_error_2, total_error_3), total_error_4))
	else if(total_error_2 <= flac_min(total_error_3, total_error_4))
	else if(total_error_3 <= total_error_4)

	/* Estimate the expected number of bits per residual signal sample. */
	/* 'total_error*' is linearly related to the variance of the residual */
	/* signal, so we use it directly to compute E(|x|) */
	FLAC__ASSERT(data_len > 0 || total_error_0 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_1 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_2 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_3 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_4 == 0);
#ifndef FLAC__INTEGER_ONLY_LIBRARY
	residual_bits_per_sample[0] = (float)((total_error_0 > 0) ? log(M_LN2 * (double)total_error_0 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[1] = (float)((total_error_1 > 0) ? log(M_LN2 * (double)total_error_1 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[2] = (float)((total_error_2 > 0) ? log(M_LN2 * (double)total_error_2 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[3] = (float)((total_error_3 > 0) ? log(M_LN2 * (double)total_error_3 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[4] = (float)((total_error_4 > 0) ? log(M_LN2 * (double)total_error_4 / (double)data_len) / M_LN2 : 0.0);
	/* Integer-only build: same estimate computed in fixed point.
	 * (The #else selecting this branch is elided in this view.) */
	residual_bits_per_sample[0] = (total_error_0 > 0) ? local__compute_rbps_integerized(total_error_0, data_len) : 0;
	residual_bits_per_sample[1] = (total_error_1 > 0) ? local__compute_rbps_integerized(total_error_1, data_len) : 0;
	residual_bits_per_sample[2] = (total_error_2 > 0) ? local__compute_rbps_integerized(total_error_2, data_len) : 0;
	residual_bits_per_sample[3] = (total_error_3 > 0) ? local__compute_rbps_integerized(total_error_3, data_len) : 0;
	residual_bits_per_sample[4] = (total_error_4 > 0) ? local__compute_rbps_integerized(total_error_4, data_len) : 0;
#ifndef FLAC__INTEGER_ONLY_LIBRARY
/* Wide variant of FLAC__fixed_compute_best_predictor: uses FLAC__uint64
 * accumulators so totals cannot overflow for large bits-per-sample and
 * blocksize combinations. Same contract otherwise: returns the best
 * fixed-predictor order (0..4) and fills residual_bits_per_sample[0..4].
 * NOTE(review): the #else between the prototypes, the function braces,
 * the `order = N;` assignments and `return order;` are elided here. */
uint32_t FLAC__fixed_compute_best_predictor_wide(const FLAC__int32 data[], uint32_t data_len, float residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
uint32_t FLAC__fixed_compute_best_predictor_wide(const FLAC__int32 data[], uint32_t data_len, FLAC__fixedpoint residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
	FLAC__uint64 total_error_0 = 0, total_error_1 = 0, total_error_2 = 0, total_error_3 = 0, total_error_4 = 0;
	/* Accumulate absolute errors of all five fixed predictors in one pass;
	 * data[] must provide warm-up samples at negative indices. */
	for(int i = 0; i < (int)data_len; i++) {
		total_error_0 += local_abs(data[i]);
		total_error_1 += local_abs(data[i] - data[i-1]);
		total_error_2 += local_abs(data[i] - 2 * data[i-1] + data[i-2]);
		total_error_3 += local_abs(data[i] - 3 * data[i-1] + 3 * data[i-2] - data[i-3]);
		total_error_4 += local_abs(data[i] - 4 * data[i-1] + 6 * data[i-2] - 4 * data[i-3] + data[i-4]);

	/* prefer lower order */
	if(total_error_0 <= flac_min(flac_min(flac_min(total_error_1, total_error_2), total_error_3), total_error_4))
	else if(total_error_1 <= flac_min(flac_min(total_error_2, total_error_3), total_error_4))
	else if(total_error_2 <= flac_min(total_error_3, total_error_4))
	else if(total_error_3 <= total_error_4)

	/* Estimate the expected number of bits per residual signal sample. */
	/* 'total_error*' is linearly related to the variance of the residual */
	/* signal, so we use it directly to compute E(|x|) */
	FLAC__ASSERT(data_len > 0 || total_error_0 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_1 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_2 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_3 == 0);
	FLAC__ASSERT(data_len > 0 || total_error_4 == 0);
#ifndef FLAC__INTEGER_ONLY_LIBRARY
	residual_bits_per_sample[0] = (float)((total_error_0 > 0) ? log(M_LN2 * (double)total_error_0 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[1] = (float)((total_error_1 > 0) ? log(M_LN2 * (double)total_error_1 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[2] = (float)((total_error_2 > 0) ? log(M_LN2 * (double)total_error_2 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[3] = (float)((total_error_3 > 0) ? log(M_LN2 * (double)total_error_3 / (double)data_len) / M_LN2 : 0.0);
	residual_bits_per_sample[4] = (float)((total_error_4 > 0) ? log(M_LN2 * (double)total_error_4 / (double)data_len) / M_LN2 : 0.0);
	/* Integer-only build: same estimate in fixed point (the #else selecting
	 * this branch is elided in this view). */
	residual_bits_per_sample[0] = (total_error_0 > 0) ? local__compute_rbps_wide_integerized(total_error_0, data_len) : 0;
	residual_bits_per_sample[1] = (total_error_1 > 0) ? local__compute_rbps_wide_integerized(total_error_1, data_len) : 0;
	residual_bits_per_sample[2] = (total_error_2 > 0) ? local__compute_rbps_wide_integerized(total_error_2, data_len) : 0;
	residual_bits_per_sample[3] = (total_error_3 > 0) ? local__compute_rbps_wide_integerized(total_error_3, data_len) : 0;
	residual_bits_per_sample[4] = (total_error_4 > 0) ? local__compute_rbps_wide_integerized(total_error_4, data_len) : 0;
352 #ifndef FLAC__INTEGER_ONLY_LIBRARY
353 #define CHECK_ORDER_IS_VALID(macro_order) \
354 if(order_##macro_order##_is_valid && total_error_##macro_order < smallest_error) { \
355 order = macro_order; \
356 smallest_error = total_error_##macro_order ; \
357 residual_bits_per_sample[ macro_order ] = (float)((total_error_0 > 0) ? log(M_LN2 * (double)total_error_0 / (double)data_len) / M_LN2 : 0.0); \
360 residual_bits_per_sample[ macro_order ] = 34.0f;
362 #define CHECK_ORDER_IS_VALID(macro_order) \
363 if(order_##macro_order##_is_valid && total_error_##macro_order < smallest_error) { \
364 order = macro_order; \
365 smallest_error = total_error_##macro_order ; \
366 residual_bits_per_sample[ macro_order ] = (total_error_##macro_order > 0) ? local__compute_rbps_wide_integerized(total_error_##macro_order, data_len) : 0; \
369 residual_bits_per_sample[ macro_order ] = 34 * FLAC__FP_ONE;
#ifndef FLAC__INTEGER_ONLY_LIBRARY
/* Like FLAC__fixed_compute_best_predictor_wide, but additionally rejects
 * any order that produced a per-sample residual too large to store in a
 * 32-bit residual (|residual| > INT32_MAX). Early samples (i < order)
 * contribute 0 so warm-up samples are not read out of range.
 * NOTE(review): the #else between the prototypes, the function braces,
 * the `uint32_t order = 0;` declaration, the loop-closing brace and the
 * final `return order;` are elided from this view. */
uint32_t FLAC__fixed_compute_best_predictor_limit_residual(const FLAC__int32 data[], uint32_t data_len, float residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
uint32_t FLAC__fixed_compute_best_predictor_limit_residual(const FLAC__int32 data[], uint32_t data_len, FLAC__fixedpoint residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
	FLAC__uint64 total_error_0 = 0, total_error_1 = 0, total_error_2 = 0, total_error_3 = 0, total_error_4 = 0, smallest_error = UINT64_MAX;
	FLAC__uint64 error_0, error_1, error_2, error_3, error_4;
	FLAC__bool order_0_is_valid = true, order_1_is_valid = true, order_2_is_valid = true, order_3_is_valid = true, order_4_is_valid = true;
	/* Per-sample absolute errors for all five orders; arithmetic is done
	 * in 64 bits so intermediate differences cannot overflow. */
	for(int i = 0; i < (int)data_len; i++) {
		error_0 = local_abs64((FLAC__int64)data[i]);
		error_1 = (i > 0) ? local_abs64((FLAC__int64)data[i] - data[i-1]) : 0 ;
		error_2 = (i > 1) ? local_abs64((FLAC__int64)data[i] - 2 * (FLAC__int64)data[i-1] + data[i-2]) : 0;
		error_3 = (i > 2) ? local_abs64((FLAC__int64)data[i] - 3 * (FLAC__int64)data[i-1] + 3 * (FLAC__int64)data[i-2] - data[i-3]) : 0;
		error_4 = (i > 3) ? local_abs64((FLAC__int64)data[i] - 4 * (FLAC__int64)data[i-1] + 6 * (FLAC__int64)data[i-2] - 4 * (FLAC__int64)data[i-3] + data[i-4]) : 0;

		total_error_0 += error_0;
		total_error_1 += error_1;
		total_error_2 += error_2;
		total_error_3 += error_3;
		total_error_4 += error_4;

		/* residual must not be INT32_MIN because abs(INT32_MIN) is undefined */
		if(error_0 > INT32_MAX)
			order_0_is_valid = false;
		if(error_1 > INT32_MAX)
			order_1_is_valid = false;
		if(error_2 > INT32_MAX)
			order_2_is_valid = false;
		if(error_3 > INT32_MAX)
			order_3_is_valid = false;
		if(error_4 > INT32_MAX)
			order_4_is_valid = false;

	/* Pick the valid order with the smallest total error; lower orders
	 * win ties because they are checked first against smallest_error. */
	CHECK_ORDER_IS_VALID(0);
	CHECK_ORDER_IS_VALID(1);
	CHECK_ORDER_IS_VALID(2);
	CHECK_ORDER_IS_VALID(3);
	CHECK_ORDER_IS_VALID(4);
#ifndef FLAC__INTEGER_ONLY_LIBRARY
/* 33-bit-sample variant of FLAC__fixed_compute_best_predictor_limit_residual:
 * input samples are FLAC__int64 (33-bit audio), but residuals must still
 * fit a 32-bit store, so any order producing |residual| > INT32_MAX is
 * invalidated. Early samples (i < order) contribute 0.
 * NOTE(review): the #else between the prototypes, the function braces,
 * the `uint32_t order = 0;` declaration, the loop-closing brace and the
 * final `return order;` are elided from this view. */
uint32_t FLAC__fixed_compute_best_predictor_limit_residual_33bit(const FLAC__int64 data[], uint32_t data_len, float residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
uint32_t FLAC__fixed_compute_best_predictor_limit_residual_33bit(const FLAC__int64 data[], uint32_t data_len, FLAC__fixedpoint residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
	FLAC__uint64 total_error_0 = 0, total_error_1 = 0, total_error_2 = 0, total_error_3 = 0, total_error_4 = 0, smallest_error = UINT64_MAX;
	FLAC__uint64 error_0, error_1, error_2, error_3, error_4;
	FLAC__bool order_0_is_valid = true, order_1_is_valid = true, order_2_is_valid = true, order_3_is_valid = true, order_4_is_valid = true;
	/* Per-sample absolute errors for all five orders, in 64-bit arithmetic. */
	for(int i = 0; i < (int)data_len; i++) {
		error_0 = local_abs64(data[i]);
		error_1 = (i > 0) ? local_abs64(data[i] - data[i-1]) : 0 ;
		error_2 = (i > 1) ? local_abs64(data[i] - 2 * data[i-1] + data[i-2]) : 0;
		error_3 = (i > 2) ? local_abs64(data[i] - 3 * data[i-1] + 3 * data[i-2] - data[i-3]) : 0;
		error_4 = (i > 3) ? local_abs64(data[i] - 4 * data[i-1] + 6 * data[i-2] - 4 * data[i-3] + data[i-4]) : 0;

		total_error_0 += error_0;
		total_error_1 += error_1;
		total_error_2 += error_2;
		total_error_3 += error_3;
		total_error_4 += error_4;

		/* residual must not be INT32_MIN because abs(INT32_MIN) is undefined */
		if(error_0 > INT32_MAX)
			order_0_is_valid = false;
		if(error_1 > INT32_MAX)
			order_1_is_valid = false;
		if(error_2 > INT32_MAX)
			order_2_is_valid = false;
		if(error_3 > INT32_MAX)
			order_3_is_valid = false;
		if(error_4 > INT32_MAX)
			order_4_is_valid = false;

	/* Pick the valid order with the smallest total error (lower order wins ties). */
	CHECK_ORDER_IS_VALID(0);
	CHECK_ORDER_IS_VALID(1);
	CHECK_ORDER_IS_VALID(2);
	CHECK_ORDER_IS_VALID(3);
	CHECK_ORDER_IS_VALID(4);
/* Computes the residual signal for the given fixed-predictor order:
 * residual[i] = data[i] minus the order-th difference prediction.
 * data[] must provide `order` warm-up samples at negative indices.
 * NOTE(review): the function braces and the switch(order)/case labels
 * that select among the loops below (and the `break`s/default assert)
 * are elided from this view. */
void FLAC__fixed_compute_residual(const FLAC__int32 data[], uint32_t data_len, uint32_t order, FLAC__int32 residual[])
	const int idata_len = (int)data_len;
	/* order 0: the residual is the signal itself */
	FLAC__ASSERT(sizeof(residual[0]) == sizeof(data[0]));
	memcpy(residual, data, sizeof(residual[0])*data_len);
	/* order 1: first difference */
	for(i = 0; i < idata_len; i++)
		residual[i] = data[i] - data[i-1];
	/* order 2: second difference */
	for(i = 0; i < idata_len; i++)
		residual[i] = data[i] - 2*data[i-1] + data[i-2];
	/* order 3: third difference */
	for(i = 0; i < idata_len; i++)
		residual[i] = data[i] - 3*data[i-1] + 3*data[i-2] - data[i-3];
	/* order 4: fourth difference */
	for(i = 0; i < idata_len; i++)
		residual[i] = data[i] - 4*data[i-1] + 6*data[i-2] - 4*data[i-3] + data[i-4];
/* Wide variant of FLAC__fixed_compute_residual: intermediate arithmetic
 * is done in FLAC__int64 so the differences cannot overflow 32 bits for
 * high-bit-depth input; the final residual is stored as FLAC__int32
 * (callers guarantee it fits -- see the limit_residual selection above).
 * NOTE(review): function braces and the switch(order)/case structure are
 * elided from this view. */
void FLAC__fixed_compute_residual_wide(const FLAC__int32 data[], uint32_t data_len, uint32_t order, FLAC__int32 residual[])
	const int idata_len = (int)data_len;
	/* order 0: the residual is the signal itself */
	FLAC__ASSERT(sizeof(residual[0]) == sizeof(data[0]));
	memcpy(residual, data, sizeof(residual[0])*data_len);
	/* order 1 */
	for(i = 0; i < idata_len; i++)
		residual[i] = (FLAC__int64)data[i] - data[i-1];
	/* order 2 */
	for(i = 0; i < idata_len; i++)
		residual[i] = (FLAC__int64)data[i] - 2*(FLAC__int64)data[i-1] + data[i-2];
	/* order 3 */
	for(i = 0; i < idata_len; i++)
		residual[i] = (FLAC__int64)data[i] - 3*(FLAC__int64)data[i-1] + 3*(FLAC__int64)data[i-2] - data[i-3];
	/* order 4 */
	for(i = 0; i < idata_len; i++)
		residual[i] = (FLAC__int64)data[i] - 4*(FLAC__int64)data[i-1] + 6*(FLAC__int64)data[i-2] - 4*(FLAC__int64)data[i-3] + data[i-4];
/* 33-bit-sample variant: input data[] is FLAC__int64, residual is stored
 * as FLAC__int32 (callers guarantee each residual fits 32 bits -- see the
 * limit_residual_33bit selection above). No memcpy for order 0 because
 * the element sizes differ.
 * NOTE(review): function braces and the switch(order)/case structure are
 * elided from this view. */
void FLAC__fixed_compute_residual_wide_33bit(const FLAC__int64 data[], uint32_t data_len, uint32_t order, FLAC__int32 residual[])
	const int idata_len = (int)data_len;
	/* order 0: element-wise narrowing copy */
	for(i = 0; i < idata_len; i++)
		residual[i] = data[i];
	/* order 1 */
	for(i = 0; i < idata_len; i++)
		residual[i] = data[i] - data[i-1];
	/* order 2 */
	for(i = 0; i < idata_len; i++)
		residual[i] = data[i] - 2*data[i-1] + data[i-2];
	/* order 3 */
	for(i = 0; i < idata_len; i++)
		residual[i] = data[i] - 3*data[i-1] + 3*data[i-2] - data[i-3];
	/* order 4 */
	for(i = 0; i < idata_len; i++)
		residual[i] = data[i] - 4*data[i-1] + 6*data[i-2] - 4*data[i-3] + data[i-4];
#ifdef FUZZING_BUILD_MODE_NO_SANITIZE_SIGNED_INTEGER_OVERFLOW
/* The attribute below is to silence the undefined sanitizer of oss-fuzz.
 * Because fuzzing feeds bogus predictors and residual samples to the
 * decoder, having overflows in this section is unavoidable. Also,
 * because the calculated values are audio path only, there is no
 * potential for security problems */
__attribute__((no_sanitize("signed-integer-overflow")))
/* Inverse of FLAC__fixed_compute_residual: reconstructs data[] from the
 * residual for the given order. data[] must hold `order` warm-up samples
 * at negative indices before the call.
 * NOTE(review): function braces and the switch(order)/case structure are
 * elided from this view. */
void FLAC__fixed_restore_signal(const FLAC__int32 residual[], uint32_t data_len, uint32_t order, FLAC__int32 data[])
	int i, idata_len = (int)data_len;
	/* order 0: the signal is the residual itself */
	FLAC__ASSERT(sizeof(residual[0]) == sizeof(data[0]));
	memcpy(data, residual, sizeof(residual[0])*data_len);
	/* order 1 */
	for(i = 0; i < idata_len; i++)
		data[i] = residual[i] + data[i-1];
	/* order 2 */
	for(i = 0; i < idata_len; i++)
		data[i] = residual[i] + 2*data[i-1] - data[i-2];
	/* order 3 */
	for(i = 0; i < idata_len; i++)
		data[i] = residual[i] + 3*data[i-1] - 3*data[i-2] + data[i-3];
	/* order 4 */
	for(i = 0; i < idata_len; i++)
		data[i] = residual[i] + 4*data[i-1] - 6*data[i-2] + 4*data[i-3] - data[i-4];
/* Wide variant of FLAC__fixed_restore_signal: intermediate sums are
 * computed in FLAC__int64 to avoid 32-bit overflow, then stored back to
 * FLAC__int32 data[] (valid streams guarantee the result fits).
 * NOTE(review): function braces and the switch(order)/case structure are
 * elided from this view. */
void FLAC__fixed_restore_signal_wide(const FLAC__int32 residual[], uint32_t data_len, uint32_t order, FLAC__int32 data[])
	int i, idata_len = (int)data_len;
	/* order 0: the signal is the residual itself */
	FLAC__ASSERT(sizeof(residual[0]) == sizeof(data[0]));
	memcpy(data, residual, sizeof(residual[0])*data_len);
	/* order 1 */
	for(i = 0; i < idata_len; i++)
		data[i] = (FLAC__int64)residual[i] + (FLAC__int64)data[i-1];
	/* order 2 */
	for(i = 0; i < idata_len; i++)
		data[i] = (FLAC__int64)residual[i] + 2*(FLAC__int64)data[i-1] - (FLAC__int64)data[i-2];
	/* order 3 */
	for(i = 0; i < idata_len; i++)
		data[i] = (FLAC__int64)residual[i] + 3*(FLAC__int64)data[i-1] - 3*(FLAC__int64)data[i-2] + (FLAC__int64)data[i-3];
	/* order 4 */
	for(i = 0; i < idata_len; i++)
		data[i] = (FLAC__int64)residual[i] + 4*(FLAC__int64)data[i-1] - 6*(FLAC__int64)data[i-2] + 4*(FLAC__int64)data[i-3] - (FLAC__int64)data[i-4];
#ifdef FUZZING_BUILD_MODE_NO_SANITIZE_SIGNED_INTEGER_OVERFLOW
/* The attribute below is to silence the undefined sanitizer of oss-fuzz.
 * Because fuzzing feeds bogus predictors and residual samples to the
 * decoder, having overflows in this section is unavoidable. Also,
 * because the calculated values are audio path only, there is no
 * potential for security problems */
__attribute__((no_sanitize("signed-integer-overflow")))
/* 33-bit-sample variant: reconstructs FLAC__int64 data[] from 32-bit
 * residuals; all arithmetic is in 64 bits. data[] must hold `order`
 * warm-up samples at negative indices.
 * NOTE(review): function braces and the switch(order)/case structure are
 * elided from this view. */
void FLAC__fixed_restore_signal_wide_33bit(const FLAC__int32 residual[], uint32_t data_len, uint32_t order, FLAC__int64 data[])
	int i, idata_len = (int)data_len;
	/* order 0: element-wise widening copy */
	for(i = 0; i < idata_len; i++)
		data[i] = residual[i];
	/* order 1 */
	for(i = 0; i < idata_len; i++)
		data[i] = (FLAC__int64)residual[i] + data[i-1];
	/* order 2 */
	for(i = 0; i < idata_len; i++)
		data[i] = (FLAC__int64)residual[i] + 2*data[i-1] - data[i-2];
	/* order 3 */
	for(i = 0; i < idata_len; i++)
		data[i] = (FLAC__int64)residual[i] + 3*data[i-1] - 3*data[i-2] + data[i-3];
	/* order 4 */
	for(i = 0; i < idata_len; i++)
		data[i] = (FLAC__int64)residual[i] + 4*data[i-1] - 6*data[i-2] + 4*data[i-3] - data[i-4];