size_t frames;
/* Currently, webrtc uses a fixed-size (10 ms) buffer */
- int loops;
size_t fixed_bytes;
size_t fixed_frames;
};
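For context (not part of this patch): webrtc's AudioProcessing consumes audio in fixed 10 ms blocks, which is what fixed_frames and fixed_bytes describe. A rough sizing sketch; webrtc_block_bytes and its int parameter types are hypothetical, only audio_effect_util_get_frame_size comes from this code:

    /* Hypothetical illustration of the 10 ms quantum, assuming S16LE input. */
    static size_t webrtc_block_bytes(int rate, int format, int channels)
    {
        size_t frames = (size_t)rate / 100;                                  /* 10 ms of frames: 480 at 48 kHz */
        return frames * audio_effect_util_get_frame_size(format, channels);  /* 480 * 4 = 1920 bytes for S16LE stereo */
    }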
u->fixed_bytes = fixed_bytes;
u->fixed_frames = fixed_bytes / audio_effect_util_get_frame_size(format, channels);
- u->loops = request_bytes / fixed_bytes;
config.Set<ExperimentalNs>(new ExperimentalNs(false));
config.Set<Intelligibility>(new Intelligibility(false));
allocate_stream_buffer(u, u->fixed_frames, channels);
- LOG_INFO("webrtc processes init");
+ LOG_INFO("webrtc process init. fixed_frames(%zu), fixed_bytes(%zu)", u->fixed_frames, u->fixed_bytes);
return (void *)u;
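The Config toggles above use webrtc's legacy Config mechanism; the diff does not show how u->ap and u->sconfig are created. A sketch of the presumed wiring, assuming the legacy AudioProcessing::Create(Config) factory and the StreamConfig(rate, channels) constructor; rate is illustrative, not a name from this code:

    /* Sketch only, not part of this patch. */
    u->sconfig = new webrtc::StreamConfig(rate, channels);   /* describes one 10 ms float block per call */
    u->ap = webrtc::AudioProcessing::Create(config);         /* legacy Config-based factory */
    if (!u->ap)
        return NULL;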
struct userdata *u = (struct userdata *)priv;
size_t frames;
size_t float_sample_size;
-
+ int ret;
assert(u);
assert(rec);
frames = u->fixed_frames;
float_sample_size = audio_effect_util_get_sample_size(AUDIO_EFFECT_FORMAT_FLOAT);
- for (int i = 0; i < u->loops; i++) {
- int ret;
-
- audio_effect_util_convert_s16le_to_float(frames * u->channels, (const short *)ref, u->ref_fbuf);
- audio_effect_util_deinterleave(u->ref_fbuf, (void **)u->ref_dbuf, u->channels, float_sample_size, frames);
-
- /* reference */
- ret = u->ap->ProcessReverseStream(u->ref_dbuf, *u->sconfig, *u->sconfig, u->ref_dbuf);
- if (ret != AudioProcessing::kNoError) {
- LOG_ERROR("Failed to process reverse stream");
- return -1;
- }
- u->ap->set_stream_delay_ms(0);
+ audio_effect_util_convert_s16le_to_float(frames * u->channels, (const short *)ref, u->ref_fbuf);
+ audio_effect_util_deinterleave(u->ref_fbuf, (void **)u->ref_dbuf, u->channels, float_sample_size, frames);
- /* capture */
- audio_effect_util_convert_s16le_to_float(frames * u->channels, (const short *)rec, u->rec_fbuf);
- audio_effect_util_deinterleave(u->rec_fbuf, (void **)u->rec_dbuf, u->channels, float_sample_size, frames);
+ /* reference */
+ ret = u->ap->ProcessReverseStream(u->ref_dbuf, *u->sconfig, *u->sconfig, u->ref_dbuf);
+ if (ret != AudioProcessing::kNoError) {
+ LOG_ERROR("Failed to process reverse stream");
+ return -1;
+ }
- ret = u->ap->ProcessStream(u->rec_dbuf, *u->sconfig, *u->sconfig, u->out_dbuf);
- if (ret != AudioProcessing::kNoError) {
- LOG_ERROR("Failed to process stream");
- return -1;
- }
+ u->ap->set_stream_delay_ms(0);
- audio_effect_util_interleave((const void **)u->out_dbuf, u->out_fbuf, u->channels, float_sample_size, frames);
- audio_effect_util_convert_float_to_s16le(frames * u->channels, u->out_fbuf, (short *)out);
+ /* capture */
+ audio_effect_util_convert_s16le_to_float(frames * u->channels, (const short *)rec, u->rec_fbuf);
+ audio_effect_util_deinterleave(u->rec_fbuf, (void **)u->rec_dbuf, u->channels, float_sample_size, frames);
- rec += u->fixed_bytes;
- ref += u->fixed_bytes;
- out += u->fixed_bytes;
+ ret = u->ap->ProcessStream(u->rec_dbuf, *u->sconfig, *u->sconfig, u->out_dbuf);
+ if (ret != AudioProcessing::kNoError) {
+ LOG_ERROR("Failed to process stream");
+ return -1;
}
+ audio_effect_util_interleave((const void **)u->out_dbuf, u->out_fbuf, u->channels, float_sample_size, frames);
+ audio_effect_util_convert_float_to_s16le(frames * u->channels, u->out_fbuf, (short *)out);
+
return 0;
}
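With the internal loop over u->loops removed, each call now handles exactly one fixed 10 ms block, so a request larger than fixed_bytes would have to be sliced by the caller. A hedged caller-side sketch; webrtc_process and request_bytes stand in for the real callback name and request size:

    /* Hypothetical caller-side chunking, not part of this patch. */
    for (size_t off = 0; off + u->fixed_bytes <= request_bytes; off += u->fixed_bytes) {
        if (webrtc_process(u, rec + off, ref + off, out + off) < 0)
            return -1;
    }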