1 /*
2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10
11 #include "webrtc/voice_engine/channel.h"
12
13 #include "webrtc/base/timeutils.h"
14 #include "webrtc/common.h"
15 #include "webrtc/modules/audio_device/include/audio_device.h"
16 #include "webrtc/modules/audio_processing/include/audio_processing.h"
17 #include "webrtc/modules/interface/module_common_types.h"
18 #include "webrtc/modules/rtp_rtcp/interface/receive_statistics.h"
19 #include "webrtc/modules/rtp_rtcp/interface/remote_ntp_time_estimator.h"
20 #include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
21 #include "webrtc/modules/rtp_rtcp/interface/rtp_receiver.h"
22 #include "webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h"
23 #include "webrtc/modules/utility/interface/audio_frame_operations.h"
24 #include "webrtc/modules/utility/interface/process_thread.h"
25 #include "webrtc/modules/utility/interface/rtp_dump.h"
26 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
27 #include "webrtc/system_wrappers/interface/logging.h"
28 #include "webrtc/system_wrappers/interface/trace.h"
29 #include "webrtc/video_engine/include/vie_network.h"
30 #include "webrtc/voice_engine/include/voe_base.h"
31 #include "webrtc/voice_engine/include/voe_external_media.h"
32 #include "webrtc/voice_engine/include/voe_rtp_rtcp.h"
33 #include "webrtc/voice_engine/output_mixer.h"
34 #include "webrtc/voice_engine/statistics.h"
35 #include "webrtc/voice_engine/transmit_mixer.h"
36 #include "webrtc/voice_engine/utility.h"
37
38 #if defined(_WIN32)
39 #include <Qos.h>
40 #endif
41
42 namespace webrtc {
43 namespace voe {
44
45 // Extend the default RTCP statistics struct with max_jitter, defined as the
46 // maximum jitter value seen in an RTCP report block.
47 struct ChannelStatistics : public RtcpStatistics {
48   ChannelStatistics() : rtcp(), max_jitter(0) {}
49
50   RtcpStatistics rtcp;
51   uint32_t max_jitter;
52 };
53
54 // Statistics callback, called at each generation of a new RTCP report block.
55 class StatisticsProxy : public RtcpStatisticsCallback {
56  public:
57   StatisticsProxy(uint32_t ssrc)
58    : stats_lock_(CriticalSectionWrapper::CreateCriticalSection()),
59      ssrc_(ssrc) {}
60   virtual ~StatisticsProxy() {}
61
62   virtual void StatisticsUpdated(const RtcpStatistics& statistics,
63                                  uint32_t ssrc) OVERRIDE {
64     if (ssrc != ssrc_)
65       return;
66
67     CriticalSectionScoped cs(stats_lock_.get());
68     stats_.rtcp = statistics;
69     if (statistics.jitter > stats_.max_jitter) {
70       stats_.max_jitter = statistics.jitter;
71     }
72   }
73
74   void ResetStatistics() {
75     CriticalSectionScoped cs(stats_lock_.get());
76     stats_ = ChannelStatistics();
77   }
78
79   ChannelStatistics GetStats() {
80     CriticalSectionScoped cs(stats_lock_.get());
81     return stats_;
82   }
83
84  private:
85   // StatisticsUpdated calls are triggered from threads in the RTP module,
86   // while GetStats calls can be triggered from the public voice engine API,
87   // hence synchronization is needed.
88   scoped_ptr<CriticalSectionWrapper> stats_lock_;
89   const uint32_t ssrc_;
90   ChannelStatistics stats_;
91 };
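// Illustrative note (added comment, not in the original source): the proxy keeps
// a running peak of the reported jitter, so a sequence of StatisticsUpdated()
// calls with jitter values 30, 120, 45 leaves GetStats().max_jitter == 120,
// while GetStats().rtcp.jitter reflects only the latest report block (45).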
92
93 class VoEBitrateObserver : public BitrateObserver {
94  public:
95   explicit VoEBitrateObserver(Channel* owner)
96       : owner_(owner) {}
97   virtual ~VoEBitrateObserver() {}
98
99   // Implements BitrateObserver.
100   virtual void OnNetworkChanged(const uint32_t bitrate_bps,
101                                 const uint8_t fraction_lost,
102                                 const uint32_t rtt) OVERRIDE {
103     // |fraction_lost| has a scale of 0 - 255.
104     owner_->OnNetworkChanged(bitrate_bps, fraction_lost, rtt);
105   }
106
107  private:
108   Channel* owner_;
109 };
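// Added note: VoEBitrateObserver is a thin adapter. The bitrate controller is
// handed this observer (see SetSendCodec()) and forwards bandwidth-estimate
// updates to the owning channel, e.g. a hypothetical call
// OnNetworkChanged(32000 /*bps*/, 13 /*lost, scale 0-255*/, 90 /*rtt ms*/).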
110
111 int32_t
112 Channel::SendData(FrameType frameType,
113                   uint8_t   payloadType,
114                   uint32_t  timeStamp,
115                   const uint8_t*  payloadData,
116                   uint16_t  payloadSize,
117                   const RTPFragmentationHeader* fragmentation)
118 {
119     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
120                  "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u,"
121                  " payloadSize=%u, fragmentation=0x%x)",
122                  frameType, payloadType, timeStamp, payloadSize, fragmentation);
123
124     if (_includeAudioLevelIndication)
125     {
126         // Store current audio level in the RTP/RTCP module.
127         // The level will be used in combination with voice-activity state
128         // (frameType) to add an RTP header extension
129         _rtpRtcpModule->SetAudioLevel(rms_level_.RMS());
130     }
131
132     // Push data from ACM to RTP/RTCP-module to deliver audio frame for
133     // packetization.
134     // This call will trigger Transport::SendPacket() from the RTP/RTCP module.
135     if (_rtpRtcpModule->SendOutgoingData((FrameType&)frameType,
136                                         payloadType,
137                                         timeStamp,
138                                         // Leaving the time when this frame was
139                                         // received from the capture device as
140                                         // undefined for voice for now.
141                                         -1,
142                                         payloadData,
143                                         payloadSize,
144                                         fragmentation) == -1)
145     {
146         _engineStatisticsPtr->SetLastError(
147             VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
148             "Channel::SendData() failed to send data to RTP/RTCP module");
149         return -1;
150     }
151
152     _lastLocalTimeStamp = timeStamp;
153     _lastPayloadType = payloadType;
154
155     return 0;
156 }
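// Data-path sketch (added comment, assuming the standard VoE wiring): the ACM
// calls Channel::SendData() with an encoded frame, SendOutgoingData() hands it
// to the RTP/RTCP module for packetization, and that module calls back into
// Channel::SendPacket() below (this class is the registered Transport) to put
// the resulting RTP packet on the wire.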
157
158 int32_t
159 Channel::InFrameType(int16_t frameType)
160 {
161     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
162                  "Channel::InFrameType(frameType=%d)", frameType);
163
164     CriticalSectionScoped cs(&_callbackCritSect);
165     // 1 indicates speech
166     _sendFrameType = (frameType == 1) ? 1 : 0;
167     return 0;
168 }
169
170 int32_t
171 Channel::OnRxVadDetected(int vadDecision)
172 {
173     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
174                  "Channel::OnRxVadDetected(vadDecision=%d)", vadDecision);
175
176     CriticalSectionScoped cs(&_callbackCritSect);
177     if (_rxVadObserverPtr)
178     {
179         _rxVadObserverPtr->OnRxVad(_channelId, vadDecision);
180     }
181
182     return 0;
183 }
184
185 int
186 Channel::SendPacket(int channel, const void *data, int len)
187 {
188     channel = VoEChannelId(channel);
189     assert(channel == _channelId);
190
191     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
192                  "Channel::SendPacket(channel=%d, len=%d)", channel, len);
193
194     CriticalSectionScoped cs(&_callbackCritSect);
195
196     if (_transportPtr == NULL)
197     {
198         WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
199                      "Channel::SendPacket() failed to send RTP packet due to"
200                      " invalid transport object");
201         return -1;
202     }
203
204     uint8_t* bufferToSendPtr = (uint8_t*)data;
205     int32_t bufferLength = len;
206
207     // Dump the RTP packet to a file (if RTP dump is enabled).
208     if (_rtpDumpOut.DumpPacket((const uint8_t*)data, len) == -1)
209     {
210         WEBRTC_TRACE(kTraceWarning, kTraceVoice,
211                      VoEId(_instanceId,_channelId),
212                      "Channel::SendPacket() RTP dump to output file failed");
213     }
214
215     int n = _transportPtr->SendPacket(channel, bufferToSendPtr,
216                                       bufferLength);
217     if (n < 0) {
218       std::string transport_name =
219           _externalTransport ? "external transport" : "WebRtc sockets";
220       WEBRTC_TRACE(kTraceError, kTraceVoice,
221                    VoEId(_instanceId,_channelId),
222                    "Channel::SendPacket() RTP transmission using %s failed",
223                    transport_name.c_str());
224       return -1;
225     }
226     return n;
227 }
228
229 int
230 Channel::SendRTCPPacket(int channel, const void *data, int len)
231 {
232     channel = VoEChannelId(channel);
233     assert(channel == _channelId);
234
235     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
236                  "Channel::SendRTCPPacket(channel=%d, len=%d)", channel, len);
237
238     CriticalSectionScoped cs(&_callbackCritSect);
239     if (_transportPtr == NULL)
240     {
241         WEBRTC_TRACE(kTraceError, kTraceVoice,
242                      VoEId(_instanceId,_channelId),
243                      "Channel::SendRTCPPacket() failed to send RTCP packet"
244                      " due to invalid transport object");
245         return -1;
246     }
247
248     uint8_t* bufferToSendPtr = (uint8_t*)data;
249     int32_t bufferLength = len;
250
251     // Dump the RTCP packet to a file (if RTP dump is enabled).
252     if (_rtpDumpOut.DumpPacket((const uint8_t*)data, len) == -1)
253     {
254         WEBRTC_TRACE(kTraceWarning, kTraceVoice,
255                      VoEId(_instanceId,_channelId),
256                      "Channel::SendRTCPPacket() RTCP dump to output file failed");
257     }
258
259     int n = _transportPtr->SendRTCPPacket(channel,
260                                           bufferToSendPtr,
261                                           bufferLength);
262     if (n < 0) {
263       std::string transport_name =
264           _externalTransport ? "external transport" : "WebRtc sockets";
265       WEBRTC_TRACE(kTraceInfo, kTraceVoice,
266                    VoEId(_instanceId,_channelId),
267                    "Channel::SendRTCPPacket() transmission using %s failed",
268                    transport_name.c_str());
269       return -1;
270     }
271     return n;
272 }
273
274 void
275 Channel::OnPlayTelephoneEvent(int32_t id,
276                               uint8_t event,
277                               uint16_t lengthMs,
278                               uint8_t volume)
279 {
280     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
281                  "Channel::OnPlayTelephoneEvent(id=%d, event=%u, lengthMs=%u,"
282                  " volume=%u)", id, event, lengthMs, volume);
283
284     if (!_playOutbandDtmfEvent || (event > 15))
285     {
286         // Ignore callback since feedback is disabled or event is not a
287         // Dtmf tone event.
288         return;
289     }
290
291     assert(_outputMixerPtr != NULL);
292
293     // Start playing out the Dtmf tone (if playout is enabled).
294     // Reduce the tone length by 80 ms to reduce the risk of echo.
295     _outputMixerPtr->PlayDtmfTone(event, lengthMs - 80, volume);
296 }
297
298 void
299 Channel::OnIncomingSSRCChanged(int32_t id, uint32_t ssrc)
300 {
301     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
302                  "Channel::OnIncomingSSRCChanged(id=%d, SSRC=%d)",
303                  id, ssrc);
304
305     // Update ssrc so that NTP for AV sync can be updated.
306     _rtpRtcpModule->SetRemoteSSRC(ssrc);
307 }
308
309 void Channel::OnIncomingCSRCChanged(int32_t id,
310                                     uint32_t CSRC,
311                                     bool added)
312 {
313     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
314                  "Channel::OnIncomingCSRCChanged(id=%d, CSRC=%d, added=%d)",
315                  id, CSRC, added);
316 }
317
318 void Channel::ResetStatistics(uint32_t ssrc) {
319   StreamStatistician* statistician =
320       rtp_receive_statistics_->GetStatistician(ssrc);
321   if (statistician) {
322     statistician->ResetStatistics();
323   }
324   statistics_proxy_->ResetStatistics();
325 }
326
327 void
328 Channel::OnApplicationDataReceived(int32_t id,
329                                    uint8_t subType,
330                                    uint32_t name,
331                                    uint16_t length,
332                                    const uint8_t* data)
333 {
334     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
335                  "Channel::OnApplicationDataReceived(id=%d, subType=%u,"
336                  " name=%u, length=%u)",
337                  id, subType, name, length);
338
339     int32_t channel = VoEChannelId(id);
340     assert(channel == _channelId);
341
342     if (_rtcpObserver)
343     {
344         CriticalSectionScoped cs(&_callbackCritSect);
345
346         if (_rtcpObserverPtr)
347         {
348             _rtcpObserverPtr->OnApplicationDataReceived(channel,
349                                                         subType,
350                                                         name,
351                                                         data,
352                                                         length);
353         }
354     }
355 }
356
357 int32_t
358 Channel::OnInitializeDecoder(
359     int32_t id,
360     int8_t payloadType,
361     const char payloadName[RTP_PAYLOAD_NAME_SIZE],
362     int frequency,
363     uint8_t channels,
364     uint32_t rate)
365 {
366     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
367                  "Channel::OnInitializeDecoder(id=%d, payloadType=%d, "
368                  "payloadName=%s, frequency=%u, channels=%u, rate=%u)",
369                  id, payloadType, payloadName, frequency, channels, rate);
370
371     assert(VoEChannelId(id) == _channelId);
372
373     CodecInst receiveCodec = {0};
374     CodecInst dummyCodec = {0};
375
376     receiveCodec.pltype = payloadType;
377     receiveCodec.plfreq = frequency;
378     receiveCodec.channels = channels;
379     receiveCodec.rate = rate;
380     strncpy(receiveCodec.plname, payloadName, RTP_PAYLOAD_NAME_SIZE - 1);
381
382     audio_coding_->Codec(payloadName, &dummyCodec, frequency, channels);
383     receiveCodec.pacsize = dummyCodec.pacsize;
384
385     // Register the new codec to the ACM
386     if (audio_coding_->RegisterReceiveCodec(receiveCodec) == -1)
387     {
388         WEBRTC_TRACE(kTraceWarning, kTraceVoice,
389                      VoEId(_instanceId, _channelId),
390                      "Channel::OnInitializeDecoder() invalid codec ("
391                      "pt=%d, name=%s) received - 1", payloadType, payloadName);
392         _engineStatisticsPtr->SetLastError(VE_AUDIO_CODING_MODULE_ERROR);
393         return -1;
394     }
395
396     return 0;
397 }
398
399 void
400 Channel::OnPacketTimeout(int32_t id)
401 {
402     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
403                  "Channel::OnPacketTimeout(id=%d)", id);
404
405     CriticalSectionScoped cs(_callbackCritSectPtr);
406     if (_voiceEngineObserverPtr)
407     {
408         if (channel_state_.Get().receiving || _externalTransport)
409         {
410             int32_t channel = VoEChannelId(id);
411             assert(channel == _channelId);
412             // Ensure that next OnReceivedPacket() callback will trigger
413             // a VE_PACKET_RECEIPT_RESTARTED callback.
414             _rtpPacketTimedOut = true;
415             // Deliver callback to the observer
416             WEBRTC_TRACE(kTraceInfo, kTraceVoice,
417                          VoEId(_instanceId,_channelId),
418                          "Channel::OnPacketTimeout() => "
419                          "CallbackOnError(VE_RECEIVE_PACKET_TIMEOUT)");
420             _voiceEngineObserverPtr->CallbackOnError(channel,
421                                                      VE_RECEIVE_PACKET_TIMEOUT);
422         }
423     }
424 }
425
426 void
427 Channel::OnReceivedPacket(int32_t id,
428                           RtpRtcpPacketType packetType)
429 {
430     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
431                  "Channel::OnReceivedPacket(id=%d, packetType=%d)",
432                  id, packetType);
433
434     assert(VoEChannelId(id) == _channelId);
435
436     // Notify only for the case when we have restarted an RTP session.
437     if (_rtpPacketTimedOut && (kPacketRtp == packetType))
438     {
439         CriticalSectionScoped cs(_callbackCritSectPtr);
440         if (_voiceEngineObserverPtr)
441         {
442             int32_t channel = VoEChannelId(id);
443             assert(channel == _channelId);
444             // Reset timeout mechanism
445             _rtpPacketTimedOut = false;
446             // Deliver callback to the observer
447             WEBRTC_TRACE(kTraceInfo, kTraceVoice,
448                          VoEId(_instanceId,_channelId),
449                          "Channel::OnReceivedPacket() =>"
450                          " CallbackOnError(VE_PACKET_RECEIPT_RESTARTED)");
451             _voiceEngineObserverPtr->CallbackOnError(
452                 channel,
453                 VE_PACKET_RECEIPT_RESTARTED);
454         }
455     }
456 }
457
458 void
459 Channel::OnPeriodicDeadOrAlive(int32_t id,
460                                RTPAliveType alive)
461 {
462     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
463                  "Channel::OnPeriodicDeadOrAlive(id=%d, alive=%d)", id, alive);
464
465     {
466         CriticalSectionScoped cs(&_callbackCritSect);
467         if (!_connectionObserver)
468             return;
469     }
470
471     int32_t channel = VoEChannelId(id);
472     assert(channel == _channelId);
473
474     // Use Alive as default to limit risk of false Dead detections
475     bool isAlive(true);
476
477     // Always mark the connection as Dead when the module reports kRtpDead
478     if (kRtpDead == alive)
479     {
480         isAlive = false;
481     }
482
483     // It is possible that the connection is alive even if no RTP packet has
484     // been received for a long time since the other side might use VAD/DTX
485     // and a low SID-packet update rate.
486     if ((kRtpNoRtp == alive) && channel_state_.Get().playing)
487     {
488         // Detect Alive for all NetEQ states except for the case when we are
489         // in PLC_CNG state.
490         // PLC_CNG <=> background noise only due to long expand or error.
491         // Note that the case where the other side stops sending during CNG
492         // state will be detected as Alive. Dead is not set until RTCP packets
493         // have been missing for at least twelve seconds (handled internally
494         // by the RTP/RTCP module).
495         isAlive = (_outputSpeechType != AudioFrame::kPLCCNG);
496     }
497
498     // Send callback to the registered observer
499     if (_connectionObserver)
500     {
501         CriticalSectionScoped cs(&_callbackCritSect);
502         if (_connectionObserverPtr)
503         {
504             _connectionObserverPtr->OnPeriodicDeadOrAlive(channel, isAlive);
505         }
506     }
507 }
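// Added note: the dead-or-alive decision above defaults to Alive; only an
// explicit kRtpDead report marks the connection Dead, and kRtpNoRtp while
// playing is still treated as Alive unless NetEQ is in the PLC_CNG
// (background-noise-only) state.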
508
509 int32_t
510 Channel::OnReceivedPayloadData(const uint8_t* payloadData,
511                                uint16_t payloadSize,
512                                const WebRtcRTPHeader* rtpHeader)
513 {
514     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
515                  "Channel::OnReceivedPayloadData(payloadSize=%d,"
516                  " payloadType=%u, audioChannel=%u)",
517                  payloadSize,
518                  rtpHeader->header.payloadType,
519                  rtpHeader->type.Audio.channel);
520
521     if (!channel_state_.Get().playing)
522     {
523         // Avoid inserting into NetEQ when we are not playing. Count the
524         // packet as discarded.
525         WEBRTC_TRACE(kTraceStream, kTraceVoice,
526                      VoEId(_instanceId, _channelId),
527                      "received packet is discarded since playing is not"
528                      " activated");
529         _numberOfDiscardedPackets++;
530         return 0;
531     }
532
533     // Push the incoming payload (parsed and ready for decoding) into the ACM
534     if (audio_coding_->IncomingPacket(payloadData,
535                                       payloadSize,
536                                       *rtpHeader) != 0)
537     {
538         _engineStatisticsPtr->SetLastError(
539             VE_AUDIO_CODING_MODULE_ERROR, kTraceWarning,
540             "Channel::OnReceivedPayloadData() unable to push data to the ACM");
541         return -1;
542     }
543
544     // Update the packet delay.
545     UpdatePacketDelay(rtpHeader->header.timestamp,
546                       rtpHeader->header.sequenceNumber);
547
548     uint16_t round_trip_time = 0;
549     _rtpRtcpModule->RTT(rtp_receiver_->SSRC(), &round_trip_time,
550                         NULL, NULL, NULL);
551
552     std::vector<uint16_t> nack_list = audio_coding_->GetNackList(
553         round_trip_time);
554     if (!nack_list.empty()) {
555       // Can't use nack_list.data() since it's not supported by all
556       // compilers.
557       ResendPackets(&(nack_list[0]), static_cast<int>(nack_list.size()));
558     }
559     return 0;
560 }
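// Added note: a rough sketch of the NACK handling above, with hypothetical
// values (the exact retransmission request is issued inside ResendPackets()):
//   UpdatePacketDelay(ts, seq);                    // track packet delay
//   _rtpRtcpModule->RTT(ssrc, &rtt, ...);          // e.g. rtt = 80 ms
//   nack_list = audio_coding_->GetNackList(rtt);   // e.g. {417, 418}
//   ResendPackets(&nack_list[0], 2);               // request the missing packets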
561
562 bool Channel::OnRecoveredPacket(const uint8_t* rtp_packet,
563                                 int rtp_packet_length) {
564   RTPHeader header;
565   if (!rtp_header_parser_->Parse(rtp_packet, rtp_packet_length, &header)) {
566     WEBRTC_TRACE(kTraceDebug, webrtc::kTraceVoice, _channelId,
567                  "IncomingPacket invalid RTP header");
568     return false;
569   }
570   header.payload_type_frequency =
571       rtp_payload_registry_->GetPayloadTypeFrequency(header.payloadType);
572   if (header.payload_type_frequency < 0)
573     return false;
574   return ReceivePacket(rtp_packet, rtp_packet_length, header, false);
575 }
576
577 int32_t Channel::GetAudioFrame(int32_t id, AudioFrame& audioFrame)
578 {
579     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
580                  "Channel::GetAudioFrame(id=%d)", id);
581
582     // Get 10ms raw PCM data from the ACM (mixer limits output frequency)
583     if (audio_coding_->PlayoutData10Ms(audioFrame.sample_rate_hz_,
584                                        &audioFrame) == -1)
585     {
586         WEBRTC_TRACE(kTraceError, kTraceVoice,
587                      VoEId(_instanceId,_channelId),
588                      "Channel::GetAudioFrame() PlayoutData10Ms() failed!");
589         // In all likelihood, the audio in this frame is garbage. We return an
590         // error so that the audio mixer module doesn't add it to the mix. As
591         // a result, it won't be played out and the actions skipped here are
592         // irrelevant.
593         return -1;
594     }
595
596     if (_RxVadDetection)
597     {
598         UpdateRxVadDetection(audioFrame);
599     }
600
601     // Convert module ID to internal VoE channel ID
602     audioFrame.id_ = VoEChannelId(audioFrame.id_);
603     // Store speech type for dead-or-alive detection
604     _outputSpeechType = audioFrame.speech_type_;
605
606     ChannelState::State state = channel_state_.Get();
607
608     if (state.rx_apm_is_enabled) {
609       int err = rx_audioproc_->ProcessStream(&audioFrame);
610       if (err) {
611         LOG(LS_ERROR) << "ProcessStream() error: " << err;
612         assert(false);
613       }
614     }
615
616     float output_gain = 1.0f;
617     float left_pan =  1.0f;
618     float right_pan =  1.0f;
619     {
620       CriticalSectionScoped cs(&volume_settings_critsect_);
621       output_gain = _outputGain;
622       left_pan = _panLeft;
623       right_pan = _panRight;
624     }
625
626     // Output volume scaling
627     if (output_gain < 0.99f || output_gain > 1.01f)
628     {
629         AudioFrameOperations::ScaleWithSat(output_gain, audioFrame);
630     }
631
632     // Scale left and/or right channel(s) if stereo and master balance is
633     // active
634
635     if (left_pan != 1.0f || right_pan != 1.0f)
636     {
637         if (audioFrame.num_channels_ == 1)
638         {
639             // Emulate stereo mode since panning is active.
640             // The mono signal is copied to both left and right channels here.
641             AudioFrameOperations::MonoToStereo(&audioFrame);
642         }
643         // For true stereo mode (when we are receiving a stereo signal), no
644         // action is needed.
645
646         // Do the panning operation (the audio frame contains stereo at this
647         // stage)
648         AudioFrameOperations::Scale(left_pan, right_pan, audioFrame);
649     }
650
651     // Mix decoded PCM output with file if file mixing is enabled
652     if (state.output_file_playing)
653     {
654         MixAudioWithFile(audioFrame, audioFrame.sample_rate_hz_);
655     }
656
657     // External media
658     if (_outputExternalMedia)
659     {
660         CriticalSectionScoped cs(&_callbackCritSect);
661         const bool isStereo = (audioFrame.num_channels_ == 2);
662         if (_outputExternalMediaCallbackPtr)
663         {
664             _outputExternalMediaCallbackPtr->Process(
665                 _channelId,
666                 kPlaybackPerChannel,
667                 (int16_t*)audioFrame.data_,
668                 audioFrame.samples_per_channel_,
669                 audioFrame.sample_rate_hz_,
670                 isStereo);
671         }
672     }
673
674     // Record playout if enabled
675     {
676         CriticalSectionScoped cs(&_fileCritSect);
677
678         if (_outputFileRecording && _outputFileRecorderPtr)
679         {
680             _outputFileRecorderPtr->RecordAudioToFile(audioFrame);
681         }
682     }
683
684     // Measure audio level (0-9)
685     _outputAudioLevel.ComputeLevel(audioFrame);
686
687     if (capture_start_rtp_time_stamp_ < 0 && audioFrame.timestamp_ != 0) {
688       // The first frame with a valid rtp timestamp.
689       capture_start_rtp_time_stamp_ = audioFrame.timestamp_;
690     }
691
692     if (capture_start_rtp_time_stamp_ >= 0) {
693       // audioFrame.timestamp_ should be valid from now on.
694
695       // Compute elapsed time.
696       int64_t unwrap_timestamp =
697           rtp_ts_wraparound_handler_->Unwrap(audioFrame.timestamp_);
698       audioFrame.elapsed_time_ms_ =
699           (unwrap_timestamp - capture_start_rtp_time_stamp_) /
700           (GetPlayoutFrequency() / 1000);
701
702       // Compute ntp time.
703       audioFrame.ntp_time_ms_ = ntp_estimator_->Estimate(audioFrame.timestamp_);
704       // |ntp_time_ms_| won't be valid until at least 2 RTCP SRs are received.
705       if (audioFrame.ntp_time_ms_ > 0) {
706         // Compute |capture_start_ntp_time_ms_| so that
707         // |capture_start_ntp_time_ms_| + |elapsed_time_ms_| == |ntp_time_ms_|
708         CriticalSectionScoped lock(ts_stats_lock_.get());
709         capture_start_ntp_time_ms_ =
710             audioFrame.ntp_time_ms_ - audioFrame.elapsed_time_ms_;
711       }
712     }
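    // Worked example (added comment): with a 48 kHz playout clock,
    // GetPlayoutFrequency() / 1000 == 48 samples per ms, so an unwrapped
    // timestamp 96000 ticks past capture_start_rtp_time_stamp_ gives
    // elapsed_time_ms_ = 96000 / 48 = 2000 ms.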
713
714     return 0;
715 }
716
717 int32_t
718 Channel::NeededFrequency(int32_t id)
719 {
720     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
721                  "Channel::NeededFrequency(id=%d)", id);
722
723     int highestNeeded = 0;
724
725     // Determine highest needed receive frequency
726     int32_t receiveFrequency = audio_coding_->ReceiveFrequency();
727
728     // Return the larger of the ACM playout and receive frequencies.
729     if (audio_coding_->PlayoutFrequency() > receiveFrequency)
730     {
731         highestNeeded = audio_coding_->PlayoutFrequency();
732     }
733     else
734     {
735         highestNeeded = receiveFrequency;
736     }
737
738     // Special case: if we're playing a file on the playout side we take
739     // that frequency into consideration as well.
740     // This is not needed on the sending side, since the codec will limit
741     // the spectrum anyway.
742     if (channel_state_.Get().output_file_playing)
743     {
744         CriticalSectionScoped cs(&_fileCritSect);
745         if (_outputFilePlayerPtr)
746         {
747             if (_outputFilePlayerPtr->Frequency() > highestNeeded)
748             {
749                 highestNeeded = _outputFilePlayerPtr->Frequency();
750             }
751         }
752     }
753
754     return highestNeeded;
755 }
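// Added note: NeededFrequency() simply returns the highest rate any consumer
// needs. For example, with ACM playout at 16000 Hz, a receive frequency of
// 32000 Hz and an output file playing at 44100 Hz, it returns 44100.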
756
757 int32_t
758 Channel::CreateChannel(Channel*& channel,
759                        int32_t channelId,
760                        uint32_t instanceId,
761                        const Config& config)
762 {
763     WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId,channelId),
764                  "Channel::CreateChannel(channelId=%d, instanceId=%d)",
765         channelId, instanceId);
766
767     channel = new Channel(channelId, instanceId, config);
768     if (channel == NULL)
769     {
770         WEBRTC_TRACE(kTraceMemory, kTraceVoice,
771                      VoEId(instanceId,channelId),
772                      "Channel::CreateChannel() unable to allocate memory for"
773                      " channel");
774         return -1;
775     }
776     return 0;
777 }
778
779 void
780 Channel::PlayNotification(int32_t id, uint32_t durationMs)
781 {
782     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
783                  "Channel::PlayNotification(id=%d, durationMs=%d)",
784                  id, durationMs);
785
786     // Not implemented yet.
787 }
788
789 void
790 Channel::RecordNotification(int32_t id, uint32_t durationMs)
791 {
792     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
793                  "Channel::RecordNotification(id=%d, durationMs=%d)",
794                  id, durationMs);
795
796     // Not implemented yet.
797 }
798
799 void
800 Channel::PlayFileEnded(int32_t id)
801 {
802     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
803                  "Channel::PlayFileEnded(id=%d)", id);
804
805     if (id == _inputFilePlayerId)
806     {
807         channel_state_.SetInputFilePlaying(false);
808         WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
809                      VoEId(_instanceId,_channelId),
810                      "Channel::PlayFileEnded() => input file player module is"
811                      " shutdown");
812     }
813     else if (id == _outputFilePlayerId)
814     {
815         channel_state_.SetOutputFilePlaying(false);
816         WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
817                      VoEId(_instanceId,_channelId),
818                      "Channel::PlayFileEnded() => output file player module is"
819                      " shutdown");
820     }
821 }
822
823 void
824 Channel::RecordFileEnded(int32_t id)
825 {
826     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
827                  "Channel::RecordFileEnded(id=%d)", id);
828
829     assert(id == _outputFileRecorderId);
830
831     CriticalSectionScoped cs(&_fileCritSect);
832
833     _outputFileRecording = false;
834     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
835                  VoEId(_instanceId,_channelId),
836                  "Channel::RecordFileEnded() => output file recorder module is"
837                  " shutdown");
838 }
839
840 Channel::Channel(int32_t channelId,
841                  uint32_t instanceId,
842                  const Config& config) :
843     _fileCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
844     _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
845     volume_settings_critsect_(*CriticalSectionWrapper::CreateCriticalSection()),
846     _instanceId(instanceId),
847     _channelId(channelId),
848     rtp_header_parser_(RtpHeaderParser::Create()),
849     rtp_payload_registry_(
850         new RTPPayloadRegistry(RTPPayloadStrategy::CreateStrategy(true))),
851     rtp_receive_statistics_(ReceiveStatistics::Create(
852         Clock::GetRealTimeClock())),
853     rtp_receiver_(RtpReceiver::CreateAudioReceiver(
854         VoEModuleId(instanceId, channelId), Clock::GetRealTimeClock(), this,
855         this, this, rtp_payload_registry_.get())),
856     telephone_event_handler_(rtp_receiver_->GetTelephoneEventHandler()),
857     audio_coding_(AudioCodingModule::Create(
858         VoEModuleId(instanceId, channelId))),
859     _rtpDumpIn(*RtpDump::CreateRtpDump()),
860     _rtpDumpOut(*RtpDump::CreateRtpDump()),
861     _outputAudioLevel(),
862     _externalTransport(false),
863     _audioLevel_dBov(0),
864     _inputFilePlayerPtr(NULL),
865     _outputFilePlayerPtr(NULL),
866     _outputFileRecorderPtr(NULL),
867     // Avoid conflicts with other channels by adding the offsets 1024 - 1026;
868     // we will never use as many as 1024 channels.
869     _inputFilePlayerId(VoEModuleId(instanceId, channelId) + 1024),
870     _outputFilePlayerId(VoEModuleId(instanceId, channelId) + 1025),
871     _outputFileRecorderId(VoEModuleId(instanceId, channelId) + 1026),
872     _outputFileRecording(false),
873     _inbandDtmfQueue(VoEModuleId(instanceId, channelId)),
874     _inbandDtmfGenerator(VoEModuleId(instanceId, channelId)),
875     _outputExternalMedia(false),
876     _inputExternalMediaCallbackPtr(NULL),
877     _outputExternalMediaCallbackPtr(NULL),
878     _timeStamp(0), // This is just an offset; the RTP module will add its own random offset.
879     _sendTelephoneEventPayloadType(106),
880     ntp_estimator_(new RemoteNtpTimeEstimator(Clock::GetRealTimeClock())),
881     jitter_buffer_playout_timestamp_(0),
882     playout_timestamp_rtp_(0),
883     playout_timestamp_rtcp_(0),
884     playout_delay_ms_(0),
885     _numberOfDiscardedPackets(0),
886     send_sequence_number_(0),
887     ts_stats_lock_(CriticalSectionWrapper::CreateCriticalSection()),
888     rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()),
889     capture_start_rtp_time_stamp_(-1),
890     capture_start_ntp_time_ms_(-1),
891     _engineStatisticsPtr(NULL),
892     _outputMixerPtr(NULL),
893     _transmitMixerPtr(NULL),
894     _moduleProcessThreadPtr(NULL),
895     _audioDeviceModulePtr(NULL),
896     _voiceEngineObserverPtr(NULL),
897     _callbackCritSectPtr(NULL),
898     _transportPtr(NULL),
899     _rxVadObserverPtr(NULL),
900     _oldVadDecision(-1),
901     _sendFrameType(0),
902     _rtcpObserverPtr(NULL),
903     _externalPlayout(false),
904     _externalMixing(false),
905     _mixFileWithMicrophone(false),
906     _rtcpObserver(false),
907     _mute(false),
908     _panLeft(1.0f),
909     _panRight(1.0f),
910     _outputGain(1.0f),
911     _playOutbandDtmfEvent(false),
912     _playInbandDtmfEvent(false),
913     _lastLocalTimeStamp(0),
914     _lastPayloadType(0),
915     _includeAudioLevelIndication(false),
916     _rtpPacketTimedOut(false),
917     _rtpPacketTimeOutIsEnabled(false),
918     _rtpTimeOutSeconds(0),
919     _connectionObserver(false),
920     _connectionObserverPtr(NULL),
921     _outputSpeechType(AudioFrame::kNormalSpeech),
922     vie_network_(NULL),
923     video_channel_(-1),
924     _average_jitter_buffer_delay_us(0),
925     least_required_delay_ms_(0),
926     _previousTimestamp(0),
927     _recPacketDelayMs(20),
928     _RxVadDetection(false),
929     _rxAgcIsEnabled(false),
930     _rxNsIsEnabled(false),
931     restored_packet_in_use_(false),
932     bitrate_controller_(
933         BitrateController::CreateBitrateController(Clock::GetRealTimeClock(),
934                                                    true)),
935     rtcp_bandwidth_observer_(
936         bitrate_controller_->CreateRtcpBandwidthObserver()),
937     send_bitrate_observer_(new VoEBitrateObserver(this)),
938     network_predictor_(new NetworkPredictor(Clock::GetRealTimeClock()))
939 {
940     WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,_channelId),
941                  "Channel::Channel() - ctor");
942     _inbandDtmfQueue.ResetDtmf();
943     _inbandDtmfGenerator.Init();
944     _outputAudioLevel.Clear();
945
946     RtpRtcp::Configuration configuration;
947     configuration.id = VoEModuleId(instanceId, channelId);
948     configuration.audio = true;
949     configuration.outgoing_transport = this;
950     configuration.rtcp_feedback = this;
951     configuration.audio_messages = this;
952     configuration.receive_statistics = rtp_receive_statistics_.get();
953     configuration.bandwidth_callback = rtcp_bandwidth_observer_.get();
954
955     _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration));
956
957     statistics_proxy_.reset(new StatisticsProxy(_rtpRtcpModule->SSRC()));
958     rtp_receive_statistics_->RegisterRtcpStatisticsCallback(
959         statistics_proxy_.get());
960
961     Config audioproc_config;
962     audioproc_config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
963     rx_audioproc_.reset(AudioProcessing::Create(audioproc_config));
964 }
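// Added note: in the constructor above the channel registers itself as the
// RTP/RTCP module's outgoing transport, RTCP feedback sink and audio-message
// callback (configuration.outgoing_transport / rtcp_feedback / audio_messages
// all point at |this|), which is what routes SendPacket()/SendRTCPPacket()
// and the On*() callbacks in this file back to the same Channel instance.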
965
966 Channel::~Channel()
967 {
968     rtp_receive_statistics_->RegisterRtcpStatisticsCallback(NULL);
969     WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,_channelId),
970                  "Channel::~Channel() - dtor");
971
972     if (_outputExternalMedia)
973     {
974         DeRegisterExternalMediaProcessing(kPlaybackPerChannel);
975     }
976     if (channel_state_.Get().input_external_media)
977     {
978         DeRegisterExternalMediaProcessing(kRecordingPerChannel);
979     }
980     StopSend();
981     StopPlayout();
982
983     {
984         CriticalSectionScoped cs(&_fileCritSect);
985         if (_inputFilePlayerPtr)
986         {
987             _inputFilePlayerPtr->RegisterModuleFileCallback(NULL);
988             _inputFilePlayerPtr->StopPlayingFile();
989             FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
990             _inputFilePlayerPtr = NULL;
991         }
992         if (_outputFilePlayerPtr)
993         {
994             _outputFilePlayerPtr->RegisterModuleFileCallback(NULL);
995             _outputFilePlayerPtr->StopPlayingFile();
996             FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
997             _outputFilePlayerPtr = NULL;
998         }
999         if (_outputFileRecorderPtr)
1000         {
1001             _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
1002             _outputFileRecorderPtr->StopRecording();
1003             FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
1004             _outputFileRecorderPtr = NULL;
1005         }
1006     }
1007
1008     // The order to safely shutdown modules in a channel is:
1009     // 1. De-register callbacks in modules
1010     // 2. De-register modules in process thread
1011     // 3. Destroy modules
1012     if (audio_coding_->RegisterTransportCallback(NULL) == -1)
1013     {
1014         WEBRTC_TRACE(kTraceWarning, kTraceVoice,
1015                      VoEId(_instanceId,_channelId),
1016                      "~Channel() failed to de-register transport callback"
1017                      " (Audio coding module)");
1018     }
1019     if (audio_coding_->RegisterVADCallback(NULL) == -1)
1020     {
1021         WEBRTC_TRACE(kTraceWarning, kTraceVoice,
1022                      VoEId(_instanceId,_channelId),
1023                      "~Channel() failed to de-register VAD callback"
1024                      " (Audio coding module)");
1025     }
1026     // De-register modules in process thread
1027     if (_moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get()) == -1)
1028     {
1029         WEBRTC_TRACE(kTraceInfo, kTraceVoice,
1030                      VoEId(_instanceId,_channelId),
1031                      "~Channel() failed to deregister RTP/RTCP module");
1032     }
1033     // End of modules shutdown
1034
1035     // Delete other objects
1036     if (vie_network_) {
1037       vie_network_->Release();
1038       vie_network_ = NULL;
1039     }
1040     RtpDump::DestroyRtpDump(&_rtpDumpIn);
1041     RtpDump::DestroyRtpDump(&_rtpDumpOut);
1042     delete &_callbackCritSect;
1043     delete &_fileCritSect;
1044     delete &volume_settings_critsect_;
1045 }
1046
1047 int32_t
1048 Channel::Init()
1049 {
1050     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1051                  "Channel::Init()");
1052
1053     channel_state_.Reset();
1054
1055     // --- Initial sanity
1056
1057     if ((_engineStatisticsPtr == NULL) ||
1058         (_moduleProcessThreadPtr == NULL))
1059     {
1060         WEBRTC_TRACE(kTraceError, kTraceVoice,
1061                      VoEId(_instanceId,_channelId),
1062                      "Channel::Init() must call SetEngineInformation() first");
1063         return -1;
1064     }
1065
1066     // --- Add modules to the process thread (for periodic scheduling)
1067
1068     const bool processThreadFail =
1069         ((_moduleProcessThreadPtr->RegisterModule(_rtpRtcpModule.get()) != 0) ||
1070         false);
1071     if (processThreadFail)
1072     {
1073         _engineStatisticsPtr->SetLastError(
1074             VE_CANNOT_INIT_CHANNEL, kTraceError,
1075             "Channel::Init() modules not registered");
1076         return -1;
1077     }
1078     // --- ACM initialization
1079
1080     if ((audio_coding_->InitializeReceiver() == -1) ||
1081 #ifdef WEBRTC_CODEC_AVT
1082         // out-of-band Dtmf tones are played out by default
1083         (audio_coding_->SetDtmfPlayoutStatus(true) == -1) ||
1084 #endif
1085         (audio_coding_->InitializeSender() == -1))
1086     {
1087         _engineStatisticsPtr->SetLastError(
1088             VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
1089             "Channel::Init() unable to initialize the ACM - 1");
1090         return -1;
1091     }
1092
1093     // --- RTP/RTCP module initialization
1094
1095     // Ensure that RTCP is enabled by default for the created channel.
1096     // Note that the module will keep generating RTCP until it is explicitly
1097     // disabled by the user.
1098     // After StopListen (when no sockets exists), RTCP packets will no longer
1099     // be transmitted since the Transport object will then be invalid.
1100     telephone_event_handler_->SetTelephoneEventForwardToDecoder(true);
1101     // RTCP is enabled by default.
1102     if (_rtpRtcpModule->SetRTCPStatus(kRtcpCompound) == -1)
1103     {
1104         _engineStatisticsPtr->SetLastError(
1105             VE_RTP_RTCP_MODULE_ERROR, kTraceError,
1106             "Channel::Init() RTP/RTCP module not initialized");
1107         return -1;
1108     }
1109
1110      // --- Register all permanent callbacks
1111     const bool fail =
1112         (audio_coding_->RegisterTransportCallback(this) == -1) ||
1113         (audio_coding_->RegisterVADCallback(this) == -1);
1114
1115     if (fail)
1116     {
1117         _engineStatisticsPtr->SetLastError(
1118             VE_CANNOT_INIT_CHANNEL, kTraceError,
1119             "Channel::Init() callbacks not registered");
1120         return -1;
1121     }
1122
1123     // --- Register all supported codecs to the receiving side of the
1124     // RTP/RTCP module
1125
1126     CodecInst codec;
1127     const uint8_t nSupportedCodecs = AudioCodingModule::NumberOfCodecs();
1128
1129     for (int idx = 0; idx < nSupportedCodecs; idx++)
1130     {
1131         // Open up the RTP/RTCP receiver for all supported codecs
1132         if ((audio_coding_->Codec(idx, &codec) == -1) ||
1133             (rtp_receiver_->RegisterReceivePayload(
1134                 codec.plname,
1135                 codec.pltype,
1136                 codec.plfreq,
1137                 codec.channels,
1138                 (codec.rate < 0) ? 0 : codec.rate) == -1))
1139         {
1140             WEBRTC_TRACE(kTraceWarning, kTraceVoice,
1141                          VoEId(_instanceId,_channelId),
1142                          "Channel::Init() unable to register %s (%d/%d/%d/%d) "
1143                          "to RTP/RTCP receiver",
1144                          codec.plname, codec.pltype, codec.plfreq,
1145                          codec.channels, codec.rate);
1146         }
1147         else
1148         {
1149             WEBRTC_TRACE(kTraceInfo, kTraceVoice,
1150                          VoEId(_instanceId,_channelId),
1151                          "Channel::Init() %s (%d/%d/%d/%d) has been added to "
1152                          "the RTP/RTCP receiver",
1153                          codec.plname, codec.pltype, codec.plfreq,
1154                          codec.channels, codec.rate);
1155         }
1156
1157         // Ensure that PCMU is used as default codec on the sending side
1158         if (!STR_CASE_CMP(codec.plname, "PCMU") && (codec.channels == 1))
1159         {
1160             SetSendCodec(codec);
1161         }
1162
1163         // Register default PT for outband 'telephone-event'
1164         if (!STR_CASE_CMP(codec.plname, "telephone-event"))
1165         {
1166             if ((_rtpRtcpModule->RegisterSendPayload(codec) == -1) ||
1167                 (audio_coding_->RegisterReceiveCodec(codec) == -1))
1168             {
1169                 WEBRTC_TRACE(kTraceWarning, kTraceVoice,
1170                              VoEId(_instanceId,_channelId),
1171                              "Channel::Init() failed to register outband "
1172                              "'telephone-event' (%d/%d) correctly",
1173                              codec.pltype, codec.plfreq);
1174             }
1175         }
1176
1177         if (!STR_CASE_CMP(codec.plname, "CN"))
1178         {
1179             if ((audio_coding_->RegisterSendCodec(codec) == -1) ||
1180                 (audio_coding_->RegisterReceiveCodec(codec) == -1) ||
1181                 (_rtpRtcpModule->RegisterSendPayload(codec) == -1))
1182             {
1183                 WEBRTC_TRACE(kTraceWarning, kTraceVoice,
1184                              VoEId(_instanceId,_channelId),
1185                              "Channel::Init() failed to register CN (%d/%d) "
1186                              "correctly - 1",
1187                              codec.pltype, codec.plfreq);
1188             }
1189         }
1190 #ifdef WEBRTC_CODEC_RED
1191         // Register RED to the receiving side of the ACM.
1192         // We will not receive an OnInitializeDecoder() callback for RED.
1193         if (!STR_CASE_CMP(codec.plname, "RED"))
1194         {
1195             if (audio_coding_->RegisterReceiveCodec(codec) == -1)
1196             {
1197                 WEBRTC_TRACE(kTraceWarning, kTraceVoice,
1198                              VoEId(_instanceId,_channelId),
1199                              "Channel::Init() failed to register RED (%d/%d) "
1200                              "correctly",
1201                              codec.pltype, codec.plfreq);
1202             }
1203         }
1204 #endif
1205     }
1206
1207     if (rx_audioproc_->noise_suppression()->set_level(kDefaultNsMode) != 0) {
1208       LOG_FERR1(LS_ERROR, noise_suppression()->set_level, kDefaultNsMode);
1209       return -1;
1210     }
1211     if (rx_audioproc_->gain_control()->set_mode(kDefaultRxAgcMode) != 0) {
1212       LOG_FERR1(LS_ERROR, gain_control()->set_mode, kDefaultRxAgcMode);
1213       return -1;
1214     }
1215
1216     return 0;
1217 }
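// Added note: Init() must run after SetEngineInformation(); it registers the
// RTP/RTCP module with the process thread, initializes the ACM sender and
// receiver, enables compound RTCP, pre-registers every supported receive codec
// (with PCMU as the default send codec, plus telephone-event, CN and RED
// handling), and configures the receive-side NS and AGC defaults.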
1218
1219 int32_t
1220 Channel::SetEngineInformation(Statistics& engineStatistics,
1221                               OutputMixer& outputMixer,
1222                               voe::TransmitMixer& transmitMixer,
1223                               ProcessThread& moduleProcessThread,
1224                               AudioDeviceModule& audioDeviceModule,
1225                               VoiceEngineObserver* voiceEngineObserver,
1226                               CriticalSectionWrapper* callbackCritSect)
1227 {
1228     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1229                  "Channel::SetEngineInformation()");
1230     _engineStatisticsPtr = &engineStatistics;
1231     _outputMixerPtr = &outputMixer;
1232     _transmitMixerPtr = &transmitMixer;
1233     _moduleProcessThreadPtr = &moduleProcessThread;
1234     _audioDeviceModulePtr = &audioDeviceModule;
1235     _voiceEngineObserverPtr = voiceEngineObserver;
1236     _callbackCritSectPtr = callbackCritSect;
1237     return 0;
1238 }
1239
1240 int32_t
1241 Channel::UpdateLocalTimeStamp()
1242 {
1243
1244     _timeStamp += _audioFrame.samples_per_channel_;
1245     return 0;
1246 }
1247
1248 int32_t
1249 Channel::StartPlayout()
1250 {
1251     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1252                  "Channel::StartPlayout()");
1253     if (channel_state_.Get().playing)
1254     {
1255         return 0;
1256     }
1257
1258     if (!_externalMixing) {
1259         // Add the participant as a candidate for mixing.
1260         if (_outputMixerPtr->SetMixabilityStatus(*this, true) != 0)
1261         {
1262             _engineStatisticsPtr->SetLastError(
1263                 VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
1264                 "StartPlayout() failed to add participant to mixer");
1265             return -1;
1266         }
1267     }
1268
1269     channel_state_.SetPlaying(true);
1270     if (RegisterFilePlayingToMixer() != 0)
1271         return -1;
1272
1273     return 0;
1274 }
1275
1276 int32_t
1277 Channel::StopPlayout()
1278 {
1279     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1280                  "Channel::StopPlayout()");
1281     if (!channel_state_.Get().playing)
1282     {
1283         return 0;
1284     }
1285
1286     if (!_externalMixing) {
1287         // Remove the participant as a candidate for mixing.
1288         if (_outputMixerPtr->SetMixabilityStatus(*this, false) != 0)
1289         {
1290             _engineStatisticsPtr->SetLastError(
1291                 VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
1292                 "StopPlayout() failed to remove participant from mixer");
1293             return -1;
1294         }
1295     }
1296
1297     channel_state_.SetPlaying(false);
1298     _outputAudioLevel.Clear();
1299
1300     return 0;
1301 }
1302
1303 int32_t
1304 Channel::StartSend()
1305 {
1306     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1307                  "Channel::StartSend()");
1308     // Resume the previous sequence number which was reset by StopSend().
1309     // This needs to be done before |sending| is set to true.
1310     if (send_sequence_number_)
1311       SetInitSequenceNumber(send_sequence_number_);
1312
1313     if (channel_state_.Get().sending)
1314     {
1315       return 0;
1316     }
1317     channel_state_.SetSending(true);
1318
1319     if (_rtpRtcpModule->SetSendingStatus(true) != 0)
1320     {
1321         _engineStatisticsPtr->SetLastError(
1322             VE_RTP_RTCP_MODULE_ERROR, kTraceError,
1323             "StartSend() RTP/RTCP failed to start sending");
1324         CriticalSectionScoped cs(&_callbackCritSect);
1325         channel_state_.SetSending(false);
1326         return -1;
1327     }
1328
1329     return 0;
1330 }
1331
1332 int32_t
1333 Channel::StopSend()
1334 {
1335     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1336                  "Channel::StopSend()");
1337     if (!channel_state_.Get().sending)
1338     {
1339       return 0;
1340     }
1341     channel_state_.SetSending(false);
1342
1343     // Store the sequence number to be able to pick up the same sequence for
1344     // the next StartSend(). This is needed when the device is restarted;
1345     // otherwise libSRTP might complain about packets being replayed.
1346     // TODO(xians): Remove this workaround after RtpRtcpModule's refactoring
1347     // CL is landed. See issue
1348     // https://code.google.com/p/webrtc/issues/detail?id=2111 .
1349     send_sequence_number_ = _rtpRtcpModule->SequenceNumber();
1350
1351     // Reset the sending SSRC and sequence number, and trigger direct
1352     // transmission of an RTCP BYE.
1353     if (_rtpRtcpModule->SetSendingStatus(false) == -1 ||
1354         _rtpRtcpModule->ResetSendDataCountersRTP() == -1)
1355     {
1356         _engineStatisticsPtr->SetLastError(
1357             VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
1358             "StopSend() RTP/RTCP failed to stop sending");
1359     }
1360
1361     return 0;
1362 }
1363
1364 int32_t
1365 Channel::StartReceiving()
1366 {
1367     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1368                  "Channel::StartReceiving()");
1369     if (channel_state_.Get().receiving)
1370     {
1371         return 0;
1372     }
1373     channel_state_.SetReceiving(true);
1374     _numberOfDiscardedPackets = 0;
1375     return 0;
1376 }
1377
1378 int32_t
1379 Channel::StopReceiving()
1380 {
1381     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1382                  "Channel::StopReceiving()");
1383     if (!channel_state_.Get().receiving)
1384     {
1385         return 0;
1386     }
1387
1388     channel_state_.SetReceiving(false);
1389     return 0;
1390 }
1391
1392 int32_t
1393 Channel::SetNetEQPlayoutMode(NetEqModes mode)
1394 {
1395     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1396                  "Channel::SetNetEQPlayoutMode()");
1397     AudioPlayoutMode playoutMode(voice);
1398     switch (mode)
1399     {
1400         case kNetEqDefault:
1401             playoutMode = voice;
1402             break;
1403         case kNetEqStreaming:
1404             playoutMode = streaming;
1405             break;
1406         case kNetEqFax:
1407             playoutMode = fax;
1408             break;
1409         case kNetEqOff:
1410             playoutMode = off;
1411             break;
1412     }
1413     if (audio_coding_->SetPlayoutMode(playoutMode) != 0)
1414     {
1415         _engineStatisticsPtr->SetLastError(
1416             VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
1417             "SetNetEQPlayoutMode() failed to set playout mode");
1418         return -1;
1419     }
1420     return 0;
1421 }
1422
1423 int32_t
1424 Channel::GetNetEQPlayoutMode(NetEqModes& mode)
1425 {
1426     const AudioPlayoutMode playoutMode = audio_coding_->PlayoutMode();
1427     switch (playoutMode)
1428     {
1429         case voice:
1430             mode = kNetEqDefault;
1431             break;
1432         case streaming:
1433             mode = kNetEqStreaming;
1434             break;
1435         case fax:
1436             mode = kNetEqFax;
1437             break;
1438         case off:
1439             mode = kNetEqOff;
1440     }
1441     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
1442                  VoEId(_instanceId,_channelId),
1443                  "Channel::GetNetEQPlayoutMode() => mode=%u", mode);
1444     return 0;
1445 }
1446
1447 int32_t
1448 Channel::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
1449 {
1450     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1451                  "Channel::RegisterVoiceEngineObserver()");
1452     CriticalSectionScoped cs(&_callbackCritSect);
1453
1454     if (_voiceEngineObserverPtr)
1455     {
1456         _engineStatisticsPtr->SetLastError(
1457             VE_INVALID_OPERATION, kTraceError,
1458             "RegisterVoiceEngineObserver() observer already enabled");
1459         return -1;
1460     }
1461     _voiceEngineObserverPtr = &observer;
1462     return 0;
1463 }
1464
1465 int32_t
1466 Channel::DeRegisterVoiceEngineObserver()
1467 {
1468     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1469                  "Channel::DeRegisterVoiceEngineObserver()");
1470     CriticalSectionScoped cs(&_callbackCritSect);
1471
1472     if (!_voiceEngineObserverPtr)
1473     {
1474         _engineStatisticsPtr->SetLastError(
1475             VE_INVALID_OPERATION, kTraceWarning,
1476             "DeRegisterVoiceEngineObserver() observer already disabled");
1477         return 0;
1478     }
1479     _voiceEngineObserverPtr = NULL;
1480     return 0;
1481 }
1482
1483 int32_t
1484 Channel::GetSendCodec(CodecInst& codec)
1485 {
1486     return (audio_coding_->SendCodec(&codec));
1487 }
1488
1489 int32_t
1490 Channel::GetRecCodec(CodecInst& codec)
1491 {
1492     return (audio_coding_->ReceiveCodec(&codec));
1493 }
1494
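// Registers |codec| as the send codec with the ACM (encoder) and with the
// RTP/RTCP module (payload type and packet size), and updates the send-side
// bitrate observer with the codec's target rate.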
1495 int32_t
1496 Channel::SetSendCodec(const CodecInst& codec)
1497 {
1498     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1499                  "Channel::SetSendCodec()");
1500
1501     if (audio_coding_->RegisterSendCodec(codec) != 0)
1502     {
1503         WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
1504                      "SetSendCodec() failed to register codec to ACM");
1505         return -1;
1506     }
1507
1508     if (_rtpRtcpModule->RegisterSendPayload(codec) != 0)
1509     {
1510         _rtpRtcpModule->DeRegisterSendPayload(codec.pltype);
1511         if (_rtpRtcpModule->RegisterSendPayload(codec) != 0)
1512         {
1513             WEBRTC_TRACE(
1514                     kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
1515                     "SetSendCodec() failed to register codec to"
1516                     " RTP/RTCP module");
1517             return -1;
1518         }
1519     }
1520
1521     if (_rtpRtcpModule->SetAudioPacketSize(codec.pacsize) != 0)
1522     {
1523         WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
1524                      "SetSendCodec() failed to set audio packet size");
1525         return -1;
1526     }
1527
1528     bitrate_controller_->SetBitrateObserver(send_bitrate_observer_.get(),
1529                                             codec.rate, 0, 0);
1530
1531     return 0;
1532 }
1533
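// BitrateObserver callback, invoked when the estimated available bitrate,
// loss fraction or RTT changes. Only the loss fraction is used here: it is
// smoothed by the network predictor and forwarded to the ACM so the encoder
// can adapt its packet-loss robustness (e.g. in-band FEC).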
1534 void
1535 Channel::OnNetworkChanged(const uint32_t bitrate_bps,
1536                           const uint8_t fraction_lost,  // 0 - 255.
1537                           const uint32_t rtt) {
1538   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1539       "Channel::OnNetworkChanged(bitrate_bps=%d, fraction_lost=%d, rtt=%d)",
1540       bitrate_bps, fraction_lost, rtt);
1541   // |fraction_lost| from the BitrateObserver is a short-term observation of
1542   // the recent packet loss rate. Run it through the network predictor to get
1543   // a smoother, more reliable loss rate estimate.
1544   network_predictor_->UpdatePacketLossRate(fraction_lost);
1545   uint8_t loss_rate = network_predictor_->GetLossRate();
1546   // Normalizes rate to 0 - 100.
1547   if (audio_coding_->SetPacketLossRate(100 * loss_rate / 255) != 0) {
1548     _engineStatisticsPtr->SetLastError(VE_AUDIO_CODING_MODULE_ERROR,
1549         kTraceError, "OnNetworkChanged() failed to set packet loss rate");
1550     assert(false);  // This should not happen.
1551   }
1552 }
1553
1554 int32_t
1555 Channel::SetVADStatus(bool enableVAD, ACMVADMode mode, bool disableDTX)
1556 {
1557     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1558                  "Channel::SetVADStatus(mode=%d)", mode);
1559     // To disable VAD, DTX must be disabled too
1560     disableDTX = ((enableVAD == false) ? true : disableDTX);
1561     if (audio_coding_->SetVAD(!disableDTX, enableVAD, mode) != 0)
1562     {
1563         _engineStatisticsPtr->SetLastError(
1564             VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
1565             "SetVADStatus() failed to set VAD");
1566         return -1;
1567     }
1568     return 0;
1569 }
1570
1571 int32_t
1572 Channel::GetVADStatus(bool& enabledVAD, ACMVADMode& mode, bool& disabledDTX)
1573 {
1574     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1575                  "Channel::GetVADStatus");
1576     if (audio_coding_->VAD(&disabledDTX, &enabledVAD, &mode) != 0)
1577     {
1578         _engineStatisticsPtr->SetLastError(
1579             VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
1580             "GetVADStatus() failed to get VAD status");
1581         return -1;
1582     }
1583     disabledDTX = !disabledDTX;
1584     return 0;
1585 }
1586
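// Registers the receive payload type for |codec| with both the RTP receiver
// and the ACM; a |codec.pltype| of -1 instead de-registers the codec. Not
// permitted while the channel is playing or receiving.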
1587 int32_t
1588 Channel::SetRecPayloadType(const CodecInst& codec)
1589 {
1590     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1591                  "Channel::SetRecPayloadType()");
1592
1593     if (channel_state_.Get().playing)
1594     {
1595         _engineStatisticsPtr->SetLastError(
1596             VE_ALREADY_PLAYING, kTraceError,
1597             "SetRecPayloadType() unable to set PT while playing");
1598         return -1;
1599     }
1600     if (channel_state_.Get().receiving)
1601     {
1602         _engineStatisticsPtr->SetLastError(
1603             VE_ALREADY_LISTENING, kTraceError,
1604             "SetRecPayloadType() unable to set PT while listening");
1605         return -1;
1606     }
1607
1608     if (codec.pltype == -1)
1609     {
1610         // De-register the selected codec (RTP/RTCP module and ACM)
1611
1612         int8_t pltype(-1);
1613         CodecInst rxCodec = codec;
1614
1615         // Get payload type for the given codec
1616         rtp_payload_registry_->ReceivePayloadType(
1617             rxCodec.plname,
1618             rxCodec.plfreq,
1619             rxCodec.channels,
1620             (rxCodec.rate < 0) ? 0 : rxCodec.rate,
1621             &pltype);
1622         rxCodec.pltype = pltype;
1623
1624         if (rtp_receiver_->DeRegisterReceivePayload(pltype) != 0)
1625         {
1626             _engineStatisticsPtr->SetLastError(
1627                     VE_RTP_RTCP_MODULE_ERROR,
1628                     kTraceError,
1629                     "SetRecPayloadType() RTP/RTCP-module deregistration "
1630                     "failed");
1631             return -1;
1632         }
1633         if (audio_coding_->UnregisterReceiveCodec(rxCodec.pltype) != 0)
1634         {
1635             _engineStatisticsPtr->SetLastError(
1636                 VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
1637                 "SetRecPayloadType() ACM deregistration failed - 1");
1638             return -1;
1639         }
1640         return 0;
1641     }
1642
1643     if (rtp_receiver_->RegisterReceivePayload(
1644         codec.plname,
1645         codec.pltype,
1646         codec.plfreq,
1647         codec.channels,
1648         (codec.rate < 0) ? 0 : codec.rate) != 0)
1649     {
1650         // First attempt to register failed => de-register and try again
1651         rtp_receiver_->DeRegisterReceivePayload(codec.pltype);
1652         if (rtp_receiver_->RegisterReceivePayload(
1653             codec.plname,
1654             codec.pltype,
1655             codec.plfreq,
1656             codec.channels,
1657             (codec.rate < 0) ? 0 : codec.rate) != 0)
1658         {
1659             _engineStatisticsPtr->SetLastError(
1660                 VE_RTP_RTCP_MODULE_ERROR, kTraceError,
1661                 "SetRecPayloadType() RTP/RTCP-module registration failed");
1662             return -1;
1663         }
1664     }
1665     if (audio_coding_->RegisterReceiveCodec(codec) != 0)
1666     {
1667         audio_coding_->UnregisterReceiveCodec(codec.pltype);
1668         if (audio_coding_->RegisterReceiveCodec(codec) != 0)
1669         {
1670             _engineStatisticsPtr->SetLastError(
1671                 VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
1672                 "SetRecPayloadType() ACM registration failed - 1");
1673             return -1;
1674         }
1675     }
1676     return 0;
1677 }
1678
1679 int32_t
1680 Channel::GetRecPayloadType(CodecInst& codec)
1681 {
1682     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1683                  "Channel::GetRecPayloadType()");
1684     int8_t payloadType(-1);
1685     if (rtp_payload_registry_->ReceivePayloadType(
1686         codec.plname,
1687         codec.plfreq,
1688         codec.channels,
1689         (codec.rate < 0) ? 0 : codec.rate,
1690         &payloadType) != 0)
1691     {
1692         _engineStatisticsPtr->SetLastError(
1693             VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
1694             "GetRecPayloadType() failed to retrieve RX payload type");
1695         return -1;
1696     }
1697     codec.pltype = payloadType;
1698     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1699                  "Channel::GetRecPayloadType() => pltype=%u", codec.pltype);
1700     return 0;
1701 }
1702
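// Assigns a dynamic payload type to the comfort-noise (CN) codec at 16 or
// 32 kHz and registers it with the ACM and the RTP/RTCP module.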
1703 int32_t
1704 Channel::SetSendCNPayloadType(int type, PayloadFrequencies frequency)
1705 {
1706     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1707                  "Channel::SetSendCNPayloadType()");
1708
1709     CodecInst codec;
1710     int32_t samplingFreqHz(-1);
1711     const int kMono = 1;
1712     if (frequency == kFreq32000Hz)
1713         samplingFreqHz = 32000;
1714     else if (frequency == kFreq16000Hz)
1715         samplingFreqHz = 16000;
1716
1717     if (audio_coding_->Codec("CN", &codec, samplingFreqHz, kMono) == -1)
1718     {
1719         _engineStatisticsPtr->SetLastError(
1720             VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
1721             "SetSendCNPayloadType() failed to retrieve default CN codec "
1722             "settings");
1723         return -1;
1724     }
1725
1726     // Modify the payload type (must be set to dynamic range)
1727     codec.pltype = type;
1728
1729     if (audio_coding_->RegisterSendCodec(codec) != 0)
1730     {
1731         _engineStatisticsPtr->SetLastError(
1732             VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
1733             "SetSendCNPayloadType() failed to register CN to ACM");
1734         return -1;
1735     }
1736
1737     if (_rtpRtcpModule->RegisterSendPayload(codec) != 0)
1738     {
1739         _rtpRtcpModule->DeRegisterSendPayload(codec.pltype);
1740         if (_rtpRtcpModule->RegisterSendPayload(codec) != 0)
1741         {
1742             _engineStatisticsPtr->SetLastError(
1743                 VE_RTP_RTCP_MODULE_ERROR, kTraceError,
1744                 "SetSendCNPayloadType() failed to register CN to RTP/RTCP "
1745                 "module");
1746             return -1;
1747         }
1748     }
1749     return 0;
1750 }
1751
1752 int Channel::SetOpusMaxBandwidth(int bandwidth_hz) {
1753   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
1754                "Channel::SetOpusMaxBandwidth()");
1755
1756   if (audio_coding_->SetOpusMaxBandwidth(bandwidth_hz) != 0) {
1757     _engineStatisticsPtr->SetLastError(
1758         VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
1759         "SetOpusMaxBandwidth() failed to set maximum encoding bandwidth");
1760     return -1;
1761   }
1762   return 0;
1763 }
1764
1765 int32_t Channel::RegisterExternalTransport(Transport& transport)
1766 {
1767     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
1768                "Channel::RegisterExternalTransport()");
1769
1770     CriticalSectionScoped cs(&_callbackCritSect);
1771
1772     if (_externalTransport)
1773     {
1774         _engineStatisticsPtr->SetLastError(
1775             VE_INVALID_OPERATION, kTraceError,
1776             "RegisterExternalTransport() external transport already enabled");
1777         return -1;
1778     }
1779     _externalTransport = true;
1780     _transportPtr = &transport;
1781     return 0;
1782 }
1783
1784 int32_t
1785 Channel::DeRegisterExternalTransport()
1786 {
1787     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1788                  "Channel::DeRegisterExternalTransport()");
1789
1790     CriticalSectionScoped cs(&_callbackCritSect);
1791
1792     if (!_transportPtr)
1793     {
1794         _engineStatisticsPtr->SetLastError(
1795             VE_INVALID_OPERATION, kTraceWarning,
1796             "DeRegisterExternalTransport() external transport already "
1797             "disabled");
1798         return 0;
1799     }
1800     _externalTransport = false;
1801     _transportPtr = NULL;
1802     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1803                  "DeRegisterExternalTransport() external transport is now disabled");
1804     return 0;
1805 }
1806
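// Entry point for incoming RTP packets. Updates the playout timestamp,
// optionally dumps the packet to file, parses and validates the RTP header,
// updates the receive statistics, forwards the packet to the ViE bandwidth
// estimator when one is attached, and finally delivers it via ReceivePacket().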
1807 int32_t Channel::ReceivedRTPPacket(const int8_t* data, int32_t length,
1808                                    const PacketTime& packet_time) {
1809   WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
1810                "Channel::ReceivedRTPPacket()");
1811
1812   // Store playout timestamp for the received RTP packet
1813   UpdatePlayoutTimestamp(false);
1814
1815   // Dump the RTP packet to a file (if RTP dump is enabled).
1816   if (_rtpDumpIn.DumpPacket((const uint8_t*)data,
1817                             (uint16_t)length) == -1) {
1818     WEBRTC_TRACE(kTraceWarning, kTraceVoice,
1819                  VoEId(_instanceId,_channelId),
1820                  "Channel::ReceivedRTPPacket() RTP dump to input file failed");
1821   }
1822   const uint8_t* received_packet = reinterpret_cast<const uint8_t*>(data);
1823   RTPHeader header;
1824   if (!rtp_header_parser_->Parse(received_packet, length, &header)) {
1825     WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVoice, _channelId,
1826                  "Incoming packet: invalid RTP header");
1827     return -1;
1828   }
1829   header.payload_type_frequency =
1830       rtp_payload_registry_->GetPayloadTypeFrequency(header.payloadType);
1831   if (header.payload_type_frequency < 0)
1832     return -1;
1833   bool in_order = IsPacketInOrder(header);
1834   rtp_receive_statistics_->IncomingPacket(header, length,
1835       IsPacketRetransmitted(header, in_order));
1836   rtp_payload_registry_->SetIncomingPayloadType(header);
1837
1838   // Forward any packets to ViE bandwidth estimator, if enabled.
1839   {
1840     CriticalSectionScoped cs(&_callbackCritSect);
1841     if (vie_network_) {
1842       int64_t arrival_time_ms;
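      // |packet_time.timestamp| is assumed to be the arrival time in
      // microseconds; round it to the nearest millisecond. Fall back to the
      // current tick count when no arrival time was supplied (-1).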
1843       if (packet_time.timestamp != -1) {
1844         arrival_time_ms = (packet_time.timestamp + 500) / 1000;
1845       } else {
1846         arrival_time_ms = TickTime::MillisecondTimestamp();
1847       }
1848       int payload_length = length - header.headerLength;
1849       vie_network_->ReceivedBWEPacket(video_channel_, arrival_time_ms,
1850                                       payload_length, header);
1851     }
1852   }
1853
1854   return ReceivePacket(received_packet, length, header, in_order) ? 0 : -1;
1855 }
1856
1857 bool Channel::ReceivePacket(const uint8_t* packet,
1858                             int packet_length,
1859                             const RTPHeader& header,
1860                             bool in_order) {
1861   if (rtp_payload_registry_->IsEncapsulated(header)) {
1862     return HandleEncapsulation(packet, packet_length, header);
1863   }
1864   const uint8_t* payload = packet + header.headerLength;
1865   int payload_length = packet_length - header.headerLength;
1866   assert(payload_length >= 0);
1867   PayloadUnion payload_specific;
1868   if (!rtp_payload_registry_->GetPayloadSpecifics(header.payloadType,
1869                                                   &payload_specific)) {
1870     return false;
1871   }
1872   return rtp_receiver_->IncomingRtpPacket(header, payload, payload_length,
1873                                           payload_specific, in_order);
1874 }
1875
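// Unwraps an RTX-encapsulated retransmission: strips the RTX header,
// restores the original RTP header through the payload registry, and
// re-injects the recovered packet via OnRecoveredPacket().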
1876 bool Channel::HandleEncapsulation(const uint8_t* packet,
1877                                   int packet_length,
1878                                   const RTPHeader& header) {
1879   if (!rtp_payload_registry_->IsRtx(header))
1880     return false;
1881
1882   // Remove the RTX header and parse the original RTP header.
1883   if (packet_length < header.headerLength)
1884     return false;
1885   if (packet_length > kVoiceEngineMaxIpPacketSizeBytes)
1886     return false;
1887   if (restored_packet_in_use_) {
1888     WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVoice, _channelId,
1889                  "Multiple RTX headers detected, dropping packet");
1890     return false;
1891   }
1892   uint8_t* restored_packet_ptr = restored_packet_;
1893   if (!rtp_payload_registry_->RestoreOriginalPacket(
1894       &restored_packet_ptr, packet, &packet_length, rtp_receiver_->SSRC(),
1895       header)) {
1896     WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVoice, _channelId,
1897                  "Incoming RTX packet: invalid RTP header");
1898     return false;
1899   }
1900   restored_packet_in_use_ = true;
1901   bool ret = OnRecoveredPacket(restored_packet_ptr, packet_length);
1902   restored_packet_in_use_ = false;
1903   return ret;
1904 }
1905
1906 bool Channel::IsPacketInOrder(const RTPHeader& header) const {
1907   StreamStatistician* statistician =
1908       rtp_receive_statistics_->GetStatistician(header.ssrc);
1909   if (!statistician)
1910     return false;
1911   return statistician->IsPacketInOrder(header.sequenceNumber);
1912 }
1913
1914 bool Channel::IsPacketRetransmitted(const RTPHeader& header,
1915                                     bool in_order) const {
1916   // Retransmissions are handled separately if RTX is enabled.
1917   if (rtp_payload_registry_->RtxEnabled())
1918     return false;
1919   StreamStatistician* statistician =
1920       rtp_receive_statistics_->GetStatistician(header.ssrc);
1921   if (!statistician)
1922     return false;
1923   // Check if this is a retransmission.
1924   uint16_t min_rtt = 0;
1925   _rtpRtcpModule->RTT(rtp_receiver_->SSRC(), NULL, NULL, &min_rtt, NULL);
1926   return !in_order &&
1927       statistician->IsRetransmitOfOldPacket(header, min_rtt);
1928 }
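// Entry point for incoming RTCP packets. Updates the playout timestamp,
// optionally dumps the packet to file, hands it to the RTP/RTCP module for
// parsing, and refreshes the remote NTP time estimator.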
1929
1930 int32_t Channel::ReceivedRTCPPacket(const int8_t* data, int32_t length) {
1931   WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
1932                "Channel::ReceivedRTCPPacket()");
1933   // Store playout timestamp for the received RTCP packet
1934   UpdatePlayoutTimestamp(true);
1935
1936   // Dump the RTCP packet to a file (if RTP dump is enabled).
1937   if (_rtpDumpIn.DumpPacket((const uint8_t*)data,
1938                             (uint16_t)length) == -1) {
1939     WEBRTC_TRACE(kTraceWarning, kTraceVoice,
1940                  VoEId(_instanceId,_channelId),
1941                  "Channel::ReceivedRTCPPacket() RTCP dump to input file failed");
1942   }
1943
1944   // Deliver RTCP packet to RTP/RTCP module for parsing
1945   if (_rtpRtcpModule->IncomingRtcpPacket((const uint8_t*)data,
1946                                          (uint16_t)length) == -1) {
1947     _engineStatisticsPtr->SetLastError(
1948         VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceWarning,
1949         "Channel::ReceivedRTCPPacket() RTCP packet is invalid");
1950   }
1951
1952   ntp_estimator_->UpdateRtcpTimestamp(rtp_receiver_->SSRC(),
1953                                       _rtpRtcpModule.get());
1954   return 0;
1955 }
1956
1957 int Channel::StartPlayingFileLocally(const char* fileName,
1958                                      bool loop,
1959                                      FileFormats format,
1960                                      int startPosition,
1961                                      float volumeScaling,
1962                                      int stopPosition,
1963                                      const CodecInst* codecInst)
1964 {
1965     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1966                  "Channel::StartPlayingFileLocally(fileNameUTF8[]=%s, loop=%d,"
1967                  " format=%d, volumeScaling=%5.3f, startPosition=%d, "
1968                  "stopPosition=%d)", fileName, loop, format, volumeScaling,
1969                  startPosition, stopPosition);
1970
1971     if (channel_state_.Get().output_file_playing)
1972     {
1973         _engineStatisticsPtr->SetLastError(
1974             VE_ALREADY_PLAYING, kTraceError,
1975             "StartPlayingFileLocally() is already playing");
1976         return -1;
1977     }
1978
1979     {
1980         CriticalSectionScoped cs(&_fileCritSect);
1981
1982         if (_outputFilePlayerPtr)
1983         {
1984             _outputFilePlayerPtr->RegisterModuleFileCallback(NULL);
1985             FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
1986             _outputFilePlayerPtr = NULL;
1987         }
1988
1989         _outputFilePlayerPtr = FilePlayer::CreateFilePlayer(
1990             _outputFilePlayerId, (const FileFormats)format);
1991
1992         if (_outputFilePlayerPtr == NULL)
1993         {
1994             _engineStatisticsPtr->SetLastError(
1995                 VE_INVALID_ARGUMENT, kTraceError,
1996                 "StartPlayingFileLocally() filePlayer format is not correct");
1997             return -1;
1998         }
1999
2000         const uint32_t notificationTime(0);
2001
2002         if (_outputFilePlayerPtr->StartPlayingFile(
2003                 fileName,
2004                 loop,
2005                 startPosition,
2006                 volumeScaling,
2007                 notificationTime,
2008                 stopPosition,
2009                 (const CodecInst*)codecInst) != 0)
2010         {
2011             _engineStatisticsPtr->SetLastError(
2012                 VE_BAD_FILE, kTraceError,
2013                 "StartPlayingFile() failed to start file playout");
2014             _outputFilePlayerPtr->StopPlayingFile();
2015             FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
2016             _outputFilePlayerPtr = NULL;
2017             return -1;
2018         }
2019         _outputFilePlayerPtr->RegisterModuleFileCallback(this);
2020         channel_state_.SetOutputFilePlaying(true);
2021     }
2022
2023     if (RegisterFilePlayingToMixer() != 0)
2024         return -1;
2025
2026     return 0;
2027 }
2028
2029 int Channel::StartPlayingFileLocally(InStream* stream,
2030                                      FileFormats format,
2031                                      int startPosition,
2032                                      float volumeScaling,
2033                                      int stopPosition,
2034                                      const CodecInst* codecInst)
2035 {
2036     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2037                  "Channel::StartPlayingFileLocally(format=%d,"
2038                  " volumeScaling=%5.3f, startPosition=%d, stopPosition=%d)",
2039                  format, volumeScaling, startPosition, stopPosition);
2040
2041     if (stream == NULL)
2042     {
2043         _engineStatisticsPtr->SetLastError(
2044             VE_BAD_FILE, kTraceError,
2045             "StartPlayingFileLocally() NULL as input stream");
2046         return -1;
2047     }
2048
2049
2050     if (channel_state_.Get().output_file_playing)
2051     {
2052         _engineStatisticsPtr->SetLastError(
2053             VE_ALREADY_PLAYING, kTraceError,
2054             "StartPlayingFileLocally() is already playing");
2055         return -1;
2056     }
2057
2058     {
2059       CriticalSectionScoped cs(&_fileCritSect);
2060
2061       // Destroy the old instance
2062       if (_outputFilePlayerPtr)
2063       {
2064           _outputFilePlayerPtr->RegisterModuleFileCallback(NULL);
2065           FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
2066           _outputFilePlayerPtr = NULL;
2067       }
2068
2069       // Create the instance
2070       _outputFilePlayerPtr = FilePlayer::CreateFilePlayer(
2071           _outputFilePlayerId,
2072           (const FileFormats)format);
2073
2074       if (_outputFilePlayerPtr == NULL)
2075       {
2076           _engineStatisticsPtr->SetLastError(
2077               VE_INVALID_ARGUMENT, kTraceError,
2078               "StartPlayingFileLocally() filePlayer format is not correct");
2079           return -1;
2080       }
2081
2082       const uint32_t notificationTime(0);
2083
2084       if (_outputFilePlayerPtr->StartPlayingFile(*stream, startPosition,
2085                                                  volumeScaling,
2086                                                  notificationTime,
2087                                                  stopPosition, codecInst) != 0)
2088       {
2089           _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
2090                                              "StartPlayingFile() failed to "
2091                                              "start file playout");
2092           _outputFilePlayerPtr->StopPlayingFile();
2093           FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
2094           _outputFilePlayerPtr = NULL;
2095           return -1;
2096       }
2097       _outputFilePlayerPtr->RegisterModuleFileCallback(this);
2098       channel_state_.SetOutputFilePlaying(true);
2099     }
2100
2101     if (RegisterFilePlayingToMixer() != 0)
2102         return -1;
2103
2104     return 0;
2105 }
2106
2107 int Channel::StopPlayingFileLocally()
2108 {
2109     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2110                  "Channel::StopPlayingFileLocally()");
2111
2112     if (!channel_state_.Get().output_file_playing)
2113     {
2114         _engineStatisticsPtr->SetLastError(
2115             VE_INVALID_OPERATION, kTraceWarning,
2116             "StopPlayingFileLocally() is not playing");
2117         return 0;
2118     }
2119
2120     {
2121         CriticalSectionScoped cs(&_fileCritSect);
2122
2123         if (_outputFilePlayerPtr->StopPlayingFile() != 0)
2124         {
2125             _engineStatisticsPtr->SetLastError(
2126                 VE_STOP_RECORDING_FAILED, kTraceError,
2127                 "StopPlayingFile() could not stop playing");
2128             return -1;
2129         }
2130         _outputFilePlayerPtr->RegisterModuleFileCallback(NULL);
2131         FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
2132         _outputFilePlayerPtr = NULL;
2133         channel_state_.SetOutputFilePlaying(false);
2134     }
2135     // _fileCritSect cannot be taken while calling
2136     // SetAnonymousMixabilityStatus(). Refer to the comments in
2137     // RegisterFilePlayingToMixer() for more details.
2138     if (_outputMixerPtr->SetAnonymousMixabilityStatus(*this, false) != 0)
2139     {
2140         _engineStatisticsPtr->SetLastError(
2141             VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
2142             "StopPlayingFile() failed to stop participant from playing as "
2143             "file in the mixer");
2144         return -1;
2145     }
2146
2147     return 0;
2148 }
2149
2150 int Channel::IsPlayingFileLocally() const
2151 {
2152     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2153                  "Channel::IsPlayingFileLocally()");
2154
2155     return channel_state_.Get().output_file_playing;
2156 }
2157
2158 int Channel::RegisterFilePlayingToMixer()
2159 {
2160     // Return success without registering the file playing with the mixer if:
2161     // 1. a file is played before playout has been started on this channel, or
2162     // 2. playout is started without any file playing on this channel.
2163     if (!channel_state_.Get().playing ||
2164         !channel_state_.Get().output_file_playing)
2165     {
2166         return 0;
2167     }
2168
2169     // |_fileCritSect| cannot be taken while calling
2170     // SetAnonymousMixabilityStatus() since as soon as the participant is added
2171     // frames can be pulled by the mixer. Since the frames are generated from
2172     // the file, _fileCritSect will be taken. This would result in a deadlock.
2173     if (_outputMixerPtr->SetAnonymousMixabilityStatus(*this, true) != 0)
2174     {
2175         channel_state_.SetOutputFilePlaying(false);
2176         CriticalSectionScoped cs(&_fileCritSect);
2177         _engineStatisticsPtr->SetLastError(
2178             VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
2179             "StartPlayingFile() failed to add participant as file to mixer");
2180         _outputFilePlayerPtr->StopPlayingFile();
2181         FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
2182         _outputFilePlayerPtr = NULL;
2183         return -1;
2184     }
2185
2186     return 0;
2187 }
2188
2189 int Channel::StartPlayingFileAsMicrophone(const char* fileName,
2190                                           bool loop,
2191                                           FileFormats format,
2192                                           int startPosition,
2193                                           float volumeScaling,
2194                                           int stopPosition,
2195                                           const CodecInst* codecInst)
2196 {
2197     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2198                  "Channel::StartPlayingFileAsMicrophone(fileNameUTF8[]=%s, "
2199                  "loop=%d, format=%d, volumeScaling=%5.3f, startPosition=%d, "
2200                  "stopPosition=%d)", fileName, loop, format, volumeScaling,
2201                  startPosition, stopPosition);
2202
2203     CriticalSectionScoped cs(&_fileCritSect);
2204
2205     if (channel_state_.Get().input_file_playing)
2206     {
2207         _engineStatisticsPtr->SetLastError(
2208             VE_ALREADY_PLAYING, kTraceWarning,
2209             "StartPlayingFileAsMicrophone() filePlayer is playing");
2210         return 0;
2211     }
2212
2213     // Destroy the old instance
2214     if (_inputFilePlayerPtr)
2215     {
2216         _inputFilePlayerPtr->RegisterModuleFileCallback(NULL);
2217         FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
2218         _inputFilePlayerPtr = NULL;
2219     }
2220
2221     // Create the instance
2222     _inputFilePlayerPtr = FilePlayer::CreateFilePlayer(
2223         _inputFilePlayerId, (const FileFormats)format);
2224
2225     if (_inputFilePlayerPtr == NULL)
2226     {
2227         _engineStatisticsPtr->SetLastError(
2228             VE_INVALID_ARGUMENT, kTraceError,
2229             "StartPlayingFileAsMicrophone() filePlayer format is not correct");
2230         return -1;
2231     }
2232
2233     const uint32_t notificationTime(0);
2234
2235     if (_inputFilePlayerPtr->StartPlayingFile(
2236         fileName,
2237         loop,
2238         startPosition,
2239         volumeScaling,
2240         notificationTime,
2241         stopPosition,
2242         (const CodecInst*)codecInst) != 0)
2243     {
2244         _engineStatisticsPtr->SetLastError(
2245             VE_BAD_FILE, kTraceError,
2246             "StartPlayingFile() failed to start file playout");
2247         _inputFilePlayerPtr->StopPlayingFile();
2248         FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
2249         _inputFilePlayerPtr = NULL;
2250         return -1;
2251     }
2252     _inputFilePlayerPtr->RegisterModuleFileCallback(this);
2253     channel_state_.SetInputFilePlaying(true);
2254
2255     return 0;
2256 }
2257
2258 int Channel::StartPlayingFileAsMicrophone(InStream* stream,
2259                                           FileFormats format,
2260                                           int startPosition,
2261                                           float volumeScaling,
2262                                           int stopPosition,
2263                                           const CodecInst* codecInst)
2264 {
2265     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2266                  "Channel::StartPlayingFileAsMicrophone(format=%d, "
2267                  "volumeScaling=%5.3f, startPosition=%d, stopPosition=%d)",
2268                  format, volumeScaling, startPosition, stopPosition);
2269
2270     if (stream == NULL)
2271     {
2272         _engineStatisticsPtr->SetLastError(
2273             VE_BAD_FILE, kTraceError,
2274             "StartPlayingFileAsMicrophone() NULL as input stream");
2275         return -1;
2276     }
2277
2278     CriticalSectionScoped cs(&_fileCritSect);
2279
2280     if (channel_state_.Get().input_file_playing)
2281     {
2282         _engineStatisticsPtr->SetLastError(
2283             VE_ALREADY_PLAYING, kTraceWarning,
2284             "StartPlayingFileAsMicrophone() is playing");
2285         return 0;
2286     }
2287
2288     // Destroy the old instance
2289     if (_inputFilePlayerPtr)
2290     {
2291         _inputFilePlayerPtr->RegisterModuleFileCallback(NULL);
2292         FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
2293         _inputFilePlayerPtr = NULL;
2294     }
2295
2296     // Create the instance
2297     _inputFilePlayerPtr = FilePlayer::CreateFilePlayer(
2298         _inputFilePlayerId, (const FileFormats)format);
2299
2300     if (_inputFilePlayerPtr == NULL)
2301     {
2302         _engineStatisticsPtr->SetLastError(
2303             VE_INVALID_ARGUMENT, kTraceError,
2304             "StartPlayingFileAsMicrophone() filePlayer format is not correct");
2305         return -1;
2306     }
2307
2308     const uint32_t notificationTime(0);
2309
2310     if (_inputFilePlayerPtr->StartPlayingFile(*stream, startPosition,
2311                                               volumeScaling, notificationTime,
2312                                               stopPosition, codecInst) != 0)
2313     {
2314         _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
2315                                            "StartPlayingFile() failed to start "
2316                                            "file playout");
2317         _inputFilePlayerPtr->StopPlayingFile();
2318         FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
2319         _inputFilePlayerPtr = NULL;
2320         return -1;
2321     }
2322
2323     _inputFilePlayerPtr->RegisterModuleFileCallback(this);
2324     channel_state_.SetInputFilePlaying(true);
2325
2326     return 0;
2327 }
2328
2329 int Channel::StopPlayingFileAsMicrophone()
2330 {
2331     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2332                  "Channel::StopPlayingFileAsMicrophone()");
2333
2334     CriticalSectionScoped cs(&_fileCritSect);
2335
2336     if (!channel_state_.Get().input_file_playing)
2337     {
2338         _engineStatisticsPtr->SetLastError(
2339             VE_INVALID_OPERATION, kTraceWarning,
2340             "StopPlayingFileAsMicrophone() is not playing");
2341         return 0;
2342     }
2343
2344     if (_inputFilePlayerPtr->StopPlayingFile() != 0)
2345     {
2346         _engineStatisticsPtr->SetLastError(
2347             VE_STOP_RECORDING_FAILED, kTraceError,
2348             "StopPlayingFile() could not stop playing");
2349         return -1;
2350     }
2351     _inputFilePlayerPtr->RegisterModuleFileCallback(NULL);
2352     FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
2353     _inputFilePlayerPtr = NULL;
2354     channel_state_.SetInputFilePlaying(false);
2355
2356     return 0;
2357 }
2358
2359 int Channel::IsPlayingFileAsMicrophone() const
2360 {
2361     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2362                  "Channel::IsPlayingFileAsMicrophone()");
2363     return channel_state_.Get().input_file_playing;
2364 }
2365
2366 int Channel::StartRecordingPlayout(const char* fileName,
2367                                    const CodecInst* codecInst)
2368 {
2369     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2370                  "Channel::StartRecordingPlayout(fileName=%s)", fileName);
2371
2372     if (_outputFileRecording)
2373     {
2374         WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
2375                      "StartRecordingPlayout() is already recording");
2376         return 0;
2377     }
2378
2379     FileFormats format;
2380     const uint32_t notificationTime(0); // Not supported in VoE
2381     CodecInst dummyCodec = {100, "L16", 16000, 320, 1, 320000};
2382
2383     if ((codecInst != NULL) &&
2384       ((codecInst->channels < 1) || (codecInst->channels > 2)))
2385     {
2386         _engineStatisticsPtr->SetLastError(
2387             VE_BAD_ARGUMENT, kTraceError,
2388             "StartRecordingPlayout() invalid compression");
2389         return(-1);
2390     }
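    // Select the output file format: 16 kHz PCM when no codec is specified,
    // WAV for L16/PCMU/PCMA, and a compressed container for all other codecs.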
2391     if (codecInst == NULL)
2392     {
2393         format = kFileFormatPcm16kHzFile;
2394         codecInst = &dummyCodec;
2395     }
2396     else if((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
2397         (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
2398         (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
2399     {
2400         format = kFileFormatWavFile;
2401     }
2402     else
2403     {
2404         format = kFileFormatCompressedFile;
2405     }
2406
2407     CriticalSectionScoped cs(&_fileCritSect);
2408
2409     // Destroy the old instance
2410     if (_outputFileRecorderPtr)
2411     {
2412         _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
2413         FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
2414         _outputFileRecorderPtr = NULL;
2415     }
2416
2417     _outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
2418         _outputFileRecorderId, (const FileFormats)format);
2419     if (_outputFileRecorderPtr == NULL)
2420     {
2421         _engineStatisticsPtr->SetLastError(
2422             VE_INVALID_ARGUMENT, kTraceError,
2423             "StartRecordingPlayout() fileRecorder format is not correct");
2424         return -1;
2425     }
2426
2427     if (_outputFileRecorderPtr->StartRecordingAudioFile(
2428         fileName, (const CodecInst&)*codecInst, notificationTime) != 0)
2429     {
2430         _engineStatisticsPtr->SetLastError(
2431             VE_BAD_FILE, kTraceError,
2432             "StartRecordingAudioFile() failed to start file recording");
2433         _outputFileRecorderPtr->StopRecording();
2434         FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
2435         _outputFileRecorderPtr = NULL;
2436         return -1;
2437     }
2438     _outputFileRecorderPtr->RegisterModuleFileCallback(this);
2439     _outputFileRecording = true;
2440
2441     return 0;
2442 }
2443
2444 int Channel::StartRecordingPlayout(OutStream* stream,
2445                                    const CodecInst* codecInst)
2446 {
2447     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2448                  "Channel::StartRecordingPlayout()");
2449
2450     if (_outputFileRecording)
2451     {
2452         WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
2453                      "StartRecordingPlayout() is already recording");
2454         return 0;
2455     }
2456
2457     FileFormats format;
2458     const uint32_t notificationTime(0); // Not supported in VoE
2459     CodecInst dummyCodec = {100, "L16", 16000, 320, 1, 320000};
2460
2461     if (codecInst != NULL && codecInst->channels != 1)
2462     {
2463         _engineStatisticsPtr->SetLastError(
2464             VE_BAD_ARGUMENT, kTraceError,
2465             "StartRecordingPlayout() invalid compression");
2466         return(-1);
2467     }
2468     if (codecInst == NULL)
2469     {
2470         format = kFileFormatPcm16kHzFile;
2471         codecInst = &dummyCodec;
2472     }
2473     else if((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
2474         (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
2475         (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
2476     {
2477         format = kFileFormatWavFile;
2478     }
2479     else
2480     {
2481         format = kFileFormatCompressedFile;
2482     }
2483
2484     CriticalSectionScoped cs(&_fileCritSect);
2485
2486     // Destroy the old instance
2487     if (_outputFileRecorderPtr)
2488     {
2489         _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
2490         FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
2491         _outputFileRecorderPtr = NULL;
2492     }
2493
2494     _outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
2495         _outputFileRecorderId, (const FileFormats)format);
2496     if (_outputFileRecorderPtr == NULL)
2497     {
2498         _engineStatisticsPtr->SetLastError(
2499             VE_INVALID_ARGUMENT, kTraceError,
2500             "StartRecordingPlayout() fileRecorder format is not correct");
2501         return -1;
2502     }
2503
2504     if (_outputFileRecorderPtr->StartRecordingAudioFile(*stream, *codecInst,
2505                                                         notificationTime) != 0)
2506     {
2507         _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
2508                                            "StartRecordingPlayout() failed to "
2509                                            "start file recording");
2510         _outputFileRecorderPtr->StopRecording();
2511         FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
2512         _outputFileRecorderPtr = NULL;
2513         return -1;
2514     }
2515
2516     _outputFileRecorderPtr->RegisterModuleFileCallback(this);
2517     _outputFileRecording = true;
2518
2519     return 0;
2520 }
2521
2522 int Channel::StopRecordingPlayout()
2523 {
2524     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
2525                  "Channel::StopRecordingPlayout()");
2526
2527     if (!_outputFileRecording)
2528     {
2529         WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
2530                      "StopRecordingPlayout() is not recording");
2531         return -1;
2532     }
2533
2534
2535     CriticalSectionScoped cs(&_fileCritSect);
2536
2537     if (_outputFileRecorderPtr->StopRecording() != 0)
2538     {
2539         _engineStatisticsPtr->SetLastError(
2540             VE_STOP_RECORDING_FAILED, kTraceError,
2541             "StopRecording() could not stop recording");
2542         return(-1);
2543     }
2544     _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
2545     FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
2546     _outputFileRecorderPtr = NULL;
2547     _outputFileRecording = false;
2548
2549     return 0;
2550 }
2551
2552 void
2553 Channel::SetMixWithMicStatus(bool mix)
2554 {
2555     CriticalSectionScoped cs(&_fileCritSect);
2556     _mixFileWithMicrophone = mix;
2557 }
2558
2559 int
2560 Channel::GetSpeechOutputLevel(uint32_t& level) const
2561 {
2562     int8_t currentLevel = _outputAudioLevel.Level();
2563     level = static_cast<uint32_t>(currentLevel);
2564     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
2565                VoEId(_instanceId,_channelId),
2566                "GetSpeechOutputLevel() => level=%u", level);
2567     return 0;
2568 }
2569
2570 int
2571 Channel::GetSpeechOutputLevelFullRange(uint32_t& level) const
2572 {
2573     int16_t currentLevel = _outputAudioLevel.LevelFullRange();
2574     level = static_cast<uint32_t>(currentLevel);
2575     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
2576                VoEId(_instanceId,_channelId),
2577                "GetSpeechOutputLevelFullRange() => level=%u", level);
2578     return 0;
2579 }
2580
2581 int
2582 Channel::SetMute(bool enable)
2583 {
2584     CriticalSectionScoped cs(&volume_settings_critsect_);
2585     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2586                "Channel::SetMute(enable=%d)", enable);
2587     _mute = enable;
2588     return 0;
2589 }
2590
2591 bool
2592 Channel::Mute() const
2593 {
2594     CriticalSectionScoped cs(&volume_settings_critsect_);
2595     return _mute;
2596 }
2597
2598 int
2599 Channel::SetOutputVolumePan(float left, float right)
2600 {
2601     CriticalSectionScoped cs(&volume_settings_critsect_);
2602     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2603                "Channel::SetOutputVolumePan()");
2604     _panLeft = left;
2605     _panRight = right;
2606     return 0;
2607 }
2608
2609 int
2610 Channel::GetOutputVolumePan(float& left, float& right) const
2611 {
2612     CriticalSectionScoped cs(&volume_settings_critsect_);
2613     left = _panLeft;
2614     right = _panRight;
2615     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
2616                VoEId(_instanceId,_channelId),
2617                "GetOutputVolumePan() => left=%3.2f, right=%3.2f", left, right);
2618     return 0;
2619 }
2620
2621 int
2622 Channel::SetChannelOutputVolumeScaling(float scaling)
2623 {
2624     CriticalSectionScoped cs(&volume_settings_critsect_);
2625     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2626                "Channel::SetChannelOutputVolumeScaling()");
2627     _outputGain = scaling;
2628     return 0;
2629 }
2630
2631 int
2632 Channel::GetChannelOutputVolumeScaling(float& scaling) const
2633 {
2634     CriticalSectionScoped cs(&volume_settings_critsect_);
2635     scaling = _outputGain;
2636     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
2637                VoEId(_instanceId,_channelId),
2638                "GetChannelOutputVolumeScaling() => scaling=%3.2f", scaling);
2639     return 0;
2640 }
2641
2642 int Channel::SendTelephoneEventOutband(unsigned char eventCode,
2643                                        int lengthMs, int attenuationDb,
2644                                        bool playDtmfEvent)
2645 {
2646     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
2647                "Channel::SendTelephoneEventOutband(..., playDtmfEvent=%d)",
2648                playDtmfEvent);
2649
2650     _playOutbandDtmfEvent = playDtmfEvent;
2651
2652     if (_rtpRtcpModule->SendTelephoneEventOutband(eventCode, lengthMs,
2653                                                  attenuationDb) != 0)
2654     {
2655         _engineStatisticsPtr->SetLastError(
2656             VE_SEND_DTMF_FAILED,
2657             kTraceWarning,
2658             "SendTelephoneEventOutband() failed to send event");
2659         return -1;
2660     }
2661     return 0;
2662 }
2663
2664 int Channel::SendTelephoneEventInband(unsigned char eventCode,
2665                                          int lengthMs,
2666                                          int attenuationDb,
2667                                          bool playDtmfEvent)
2668 {
2669     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
2670                "Channel::SendTelephoneEventInband(..., playDtmfEvent=%d)",
2671                playDtmfEvent);
2672
2673     _playInbandDtmfEvent = playDtmfEvent;
2674     _inbandDtmfQueue.AddDtmf(eventCode, lengthMs, attenuationDb);
2675
2676     return 0;
2677 }
2678
2679 int
2680 Channel::SetDtmfPlayoutStatus(bool enable)
2681 {
2682     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2683                "Channel::SetDtmfPlayoutStatus()");
2684     if (audio_coding_->SetDtmfPlayoutStatus(enable) != 0)
2685     {
2686         _engineStatisticsPtr->SetLastError(
2687             VE_AUDIO_CODING_MODULE_ERROR, kTraceWarning,
2688             "SetDtmfPlayoutStatus() failed to set Dtmf playout");
2689         return -1;
2690     }
2691     return 0;
2692 }
2693
2694 bool
2695 Channel::DtmfPlayoutStatus() const
2696 {
2697     return audio_coding_->DtmfPlayoutStatus();
2698 }
2699
2700 int
2701 Channel::SetSendTelephoneEventPayloadType(unsigned char type)
2702 {
2703     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2704                "Channel::SetSendTelephoneEventPayloadType()");
2705     if (type > 127)
2706     {
2707         _engineStatisticsPtr->SetLastError(
2708             VE_INVALID_ARGUMENT, kTraceError,
2709             "SetSendTelephoneEventPayloadType() invalid type");
2710         return -1;
2711     }
2712     CodecInst codec = {};
2713     codec.plfreq = 8000;
2714     codec.pltype = type;
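    // "telephone-event" is 15 characters; copy 16 bytes to include the
    // terminating null.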
2715     memcpy(codec.plname, "telephone-event", 16);
2716     if (_rtpRtcpModule->RegisterSendPayload(codec) != 0)
2717     {
2718         _rtpRtcpModule->DeRegisterSendPayload(codec.pltype);
2719         if (_rtpRtcpModule->RegisterSendPayload(codec) != 0) {
2720             _engineStatisticsPtr->SetLastError(
2721                 VE_RTP_RTCP_MODULE_ERROR, kTraceError,
2722                 "SetSendTelephoneEventPayloadType() failed to register send "
2723                 "payload type");
2724             return -1;
2725         }
2726     }
2727     _sendTelephoneEventPayloadType = type;
2728     return 0;
2729 }
2730
2731 int
2732 Channel::GetSendTelephoneEventPayloadType(unsigned char& type)
2733 {
2734     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2735                  "Channel::GetSendTelephoneEventPayloadType()");
2736     type = _sendTelephoneEventPayloadType;
2737     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
2738                VoEId(_instanceId,_channelId),
2739                "GetSendTelephoneEventPayloadType() => type=%u", type);
2740     return 0;
2741 }
2742
2743 int
2744 Channel::UpdateRxVadDetection(AudioFrame& audioFrame)
2745 {
2746     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
2747                  "Channel::UpdateRxVadDetection()");
2748
2749     int vadDecision = 1;
2750
2751     vadDecision = (audioFrame.vad_activity_ == AudioFrame::kVadActive) ? 1 : 0;
2752
2753     if ((vadDecision != _oldVadDecision) && _rxVadObserverPtr)
2754     {
2755         OnRxVadDetected(vadDecision);
2756         _oldVadDecision = vadDecision;
2757     }
2758
2759     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
2760                  "Channel::UpdateRxVadDetection() => vadDecision=%d",
2761                  vadDecision);
2762     return 0;
2763 }
2764
2765 int
2766 Channel::RegisterRxVadObserver(VoERxVadCallback &observer)
2767 {
2768     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2769                  "Channel::RegisterRxVadObserver()");
2770     CriticalSectionScoped cs(&_callbackCritSect);
2771
2772     if (_rxVadObserverPtr)
2773     {
2774         _engineStatisticsPtr->SetLastError(
2775             VE_INVALID_OPERATION, kTraceError,
2776             "RegisterRxVadObserver() observer already enabled");
2777         return -1;
2778     }
2779     _rxVadObserverPtr = &observer;
2780     _RxVadDetection = true;
2781     return 0;
2782 }
2783
2784 int
2785 Channel::DeRegisterRxVadObserver()
2786 {
2787     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2788                  "Channel::DeRegisterRxVadObserver()");
2789     CriticalSectionScoped cs(&_callbackCritSect);
2790
2791     if (!_rxVadObserverPtr)
2792     {
2793         _engineStatisticsPtr->SetLastError(
2794             VE_INVALID_OPERATION, kTraceWarning,
2795             "DeRegisterRxVadObserver() observer already disabled");
2796         return 0;
2797     }
2798     _rxVadObserverPtr = NULL;
2799     _RxVadDetection = false;
2800     return 0;
2801 }
2802
2803 int
2804 Channel::VoiceActivityIndicator(int &activity)
2805 {
2806     activity = _sendFrameType;
2807
2808     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2809                  "Channel::VoiceActivityIndicator(indicator=%d)", activity);
2810     return 0;
2811 }
2812
2813 #ifdef WEBRTC_VOICE_ENGINE_AGC
2814
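// Receive-side AGC: configures the gain control component of this channel's
// dedicated receive-side AudioProcessing instance (|rx_audioproc_|), which
// is applied to the decoded (far-end) signal before playout.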
2815 int
2816 Channel::SetRxAgcStatus(bool enable, AgcModes mode)
2817 {
2818     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2819                  "Channel::SetRxAgcStatus(enable=%d, mode=%d)",
2820                  (int)enable, (int)mode);
2821
2822     GainControl::Mode agcMode = kDefaultRxAgcMode;
2823     switch (mode)
2824     {
2825         case kAgcDefault:
2826             break;
2827         case kAgcUnchanged:
2828             agcMode = rx_audioproc_->gain_control()->mode();
2829             break;
2830         case kAgcFixedDigital:
2831             agcMode = GainControl::kFixedDigital;
2832             break;
2833         case kAgcAdaptiveDigital:
2834             agcMode = GainControl::kAdaptiveDigital;
2835             break;
2836         default:
2837             _engineStatisticsPtr->SetLastError(
2838                 VE_INVALID_ARGUMENT, kTraceError,
2839                 "SetRxAgcStatus() invalid Agc mode");
2840             return -1;
2841     }
2842
2843     if (rx_audioproc_->gain_control()->set_mode(agcMode) != 0)
2844     {
2845         _engineStatisticsPtr->SetLastError(
2846             VE_APM_ERROR, kTraceError,
2847             "SetRxAgcStatus() failed to set Agc mode");
2848         return -1;
2849     }
2850     if (rx_audioproc_->gain_control()->Enable(enable) != 0)
2851     {
2852         _engineStatisticsPtr->SetLastError(
2853             VE_APM_ERROR, kTraceError,
2854             "SetRxAgcStatus() failed to set Agc state");
2855         return -1;
2856     }
2857
2858     _rxAgcIsEnabled = enable;
2859     channel_state_.SetRxApmIsEnabled(_rxAgcIsEnabled || _rxNsIsEnabled);
2860
2861     return 0;
2862 }
2863
2864 int
2865 Channel::GetRxAgcStatus(bool& enabled, AgcModes& mode)
2866 {
2867     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2868                      "Channel::GetRxAgcStatus(enable=?, mode=?)");
2869
2870     bool enable = rx_audioproc_->gain_control()->is_enabled();
2871     GainControl::Mode agcMode =
2872         rx_audioproc_->gain_control()->mode();
2873
2874     enabled = enable;
2875
2876     switch (agcMode)
2877     {
2878         case GainControl::kFixedDigital:
2879             mode = kAgcFixedDigital;
2880             break;
2881         case GainControl::kAdaptiveDigital:
2882             mode = kAgcAdaptiveDigital;
2883             break;
2884         default:
2885             _engineStatisticsPtr->SetLastError(
2886                 VE_APM_ERROR, kTraceError,
2887                 "GetRxAgcStatus() invalid Agc mode");
2888             return -1;
2889     }
2890
2891     return 0;
2892 }
2893
2894 int
2895 Channel::SetRxAgcConfig(AgcConfig config)
2896 {
2897     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2898                  "Channel::SetRxAgcConfig()");
2899
2900     if (rx_audioproc_->gain_control()->set_target_level_dbfs(
2901         config.targetLeveldBOv) != 0)
2902     {
2903         _engineStatisticsPtr->SetLastError(
2904             VE_APM_ERROR, kTraceError,
2905             "SetRxAgcConfig() failed to set target peak |level| "
2906             "(or envelope) of the Agc");
2907         return -1;
2908     }
2909     if (rx_audioproc_->gain_control()->set_compression_gain_db(
2910         config.digitalCompressionGaindB) != 0)
2911     {
2912         _engineStatisticsPtr->SetLastError(
2913             VE_APM_ERROR, kTraceError,
2914             "SetRxAgcConfig() failed to set the range in |gain| the"
2915             " digital compression stage may apply");
2916         return -1;
2917     }
2918     if (rx_audioproc_->gain_control()->enable_limiter(
2919         config.limiterEnable) != 0)
2920     {
2921         _engineStatisticsPtr->SetLastError(
2922             VE_APM_ERROR, kTraceError,
2923             "SetRxAgcConfig() failed to set hard limiter to the signal");
2924         return -1;
2925     }
2926
2927     return 0;
2928 }
2929
2930 int
2931 Channel::GetRxAgcConfig(AgcConfig& config)
2932 {
2933     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2934                  "Channel::GetRxAgcConfig(config=?)");
2935
2936     config.targetLeveldBOv =
2937         rx_audioproc_->gain_control()->target_level_dbfs();
2938     config.digitalCompressionGaindB =
2939         rx_audioproc_->gain_control()->compression_gain_db();
2940     config.limiterEnable =
2941         rx_audioproc_->gain_control()->is_limiter_enabled();
2942
2943     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
2944                VoEId(_instanceId,_channelId), "GetRxAgcConfig() => "
2945                    "targetLeveldBOv=%u, digitalCompressionGaindB=%u,"
2946                    " limiterEnable=%d",
2947                    config.targetLeveldBOv,
2948                    config.digitalCompressionGaindB,
2949                    config.limiterEnable);
2950
2951     return 0;
2952 }
2953
2954 #endif // #ifdef WEBRTC_VOICE_ENGINE_AGC
2955
2956 #ifdef WEBRTC_VOICE_ENGINE_NR
2957
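// Receive-side noise suppression, analogous to the receive-side AGC above:
// maps the VoE NsModes value onto a NoiseSuppression::Level and
// enables/disables the component on |rx_audioproc_|.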
2958 int
2959 Channel::SetRxNsStatus(bool enable, NsModes mode)
2960 {
2961     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2962                  "Channel::SetRxNsStatus(enable=%d, mode=%d)",
2963                  (int)enable, (int)mode);
2964
2965     NoiseSuppression::Level nsLevel = kDefaultNsMode;
2966     switch (mode)
2967     {
2968
2969         case kNsDefault:
2970             break;
2971         case kNsUnchanged:
2972             nsLevel = rx_audioproc_->noise_suppression()->level();
2973             break;
2974         case kNsConference:
2975             nsLevel = NoiseSuppression::kHigh;
2976             break;
2977         case kNsLowSuppression:
2978             nsLevel = NoiseSuppression::kLow;
2979             break;
2980         case kNsModerateSuppression:
2981             nsLevel = NoiseSuppression::kModerate;
2982             break;
2983         case kNsHighSuppression:
2984             nsLevel = NoiseSuppression::kHigh;
2985             break;
2986         case kNsVeryHighSuppression:
2987             nsLevel = NoiseSuppression::kVeryHigh;
2988             break;
2989     }
2990
2991     if (rx_audioproc_->noise_suppression()->set_level(nsLevel)
2992         != 0)
2993     {
2994         _engineStatisticsPtr->SetLastError(
2995             VE_APM_ERROR, kTraceError,
2996             "SetRxNsStatus() failed to set NS level");
2997         return -1;
2998     }
2999     if (rx_audioproc_->noise_suppression()->Enable(enable) != 0)
3000     {
3001         _engineStatisticsPtr->SetLastError(
3002             VE_APM_ERROR, kTraceError,
3003             "SetRxNsStatus() failed to set NS state");
3004         return -1;
3005     }
3006
3007     _rxNsIsEnabled = enable;
3008     channel_state_.SetRxApmIsEnabled(_rxAgcIsEnabled || _rxNsIsEnabled);
3009
3010     return 0;
3011 }
3012
3013 int
3014 Channel::GetRxNsStatus(bool& enabled, NsModes& mode)
3015 {
3016     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
3017                  "Channel::GetRxNsStatus(enable=?, mode=?)");
3018
3019     bool enable =
3020         rx_audioproc_->noise_suppression()->is_enabled();
3021     NoiseSuppression::Level ncLevel =
3022         rx_audioproc_->noise_suppression()->level();
3023
3024     enabled = enable;
3025
3026     switch (ncLevel)
3027     {
3028         case NoiseSuppression::kLow:
3029             mode = kNsLowSuppression;
3030             break;
3031         case NoiseSuppression::kModerate:
3032             mode = kNsModerateSuppression;
3033             break;
3034         case NoiseSuppression::kHigh:
3035             mode = kNsHighSuppression;
3036             break;
3037         case NoiseSuppression::kVeryHigh:
3038             mode = kNsVeryHighSuppression;
3039             break;
3040     }
3041
3042     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
3043                VoEId(_instanceId,_channelId),
3044                "GetRxNsStatus() => enabled=%d, mode=%d", enabled, mode);
3045     return 0;
3046 }
3047
3048 #endif // #ifdef WEBRTC_VOICE_ENGINE_NR
3049
3050 int
3051 Channel::RegisterRTCPObserver(VoERTCPObserver& observer)
3052 {
3053     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
3054                  "Channel::RegisterRTCPObserver()");
3055     CriticalSectionScoped cs(&_callbackCritSect);
3056
3057     if (_rtcpObserverPtr)
3058     {
3059         _engineStatisticsPtr->SetLastError(
3060             VE_INVALID_OPERATION, kTraceError,
3061             "RegisterRTCPObserver() observer already enabled");
3062         return -1;
3063     }
3064
3065     _rtcpObserverPtr = &observer;
3066     _rtcpObserver = true;
3067
3068     return 0;
3069 }
3070
3071 int
3072 Channel::DeRegisterRTCPObserver()
3073 {
3074     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
3075                  "Channel::DeRegisterRTCPObserver()");
3076     CriticalSectionScoped cs(&_callbackCritSect);
3077
3078     if (!_rtcpObserverPtr)
3079     {
3080         _engineStatisticsPtr->SetLastError(
3081             VE_INVALID_OPERATION, kTraceWarning,
3082             "DeRegisterRTCPObserver() observer already disabled");
3083         return 0;
3084     }
3085
3086     _rtcpObserver = false;
3087     _rtcpObserverPtr = NULL;
3088
3089     return 0;
3090 }
3091
3092 int
3093 Channel::SetLocalSSRC(unsigned int ssrc)
3094 {
3095     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
3096                  "Channel::SetLocalSSRC()");
3097     if (channel_state_.Get().sending)
3098     {
3099         _engineStatisticsPtr->SetLastError(
3100             VE_ALREADY_SENDING, kTraceError,
3101             "SetLocalSSRC() already sending");
3102         return -1;
3103     }
3104     _rtpRtcpModule->SetSSRC(ssrc);
3105     return 0;
3106 }
3107
3108 int
3109 Channel::GetLocalSSRC(unsigned int& ssrc)
3110 {
3111     ssrc = _rtpRtcpModule->SSRC();
3112     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
3113                  VoEId(_instanceId,_channelId),
3114                  "GetLocalSSRC() => ssrc=%lu", ssrc);
3115     return 0;
3116 }
3117
3118 int
3119 Channel::GetRemoteSSRC(unsigned int& ssrc)
3120 {
3121     ssrc = rtp_receiver_->SSRC();
3122     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
3123                  VoEId(_instanceId,_channelId),
3124                  "GetRemoteSSRC() => ssrc=%lu", ssrc);
3125     return 0;
3126 }
3127
3128 int Channel::SetSendAudioLevelIndicationStatus(bool enable, unsigned char id) {
3129   _includeAudioLevelIndication = enable;
3130   return SetSendRtpHeaderExtension(enable, kRtpExtensionAudioLevel, id);
3131 }
3132
3133 int Channel::SetReceiveAudioLevelIndicationStatus(bool enable,
3134                                                   unsigned char id) {
3135   rtp_header_parser_->DeregisterRtpHeaderExtension(
3136       kRtpExtensionAudioLevel);
3137   if (enable && !rtp_header_parser_->RegisterRtpHeaderExtension(
3138           kRtpExtensionAudioLevel, id)) {
3139     return -1;
3140   }
3141   return 0;
3142 }
3143
3144 int Channel::SetSendAbsoluteSenderTimeStatus(bool enable, unsigned char id) {
3145   return SetSendRtpHeaderExtension(enable, kRtpExtensionAbsoluteSendTime, id);
3146 }
3147
3148 int Channel::SetReceiveAbsoluteSenderTimeStatus(bool enable, unsigned char id) {
3149   rtp_header_parser_->DeregisterRtpHeaderExtension(
3150       kRtpExtensionAbsoluteSendTime);
3151   if (enable && !rtp_header_parser_->RegisterRtpHeaderExtension(
3152       kRtpExtensionAbsoluteSendTime, id)) {
3153     return -1;
3154   }
3155   return 0;
3156 }
3157
3158 int
3159 Channel::SetRTCPStatus(bool enable)
3160 {
3161     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
3162                  "Channel::SetRTCPStatus()");
3163     if (_rtpRtcpModule->SetRTCPStatus(enable ?
3164         kRtcpCompound : kRtcpOff) != 0)
3165     {
3166         _engineStatisticsPtr->SetLastError(
3167             VE_RTP_RTCP_MODULE_ERROR, kTraceError,
3168             "SetRTCPStatus() failed to set RTCP status");
3169         return -1;
3170     }
3171     return 0;
3172 }
3173
3174 int
3175 Channel::GetRTCPStatus(bool& enabled)
3176 {
3177     RTCPMethod method = _rtpRtcpModule->RTCP();
3178     enabled = (method != kRtcpOff);
3179     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
3180                  VoEId(_instanceId,_channelId),
3181                  "GetRTCPStatus() => enabled=%d", enabled);
3182     return 0;
3183 }
3184
3185 int
3186 Channel::SetRTCP_CNAME(const char cName[256])
3187 {
3188     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
3189                  "Channel::SetRTCP_CNAME()");
3190     if (_rtpRtcpModule->SetCNAME(cName) != 0)
3191     {
3192         _engineStatisticsPtr->SetLastError(
3193             VE_RTP_RTCP_MODULE_ERROR, kTraceError,
3194             "SetRTCP_CNAME() failed to set RTCP CNAME");
3195         return -1;
3196     }
3197     return 0;
3198 }
3199
3200 int
3201 Channel::GetRemoteRTCP_CNAME(char cName[256])
3202 {
3203     if (cName == NULL)
3204     {
3205         _engineStatisticsPtr->SetLastError(
3206             VE_INVALID_ARGUMENT, kTraceError,
3207             "GetRemoteRTCP_CNAME() invalid CNAME input buffer");
3208         return -1;
3209     }
3210     char cname[RTCP_CNAME_SIZE];
3211     const uint32_t remoteSSRC = rtp_receiver_->SSRC();
3212     if (_rtpRtcpModule->RemoteCNAME(remoteSSRC, cname) != 0)
3213     {
3214         _engineStatisticsPtr->SetLastError(
3215             VE_CANNOT_RETRIEVE_CNAME, kTraceError,
3216             "GetRemoteRTCP_CNAME() failed to retrieve remote RTCP CNAME");
3217         return -1;
3218     }
3219     strcpy(cName, cname);
3220     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
3221                  VoEId(_instanceId, _channelId),
3222                  "GetRemoteRTCP_CNAME() => cName=%s", cName);
3223     return 0;
3224 }
3225
3226 int
3227 Channel::GetRemoteRTCPData(
3228     unsigned int& NTPHigh,
3229     unsigned int& NTPLow,
3230     unsigned int& timestamp,
3231     unsigned int& playoutTimestamp,
3232     unsigned int* jitter,
3233     unsigned short* fractionLost)
3234 {
3235     // --- Information from sender info in received Sender Reports
3236
3237     RTCPSenderInfo senderInfo;
3238     if (_rtpRtcpModule->RemoteRTCPStat(&senderInfo) != 0)
3239     {
3240         _engineStatisticsPtr->SetLastError(
3241             VE_RTP_RTCP_MODULE_ERROR, kTraceError,
3242             "GetRemoteRTCPData() failed to retrieve sender info for remote "
3243             "side");
3244         return -1;
3245     }
3246
3247     // We only use 12 of the 20 bytes in the sender info (the packet and
3248     // octet counts are ignored).
3249     NTPHigh = senderInfo.NTPseconds;
3250     NTPLow = senderInfo.NTPfraction;
3251     timestamp = senderInfo.RTPtimeStamp;
3252
3253     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
3254                  VoEId(_instanceId, _channelId),
3255                  "GetRemoteRTCPData() => NTPHigh=%lu, NTPLow=%lu, "
3256                  "timestamp=%lu",
3257                  NTPHigh, NTPLow, timestamp);
3258
3259     // --- Locally derived information
3260
3261     // This value is updated on each incoming RTCP packet (0 when no packet
3262     // has been received)
3263     playoutTimestamp = playout_timestamp_rtcp_;
3264
3265     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
3266                  VoEId(_instanceId, _channelId),
3267                  "GetRemoteRTCPData() => playoutTimestamp=%lu",
3268                  playout_timestamp_rtcp_);
3269
3270     if (NULL != jitter || NULL != fractionLost)
3271     {
3272         // Get all RTCP receiver report blocks that have been received on this
3273         // channel. If we have received RTP packets from the remote source, we
3274         // know its SSRC and use the matching report block.
3275         // Otherwise use the first report block.
3276         std::vector<RTCPReportBlock> remote_stats;
3277         if (_rtpRtcpModule->RemoteRTCPStat(&remote_stats) != 0 ||
3278             remote_stats.empty()) {
3279           WEBRTC_TRACE(kTraceWarning, kTraceVoice,
3280                        VoEId(_instanceId, _channelId),
3281                        "GetRemoteRTCPData() failed to measure statistics due"
3282                        " to lack of received RTP and/or RTCP packets");
3283           return -1;
3284         }
3285
3286         uint32_t remoteSSRC = rtp_receiver_->SSRC();
3287         std::vector<RTCPReportBlock>::const_iterator it = remote_stats.begin();
3288         for (; it != remote_stats.end(); ++it) {
3289           if (it->remoteSSRC == remoteSSRC)
3290             break;
3291         }
3292
3293         if (it == remote_stats.end()) {
3294           // If we have not received any RTCP packets from this SSRC it probably
3295           // means that we have not received any RTP packets.
3296           // Use the first received report block instead.
3297           it = remote_stats.begin();
3298           remoteSSRC = it->remoteSSRC;
3299         }
3300
3301         if (jitter) {
3302           *jitter = it->jitter;
3303           WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
3304                        VoEId(_instanceId, _channelId),
3305                        "GetRemoteRTCPData() => jitter = %lu", *jitter);
3306         }
3307
3308         if (fractionLost) {
3309           *fractionLost = it->fractionLost;
3310           WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
3311                        VoEId(_instanceId, _channelId),
3312                        "GetRemoteRTCPData() => fractionLost = %lu",
3313                        *fractionLost);
3314         }
3315     }
3316     return 0;
3317 }
3318
3319 int
3320 Channel::SendApplicationDefinedRTCPPacket(unsigned char subType,
3321                                              unsigned int name,
3322                                              const char* data,
3323                                              unsigned short dataLengthInBytes)
3324 {
3325     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
3326                  "Channel::SendApplicationDefinedRTCPPacket()");
3327     if (!channel_state_.Get().sending)
3328     {
3329         _engineStatisticsPtr->SetLastError(
3330             VE_NOT_SENDING, kTraceError,
3331             "SendApplicationDefinedRTCPPacket() not sending");
3332         return -1;
3333     }
3334     if (NULL == data)
3335     {
3336         _engineStatisticsPtr->SetLastError(
3337             VE_INVALID_ARGUMENT, kTraceError,
3338             "SendApplicationDefinedRTCPPacket() invalid data value");
3339         return -1;
3340     }
3341     if (dataLengthInBytes % 4 != 0)
3342     {
3343         _engineStatisticsPtr->SetLastError(
3344             VE_INVALID_ARGUMENT, kTraceError,
3345             "SendApplicationDefinedRTCPPacket() invalid length value");
3346         return -1;
3347     }
3348     RTCPMethod status = _rtpRtcpModule->RTCP();
3349     if (status == kRtcpOff)
3350     {
3351         _engineStatisticsPtr->SetLastError(
3352             VE_RTCP_ERROR, kTraceError,
3353             "SendApplicationDefinedRTCPPacket() RTCP is disabled");
3354         return -1;
3355     }
3356
3357     // Create and schedule the RTCP APP packet for transmission
3358     if (_rtpRtcpModule->SetRTCPApplicationSpecificData(
3359         subType,
3360         name,
3361         (const unsigned char*) data,
3362         dataLengthInBytes) != 0)
3363     {
3364         _engineStatisticsPtr->SetLastError(
3365             VE_SEND_ERROR, kTraceError,
3366             "SendApplicationDefinedRTCPPacket() failed to send RTCP packet");
3367         return -1;
3368     }
3369     return 0;
3370 }
3371
3372 int
3373 Channel::GetRTPStatistics(
3374         unsigned int& averageJitterMs,
3375         unsigned int& maxJitterMs,
3376         unsigned int& discardedPackets)
3377 {
3378     // The jitter statistics are updated for each received RTP packet and are
3379     // based on received packets.
3380     if (_rtpRtcpModule->RTCP() == kRtcpOff) {
3381       // If RTCP is off, there is no timed thread in the RTCP module regularly
3382       // generating new stats, trigger the update manually here instead.
3383       StreamStatistician* statistician =
3384           rtp_receive_statistics_->GetStatistician(rtp_receiver_->SSRC());
3385       if (statistician) {
3386         // Don't use returned statistics, use data from proxy instead so that
3387         // max jitter can be fetched atomically.
3388         RtcpStatistics s;
3389         statistician->GetStatistics(&s, true);
3390       }
3391     }
3392
3393     ChannelStatistics stats = statistics_proxy_->GetStats();
3394     const int32_t playoutFrequency = audio_coding_->PlayoutFrequency();
3395     if (playoutFrequency > 0) {
3396       // Scale RTP statistics given the current playout frequency
3397       maxJitterMs = stats.max_jitter / (playoutFrequency / 1000);
3398       averageJitterMs = stats.rtcp.jitter / (playoutFrequency / 1000);
3399     }
3400
3401     discardedPackets = _numberOfDiscardedPackets;
3402
3403     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
3404                VoEId(_instanceId, _channelId),
3405                "GetRTPStatistics() => averageJitterMs = %lu, maxJitterMs = %lu,"
3406                " discardedPackets = %lu)",
3407                averageJitterMs, maxJitterMs, discardedPackets);
3408     return 0;
3409 }
3410
3411 int Channel::GetRemoteRTCPReportBlocks(
3412     std::vector<ReportBlock>* report_blocks) {
3413   if (report_blocks == NULL) {
3414     _engineStatisticsPtr->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
3415       "GetRemoteRTCPReportBlocks() invalid report_blocks.");
3416     return -1;
3417   }
3418
3419   // Get the report blocks from the latest received RTCP Sender or Receiver
3420   // Report. Each element in the vector contains the sender's SSRC and a
3421   // report block according to RFC 3550.
3422   std::vector<RTCPReportBlock> rtcp_report_blocks;
3423   if (_rtpRtcpModule->RemoteRTCPStat(&rtcp_report_blocks) != 0) {
3424     _engineStatisticsPtr->SetLastError(VE_RTP_RTCP_MODULE_ERROR, kTraceError,
3425         "GetRemoteRTCPReportBlocks() failed to read RTCP SR/RR report block.");
3426     return -1;
3427   }
3428
3429   if (rtcp_report_blocks.empty())
3430     return 0;
3431
3432   std::vector<RTCPReportBlock>::const_iterator it = rtcp_report_blocks.begin();
3433   for (; it != rtcp_report_blocks.end(); ++it) {
3434     ReportBlock report_block;
3435     report_block.sender_SSRC = it->remoteSSRC;
3436     report_block.source_SSRC = it->sourceSSRC;
3437     report_block.fraction_lost = it->fractionLost;
3438     report_block.cumulative_num_packets_lost = it->cumulativeLost;
3439     report_block.extended_highest_sequence_number = it->extendedHighSeqNum;
3440     report_block.interarrival_jitter = it->jitter;
3441     report_block.last_SR_timestamp = it->lastSR;
3442     report_block.delay_since_last_SR = it->delaySinceLastSR;
3443     report_blocks->push_back(report_block);
3444   }
3445   return 0;
3446 }
3447
3448 int
3449 Channel::GetRTPStatistics(CallStatistics& stats)
3450 {
3451     // --- RtcpStatistics
3452
3453     // The jitter statistics are updated for each received RTP packet and are
3454     // based on received packets.
3455     RtcpStatistics statistics;
3456     StreamStatistician* statistician =
3457         rtp_receive_statistics_->GetStatistician(rtp_receiver_->SSRC());
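         // When RTCP is off there is no RTCP thread regularly generating new
         // statistics; in that case the second argument asks the statistician
         // to produce fresh statistics here, mirroring the manual update in
         // the jitter variant of GetRTPStatistics() above.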
3458     if (!statistician || !statistician->GetStatistics(
3459         &statistics, _rtpRtcpModule->RTCP() == kRtcpOff)) {
3460       _engineStatisticsPtr->SetLastError(
3461           VE_CANNOT_RETRIEVE_RTP_STAT, kTraceWarning,
3462           "GetRTPStatistics() failed to read RTP statistics from the "
3463           "RTP/RTCP module");
3464     }
3465
3466     stats.fractionLost = statistics.fraction_lost;
3467     stats.cumulativeLost = statistics.cumulative_lost;
3468     stats.extendedMax = statistics.extended_max_sequence_number;
3469     stats.jitterSamples = statistics.jitter;
3470
3471     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
3472                  VoEId(_instanceId, _channelId),
3473                  "GetRTPStatistics() => fractionLost=%lu, cumulativeLost=%lu,"
3474                  " extendedMax=%lu, jitterSamples=%li)",
3475                  stats.fractionLost, stats.cumulativeLost, stats.extendedMax,
3476                  stats.jitterSamples);
3477
3478     // --- RTT
3479
3480     uint16_t RTT(0);
3481     RTCPMethod method = _rtpRtcpModule->RTCP();
3482     if (method == kRtcpOff)
3483     {
3484         WEBRTC_TRACE(kTraceWarning, kTraceVoice,
3485                      VoEId(_instanceId, _channelId),
3486                      "GetRTPStatistics() RTCP is disabled => valid RTT "
3487                      "measurements cannot be retrieved");
3488     } else
3489     {
3490         // The remote SSRC will be zero if no RTP packet has been received.
3491         uint32_t remoteSSRC = rtp_receiver_->SSRC();
3492         if (remoteSSRC > 0)
3493         {
3494             uint16_t avgRTT(0);
3495             uint16_t maxRTT(0);
3496             uint16_t minRTT(0);
3497
3498             if (_rtpRtcpModule->RTT(remoteSSRC, &RTT, &avgRTT, &minRTT, &maxRTT)
3499                 != 0)
3500             {
3501                 WEBRTC_TRACE(kTraceWarning, kTraceVoice,
3502                              VoEId(_instanceId, _channelId),
3503                              "GetRTPStatistics() failed to retrieve RTT from "
3504                              "the RTP/RTCP module");
3505             }
3506         } else
3507         {
3508             WEBRTC_TRACE(kTraceWarning, kTraceVoice,
3509                          VoEId(_instanceId, _channelId),
3510                          "GetRTPStatistics() failed to measure RTT since no "
3511                          "RTP packets have been received yet");
3512         }
3513     }
3514
3515     stats.rttMs = static_cast<int> (RTT);
3516
3517     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
3518                  VoEId(_instanceId, _channelId),
3519                  "GetRTPStatistics() => rttMs=%d", stats.rttMs);
3520
3521     // --- Data counters
3522
3523     uint32_t bytesSent(0);
3524     uint32_t packetsSent(0);
3525     uint32_t bytesReceived(0);
3526     uint32_t packetsReceived(0);
3527
3528     if (statistician) {
3529       statistician->GetDataCounters(&bytesReceived, &packetsReceived);
3530     }
3531
3532     if (_rtpRtcpModule->DataCountersRTP(&bytesSent,
3533                                         &packetsSent) != 0)
3534     {
3535         WEBRTC_TRACE(kTraceWarning, kTraceVoice,
3536                      VoEId(_instanceId, _channelId),
3537                      "GetRTPStatistics() failed to retrieve RTP datacounters =>"
3538                      " output will not be complete");
3539     }
3540
3541     stats.bytesSent = bytesSent;
3542     stats.packetsSent = packetsSent;
3543     stats.bytesReceived = bytesReceived;
3544     stats.packetsReceived = packetsReceived;
3545
3546     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
3547                  VoEId(_instanceId, _channelId),
3548                  "GetRTPStatistics() => bytesSent=%d, packetsSent=%d,"
3549                  " bytesReceived=%d, packetsReceived=%d)",
3550                  stats.bytesSent, stats.packetsSent, stats.bytesReceived,
3551                  stats.packetsReceived);
3552
3553     // --- Timestamps
3554     {
3555       CriticalSectionScoped lock(ts_stats_lock_.get());
3556       stats.capture_start_ntp_time_ms_ = capture_start_ntp_time_ms_;
3557     }
3558     return 0;
3559 }
3560
3561 int Channel::SetREDStatus(bool enable, int redPayloadtype) {
3562   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
3563                "Channel::SetREDStatus()");
3564
3565   if (enable) {
3566     if (redPayloadtype < 0 || redPayloadtype > 127) {
3567       _engineStatisticsPtr->SetLastError(
3568           VE_PLTYPE_ERROR, kTraceError,
3569           "SetREDStatus() invalid RED payload type");
3570       return -1;
3571     }
3572
3573     if (SetRedPayloadType(redPayloadtype) < 0) {
3574       _engineStatisticsPtr->SetLastError(
3575           VE_CODEC_ERROR, kTraceError,
3576           "SetREDStatus() failed to register RED in the ACM");
3577       return -1;
3578     }
3579   }
3580
3581   if (audio_coding_->SetREDStatus(enable) != 0) {
3582     _engineStatisticsPtr->SetLastError(
3583         VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
3584         "SetREDStatus() failed to set RED state in the ACM");
3585     return -1;
3586   }
3587   return 0;
3588 }
3589
3590 int
3591 Channel::GetREDStatus(bool& enabled, int& redPayloadtype)
3592 {
3593     enabled = audio_coding_->REDStatus();
3594     if (enabled)
3595     {
3596         int8_t payloadType(0);
3597         if (_rtpRtcpModule->SendREDPayloadType(payloadType) != 0)
3598         {
3599             _engineStatisticsPtr->SetLastError(
3600                 VE_RTP_RTCP_MODULE_ERROR, kTraceError,
3601                 "GetREDStatus() failed to retrieve RED PT from RTP/RTCP "
3602                 "module");
3603             return -1;
3604         }
             redPayloadtype = payloadType;
3605         WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
3606                    VoEId(_instanceId, _channelId),
3607                    "GetREDStatus() => enabled=%d, redPayloadtype=%d",
3608                    enabled, redPayloadtype);
3609         return 0;
3610     }
3611     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
3612                  VoEId(_instanceId, _channelId),
3613                  "GetREDStatus() => enabled=%d", enabled);
3614     return 0;
3615 }
3616
3617 int Channel::SetCodecFECStatus(bool enable) {
3618   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
3619                "Channel::SetCodecFECStatus()");
3620
3621   if (audio_coding_->SetCodecFEC(enable) != 0) {
3622     _engineStatisticsPtr->SetLastError(
3623         VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
3624         "SetCodecFECStatus() failed to set FEC state");
3625     return -1;
3626   }
3627   return 0;
3628 }
3629
3630 bool Channel::GetCodecFECStatus() {
3631   bool enabled = audio_coding_->CodecFEC();
3632   WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
3633                VoEId(_instanceId, _channelId),
3634                "GetCodecFECStatus() => enabled=%d", enabled);
3635   return enabled;
3636 }
3637
3638 void Channel::SetNACKStatus(bool enable, int maxNumberOfPackets) {
3639   // None of these functions can fail.
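     // Enabling NACK makes the RTP module store up to |maxNumberOfPackets|
     // sent packets for retransmission, raises the receive-side reordering
     // threshold accordingly, selects RTCP-based NACK in the RTP receiver and
     // turns on NACK handling in the ACM/NetEq.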
3640   _rtpRtcpModule->SetStorePacketsStatus(enable, maxNumberOfPackets);
3641   rtp_receive_statistics_->SetMaxReorderingThreshold(maxNumberOfPackets);
3642   rtp_receiver_->SetNACKStatus(enable ? kNackRtcp : kNackOff);
3643   if (enable)
3644     audio_coding_->EnableNack(maxNumberOfPackets);
3645   else
3646     audio_coding_->DisableNack();
3647 }
3648
3649 // Called when we are missing one or more packets.
3650 int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) {
3651   return _rtpRtcpModule->SendNACK(sequence_numbers, length);
3652 }
3653
3654 int
3655 Channel::StartRTPDump(const char fileNameUTF8[1024],
3656                       RTPDirections direction)
3657 {
3658     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
3659                  "Channel::StartRTPDump()");
3660     if ((direction != kRtpIncoming) && (direction != kRtpOutgoing))
3661     {
3662         _engineStatisticsPtr->SetLastError(
3663             VE_INVALID_ARGUMENT, kTraceError,
3664             "StartRTPDump() invalid RTP direction");
3665         return -1;
3666     }
3667     RtpDump* rtpDumpPtr = (direction == kRtpIncoming) ?
3668         &_rtpDumpIn : &_rtpDumpOut;
3669     if (rtpDumpPtr == NULL)
3670     {
3671         assert(false);
3672         return -1;
3673     }
3674     if (rtpDumpPtr->IsActive())
3675     {
3676         rtpDumpPtr->Stop();
3677     }
3678     if (rtpDumpPtr->Start(fileNameUTF8) != 0)
3679     {
3680         _engineStatisticsPtr->SetLastError(
3681             VE_BAD_FILE, kTraceError,
3682             "StartRTPDump() failed to create file");
3683         return -1;
3684     }
3685     return 0;
3686 }
3687
3688 int
3689 Channel::StopRTPDump(RTPDirections direction)
3690 {
3691     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
3692                  "Channel::StopRTPDump()");
3693     if ((direction != kRtpIncoming) && (direction != kRtpOutgoing))
3694     {
3695         _engineStatisticsPtr->SetLastError(
3696             VE_INVALID_ARGUMENT, kTraceError,
3697             "StopRTPDump() invalid RTP direction");
3698         return -1;
3699     }
3700     RtpDump* rtpDumpPtr = (direction == kRtpIncoming) ?
3701         &_rtpDumpIn : &_rtpDumpOut;
3702     if (rtpDumpPtr == NULL)
3703     {
3704         assert(false);
3705         return -1;
3706     }
3707     if (!rtpDumpPtr->IsActive())
3708     {
3709         return 0;
3710     }
3711     return rtpDumpPtr->Stop();
3712 }
3713
3714 bool
3715 Channel::RTPDumpIsActive(RTPDirections direction)
3716 {
3717     if ((direction != kRtpIncoming) &&
3718         (direction != kRtpOutgoing))
3719     {
3720         _engineStatisticsPtr->SetLastError(
3721             VE_INVALID_ARGUMENT, kTraceError,
3722             "RTPDumpIsActive() invalid RTP direction");
3723         return false;
3724     }
3725     RtpDump* rtpDumpPtr = (direction == kRtpIncoming) ?
3726         &_rtpDumpIn : &_rtpDumpOut;
3727     return rtpDumpPtr->IsActive();
3728 }
3729
3730 void Channel::SetVideoEngineBWETarget(ViENetwork* vie_network,
3731                                       int video_channel) {
3732   CriticalSectionScoped cs(&_callbackCritSect);
3733   if (vie_network_) {
3734     vie_network_->Release();
3735     vie_network_ = NULL;
3736   }
3737   video_channel_ = -1;
3738
3739   if (vie_network != NULL && video_channel != -1) {
3740     vie_network_ = vie_network;
3741     video_channel_ = video_channel;
3742   }
3743 }
3744
3745 uint32_t
3746 Channel::Demultiplex(const AudioFrame& audioFrame)
3747 {
3748     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
3749                  "Channel::Demultiplex()");
3750     _audioFrame.CopyFrom(audioFrame);
3751     _audioFrame.id_ = _channelId;
3752     return 0;
3753 }
3754
3755 void Channel::Demultiplex(const int16_t* audio_data,
3756                           int sample_rate,
3757                           int number_of_frames,
3758                           int number_of_channels) {
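     // Down-convert (down-mix and resample as needed) the raw capture buffer
     // to the send codec's channel count and sample rate, storing the result
     // in |_audioFrame| for the subsequent PrepareEncodeAndSend() and
     // EncodeAndSend() calls.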
3759   CodecInst codec;
3760   GetSendCodec(codec);
3761
3762   if (!mono_recording_audio_.get()) {
3763     // Temporary space for DownConvertToCodecFormat.
3764     mono_recording_audio_.reset(new int16_t[kMaxMonoDataSizeSamples]);
3765   }
3766   DownConvertToCodecFormat(audio_data,
3767                            number_of_frames,
3768                            number_of_channels,
3769                            sample_rate,
3770                            codec.channels,
3771                            codec.plfreq,
3772                            mono_recording_audio_.get(),
3773                            &input_resampler_,
3774                            &_audioFrame);
3775 }
3776
3777 uint32_t
3778 Channel::PrepareEncodeAndSend(int mixingFrequency)
3779 {
3780     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
3781                  "Channel::PrepareEncodeAndSend()");
3782
3783     if (_audioFrame.samples_per_channel_ == 0)
3784     {
3785         WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
3786                      "Channel::PrepareEncodeAndSend() invalid audio frame");
3787         return 0xFFFFFFFF;
3788     }
3789
3790     if (channel_state_.Get().input_file_playing)
3791     {
3792         MixOrReplaceAudioWithFile(mixingFrequency);
3793     }
3794
3795     bool is_muted = Mute();  // Cache locally as Mute() takes a lock.
3796     if (is_muted) {
3797       AudioFrameOperations::Mute(_audioFrame);
3798     }
3799
3800     if (channel_state_.Get().input_external_media)
3801     {
3802         CriticalSectionScoped cs(&_callbackCritSect);
3803         const bool isStereo = (_audioFrame.num_channels_ == 2);
3804         if (_inputExternalMediaCallbackPtr)
3805         {
3806             _inputExternalMediaCallbackPtr->Process(
3807                 _channelId,
3808                 kRecordingPerChannel,
3809                (int16_t*)_audioFrame.data_,
3810                 _audioFrame.samples_per_channel_,
3811                 _audioFrame.sample_rate_hz_,
3812                 isStereo);
3813         }
3814     }
3815
3816     InsertInbandDtmfTone();
3817
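         // Feed the (possibly muted) frame to the RMS meter so that the audio
         // level RTP header extension reflects the level of the audio that is
         // actually sent.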
3818     if (_includeAudioLevelIndication) {
3819       int length = _audioFrame.samples_per_channel_ * _audioFrame.num_channels_;
3820       if (is_muted) {
3821         rms_level_.ProcessMuted(length);
3822       } else {
3823         rms_level_.Process(_audioFrame.data_, length);
3824       }
3825     }
3826
3827     return 0;
3828 }
3829
3830 uint32_t
3831 Channel::EncodeAndSend()
3832 {
3833     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
3834                  "Channel::EncodeAndSend()");
3835
3836     assert(_audioFrame.num_channels_ <= 2);
3837     if (_audioFrame.samples_per_channel_ == 0)
3838     {
3839         WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
3840                      "Channel::EncodeAndSend() invalid audio frame");
3841         return 0xFFFFFFFF;
3842     }
3843
3844     _audioFrame.id_ = _channelId;
3845
3846     // --- Add 10 ms of raw (PCM) audio data to the encoder.
3847
3848     // The ACM resamples internally.
3849     _audioFrame.timestamp_ = _timeStamp;
3850     if (audio_coding_->Add10MsData(_audioFrame) != 0)
3851     {
3852         WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
3853                      "Channel::EncodeAndSend() ACM encoding failed");
3854         return 0xFFFFFFFF;
3855     }
3856
3857     _timeStamp += _audioFrame.samples_per_channel_;
3858
3859     // --- Encode if complete frame is ready
3860
3861     // This call will trigger AudioPacketizationCallback::SendData if encoding
3862     // is done and payload is ready for packetization and transmission.
3863     return audio_coding_->Process();
3864 }
3865
3866 int Channel::RegisterExternalMediaProcessing(
3867     ProcessingTypes type,
3868     VoEMediaProcess& processObject)
3869 {
3870     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
3871                  "Channel::RegisterExternalMediaProcessing()");
3872
3873     CriticalSectionScoped cs(&_callbackCritSect);
3874
3875     if (kPlaybackPerChannel == type)
3876     {
3877         if (_outputExternalMediaCallbackPtr)
3878         {
3879             _engineStatisticsPtr->SetLastError(
3880                 VE_INVALID_OPERATION, kTraceError,
3881                 "Channel::RegisterExternalMediaProcessing() "
3882                 "output external media already enabled");
3883             return -1;
3884         }
3885         _outputExternalMediaCallbackPtr = &processObject;
3886         _outputExternalMedia = true;
3887     }
3888     else if (kRecordingPerChannel == type)
3889     {
3890         if (_inputExternalMediaCallbackPtr)
3891         {
3892             _engineStatisticsPtr->SetLastError(
3893                 VE_INVALID_OPERATION, kTraceError,
3894                 "Channel::RegisterExternalMediaProcessing() "
3895                 "input external media already enabled");
3896             return -1;
3897         }
3898         _inputExternalMediaCallbackPtr = &processObject;
3899         channel_state_.SetInputExternalMedia(true);
3900     }
3901     return 0;
3902 }
3903
3904 int Channel::DeRegisterExternalMediaProcessing(ProcessingTypes type)
3905 {
3906     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
3907                  "Channel::DeRegisterExternalMediaProcessing()");
3908
3909     CriticalSectionScoped cs(&_callbackCritSect);
3910
3911     if (kPlaybackPerChannel == type)
3912     {
3913         if (!_outputExternalMediaCallbackPtr)
3914         {
3915             _engineStatisticsPtr->SetLastError(
3916                 VE_INVALID_OPERATION, kTraceWarning,
3917                 "Channel::DeRegisterExternalMediaProcessing() "
3918                 "output external media already disabled");
3919             return 0;
3920         }
3921         _outputExternalMedia = false;
3922         _outputExternalMediaCallbackPtr = NULL;
3923     }
3924     else if (kRecordingPerChannel == type)
3925     {
3926         if (!_inputExternalMediaCallbackPtr)
3927         {
3928             _engineStatisticsPtr->SetLastError(
3929                 VE_INVALID_OPERATION, kTraceWarning,
3930                 "Channel::DeRegisterExternalMediaProcessing() "
3931                 "input external media already disabled");
3932             return 0;
3933         }
3934         channel_state_.SetInputExternalMedia(false);
3935         _inputExternalMediaCallbackPtr = NULL;
3936     }
3937
3938     return 0;
3939 }
3940
3941 int Channel::SetExternalMixing(bool enabled) {
3942     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
3943                  "Channel::SetExternalMixing(enabled=%d)", enabled);
3944
3945     if (channel_state_.Get().playing)
3946     {
3947         _engineStatisticsPtr->SetLastError(
3948             VE_INVALID_OPERATION, kTraceError,
3949             "Channel::SetExternalMixing() "
3950             "external mixing cannot be changed while playing.");
3951         return -1;
3952     }
3953
3954     _externalMixing = enabled;
3955
3956     return 0;
3957 }
3958
3959 int
3960 Channel::GetNetworkStatistics(NetworkStatistics& stats)
3961 {
3962     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
3963                  "Channel::GetNetworkStatistics()");
3964     ACMNetworkStatistics acm_stats;
3965     int return_value = audio_coding_->NetworkStatistics(&acm_stats);
3966     if (return_value >= 0) {
3967       memcpy(&stats, &acm_stats, sizeof(NetworkStatistics));
3968     }
3969     return return_value;
3970 }
3971
3972 void Channel::GetDecodingCallStatistics(AudioDecodingCallStats* stats) const {
3973   audio_coding_->GetDecodingCallStatistics(stats);
3974 }
3975
3976 bool Channel::GetDelayEstimate(int* jitter_buffer_delay_ms,
3977                                int* playout_buffer_delay_ms) const {
3978   if (_average_jitter_buffer_delay_us == 0) {
3979     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
3980                  "Channel::GetDelayEstimate() no valid estimate.");
3981     return false;
3982   }
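     // |_average_jitter_buffer_delay_us| is stored in microseconds; adding 500
     // before dividing rounds to the nearest millisecond. |_recPacketDelayMs|
     // adds the duration of the most recently received packets (see
     // UpdatePacketDelay()).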
3983   *jitter_buffer_delay_ms = (_average_jitter_buffer_delay_us + 500) / 1000 +
3984       _recPacketDelayMs;
3985   *playout_buffer_delay_ms = playout_delay_ms_;
3986   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
3987                "Channel::GetDelayEstimate()");
3988   return true;
3989 }
3990
3991 int Channel::SetInitialPlayoutDelay(int delay_ms)
3992 {
3993   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
3994                "Channel::SetInitialPlayoutDelay()");
3995   if ((delay_ms < kVoiceEngineMinMinPlayoutDelayMs) ||
3996       (delay_ms > kVoiceEngineMaxMinPlayoutDelayMs))
3997   {
3998     _engineStatisticsPtr->SetLastError(
3999         VE_INVALID_ARGUMENT, kTraceError,
4000         "SetInitialPlayoutDelay() invalid min delay");
4001     return -1;
4002   }
4003   if (audio_coding_->SetInitialPlayoutDelay(delay_ms) != 0)
4004   {
4005     _engineStatisticsPtr->SetLastError(
4006         VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
4007         "SetInitialPlayoutDelay() failed to set min playout delay");
4008     return -1;
4009   }
4010   return 0;
4011 }
4012
4013
4014 int
4015 Channel::SetMinimumPlayoutDelay(int delayMs)
4016 {
4017     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
4018                  "Channel::SetMinimumPlayoutDelay()");
4019     if ((delayMs < kVoiceEngineMinMinPlayoutDelayMs) ||
4020         (delayMs > kVoiceEngineMaxMinPlayoutDelayMs))
4021     {
4022         _engineStatisticsPtr->SetLastError(
4023             VE_INVALID_ARGUMENT, kTraceError,
4024             "SetMinimumPlayoutDelay() invalid min delay");
4025         return -1;
4026     }
4027     if (audio_coding_->SetMinimumPlayoutDelay(delayMs) != 0)
4028     {
4029         _engineStatisticsPtr->SetLastError(
4030             VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
4031             "SetMinimumPlayoutDelay() failed to set min playout delay");
4032         return -1;
4033     }
4034     return 0;
4035 }
4036
4037 void Channel::UpdatePlayoutTimestamp(bool rtcp) {
4038   uint32_t playout_timestamp = 0;
4039
4040   if (audio_coding_->PlayoutTimestamp(&playout_timestamp) == -1)  {
4041     // This can happen if this channel has not received any RTP packets. In
4042     // that case, NetEq cannot compute a playout timestamp.
4043     return;
4044   }
4045
4046   uint16_t delay_ms = 0;
4047   if (_audioDeviceModulePtr->PlayoutDelay(&delay_ms) == -1) {
4048     WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
4049                  "Channel::UpdatePlayoutTimestamp() failed to read playout"
4050                  " delay from the ADM");
4051     _engineStatisticsPtr->SetLastError(
4052         VE_CANNOT_RETRIEVE_VALUE, kTraceError,
4053         "UpdatePlayoutTimestamp() failed to retrieve playout delay");
4054     return;
4055   }
4056
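     // Cache NetEq's playout timestamp before the device playout delay is
     // subtracted below; UpdatePacketDelay() compares incoming RTP timestamps
     // against this uncompensated value.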
4057   jitter_buffer_playout_timestamp_ = playout_timestamp;
4058
4059   // Remove the playout delay.
4060   playout_timestamp -= (delay_ms * (GetPlayoutFrequency() / 1000));
4061
4062   WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
4063                "Channel::UpdatePlayoutTimestamp() => playoutTimestamp = %lu",
4064                playout_timestamp);
4065
4066   if (rtcp) {
4067     playout_timestamp_rtcp_ = playout_timestamp;
4068   } else {
4069     playout_timestamp_rtp_ = playout_timestamp;
4070   }
4071   playout_delay_ms_ = delay_ms;
4072 }
4073
4074 int Channel::GetPlayoutTimestamp(unsigned int& timestamp) {
4075   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
4076                "Channel::GetPlayoutTimestamp()");
4077   if (playout_timestamp_rtp_ == 0)  {
4078     _engineStatisticsPtr->SetLastError(
4079         VE_CANNOT_RETRIEVE_VALUE, kTraceError,
4080         "GetPlayoutTimestamp() failed to retrieve timestamp");
4081     return -1;
4082   }
4083   timestamp = playout_timestamp_rtp_;
4084   WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
4085                VoEId(_instanceId,_channelId),
4086                "GetPlayoutTimestamp() => timestamp=%u", timestamp);
4087   return 0;
4088 }
4089
4090 int
4091 Channel::SetInitTimestamp(unsigned int timestamp)
4092 {
4093     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
4094                "Channel::SetInitTimestamp()");
4095     if (channel_state_.Get().sending)
4096     {
4097         _engineStatisticsPtr->SetLastError(
4098             VE_SENDING, kTraceError, "SetInitTimestamp() already sending");
4099         return -1;
4100     }
4101     if (_rtpRtcpModule->SetStartTimestamp(timestamp) != 0)
4102     {
4103         _engineStatisticsPtr->SetLastError(
4104             VE_RTP_RTCP_MODULE_ERROR, kTraceError,
4105             "SetInitTimestamp() failed to set timestamp");
4106         return -1;
4107     }
4108     return 0;
4109 }
4110
4111 int
4112 Channel::SetInitSequenceNumber(short sequenceNumber)
4113 {
4114     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
4115                  "Channel::SetInitSequenceNumber()");
4116     if (channel_state_.Get().sending)
4117     {
4118         _engineStatisticsPtr->SetLastError(
4119             VE_SENDING, kTraceError,
4120             "SetInitSequenceNumber() already sending");
4121         return -1;
4122     }
4123     if (_rtpRtcpModule->SetSequenceNumber(sequenceNumber) != 0)
4124     {
4125         _engineStatisticsPtr->SetLastError(
4126             VE_RTP_RTCP_MODULE_ERROR, kTraceError,
4127             "SetInitSequenceNumber() failed to set sequence number");
4128         return -1;
4129     }
4130     return 0;
4131 }
4132
4133 int
4134 Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule, RtpReceiver** rtp_receiver) const
4135 {
4136     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
4137                  "Channel::GetRtpRtcp()");
4138     *rtpRtcpModule = _rtpRtcpModule.get();
4139     *rtp_receiver = rtp_receiver_.get();
4140     return 0;
4141 }
4142
4143 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use
4144 // a shared helper.
4145 int32_t
4146 Channel::MixOrReplaceAudioWithFile(int mixingFrequency)
4147 {
4148     scoped_ptr<int16_t[]> fileBuffer(new int16_t[640]);
4149     int fileSamples(0);
4150
4151     {
4152         CriticalSectionScoped cs(&_fileCritSect);
4153
4154         if (_inputFilePlayerPtr == NULL)
4155         {
4156             WEBRTC_TRACE(kTraceWarning, kTraceVoice,
4157                          VoEId(_instanceId, _channelId),
4158                          "Channel::MixOrReplaceAudioWithFile() fileplayer"
4159                              " doesn't exist");
4160             return -1;
4161         }
4162
4163         if (_inputFilePlayerPtr->Get10msAudioFromFile(fileBuffer.get(),
4164                                                       fileSamples,
4165                                                       mixingFrequency) == -1)
4166         {
4167             WEBRTC_TRACE(kTraceWarning, kTraceVoice,
4168                          VoEId(_instanceId, _channelId),
4169                          "Channel::MixOrReplaceAudioWithFile() file mixing "
4170                          "failed");
4171             return -1;
4172         }
4173         if (fileSamples == 0)
4174         {
4175             WEBRTC_TRACE(kTraceWarning, kTraceVoice,
4176                          VoEId(_instanceId, _channelId),
4177                          "Channel::MixOrReplaceAudioWithFile() file is ended");
4178             return 0;
4179         }
4180     }
4181
4182     assert(_audioFrame.samples_per_channel_ == fileSamples);
4183
4184     if (_mixFileWithMicrophone)
4185     {
4186         // Currently file stream is always mono.
4187         // TODO(xians): Change the code when FilePlayer supports real stereo.
4188         MixWithSat(_audioFrame.data_,
4189                    _audioFrame.num_channels_,
4190                    fileBuffer.get(),
4191                    1,
4192                    fileSamples);
4193     }
4194     else
4195     {
4196         // Replace ACM audio with file.
4197         // Currently file stream is always mono.
4198         // TODO(xians): Change the code when FilePlayer supports real stereo.
4199         _audioFrame.UpdateFrame(_channelId,
4200                                 0xFFFFFFFF,
4201                                 fileBuffer.get(),
4202                                 fileSamples,
4203                                 mixingFrequency,
4204                                 AudioFrame::kNormalSpeech,
4205                                 AudioFrame::kVadUnknown,
4206                                 1);
4207
4208     }
4209     return 0;
4210 }
4211
4212 int32_t
4213 Channel::MixAudioWithFile(AudioFrame& audioFrame,
4214                           int mixingFrequency)
4215 {
4216     assert(mixingFrequency <= 48000);
4217
4218     scoped_ptr<int16_t[]> fileBuffer(new int16_t[960]);
4219     int fileSamples(0);
4220
4221     {
4222         CriticalSectionScoped cs(&_fileCritSect);
4223
4224         if (_outputFilePlayerPtr == NULL)
4225         {
4226             WEBRTC_TRACE(kTraceWarning, kTraceVoice,
4227                          VoEId(_instanceId, _channelId),
4228                          "Channel::MixAudioWithFile() file mixing failed");
4229             return -1;
4230         }
4231
4232         // We should get the frequency we ask for.
4233         if (_outputFilePlayerPtr->Get10msAudioFromFile(fileBuffer.get(),
4234                                                        fileSamples,
4235                                                        mixingFrequency) == -1)
4236         {
4237             WEBRTC_TRACE(kTraceWarning, kTraceVoice,
4238                          VoEId(_instanceId, _channelId),
4239                          "Channel::MixAudioWithFile() file mixing failed");
4240             return -1;
4241         }
4242     }
4243
4244     if (audioFrame.samples_per_channel_ == fileSamples)
4245     {
4246         // Currently file stream is always mono.
4247         // TODO(xians): Change the code when FilePlayer supports real stereo.
4248         MixWithSat(audioFrame.data_,
4249                    audioFrame.num_channels_,
4250                    fileBuffer.get(),
4251                    1,
4252                    fileSamples);
4253     }
4254     else
4255     {
4256         WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
4257             "Channel::MixAudioWithFile() samples_per_channel_(%d) != "
4258             "fileSamples(%d)",
4259             audioFrame.samples_per_channel_, fileSamples);
4260         return -1;
4261     }
4262
4263     return 0;
4264 }
4265
4266 int
4267 Channel::InsertInbandDtmfTone()
4268 {
4269     // Check if we should start a new tone.
4270     if (_inbandDtmfQueue.PendingDtmf() &&
4271         !_inbandDtmfGenerator.IsAddingTone() &&
4272         _inbandDtmfGenerator.DelaySinceLastTone() >
4273         kMinTelephoneEventSeparationMs)
4274     {
4275         int8_t eventCode(0);
4276         uint16_t lengthMs(0);
4277         uint8_t attenuationDb(0);
4278
4279         eventCode = _inbandDtmfQueue.NextDtmf(&lengthMs, &attenuationDb);
4280         _inbandDtmfGenerator.AddTone(eventCode, lengthMs, attenuationDb);
4281         if (_playInbandDtmfEvent)
4282         {
4283             // Add tone to output mixer using a reduced length to minimize
4284             // risk of echo.
4285             _outputMixerPtr->PlayDtmfTone(eventCode, lengthMs - 80,
4286                                           attenuationDb);
4287         }
4288     }
4289
4290     if (_inbandDtmfGenerator.IsAddingTone())
4291     {
4292         uint16_t frequency(0);
4293         _inbandDtmfGenerator.GetSampleRate(frequency);
4294
4295         if (frequency != _audioFrame.sample_rate_hz_)
4296         {
4297             // Update sample rate of Dtmf tone since the mixing frequency
4298             // has changed.
4299             _inbandDtmfGenerator.SetSampleRate(
4300                 (uint16_t) (_audioFrame.sample_rate_hz_));
4301             // Reset the tone to be added taking the new sample rate into
4302             // account.
4303             _inbandDtmfGenerator.ResetTone();
4304         }
4305
4306         int16_t toneBuffer[320];
4307         uint16_t toneSamples(0);
4308         // Get 10ms tone segment and set time since last tone to zero
4309         if (_inbandDtmfGenerator.Get10msTone(toneBuffer, toneSamples) == -1)
4310         {
4311             WEBRTC_TRACE(kTraceWarning, kTraceVoice,
4312                        VoEId(_instanceId, _channelId),
4313                        "Channel::InsertInbandDtmfTone() inserting DTMF failed");
4314             return -1;
4315         }
4316
4317         // Replace mixed audio with DTMF tone.
4318         for (int sample = 0;
4319             sample < _audioFrame.samples_per_channel_;
4320             sample++)
4321         {
4322             for (int channel = 0;
4323                 channel < _audioFrame.num_channels_;
4324                 channel++)
4325             {
4326                 const int index = sample * _audioFrame.num_channels_ + channel;
4327                 _audioFrame.data_[index] = toneBuffer[sample];
4328             }
4329         }
4330
4331         assert(_audioFrame.samples_per_channel_ == toneSamples);
4332     } else
4333     {
4334         // Add 10ms to "delay-since-last-tone" counter
4335         _inbandDtmfGenerator.UpdateDelaySinceLastTone();
4336     }
4337     return 0;
4338 }
4339
4340 int32_t
4341 Channel::SendPacketRaw(const void *data, int len, bool RTCP)
4342 {
4343     CriticalSectionScoped cs(&_callbackCritSect);
4344     if (_transportPtr == NULL)
4345     {
4346         return -1;
4347     }
4348     if (!RTCP)
4349     {
4350         return _transportPtr->SendPacket(_channelId, data, len);
4351     }
4352     else
4353     {
4354         return _transportPtr->SendRTCPPacket(_channelId, data, len);
4355     }
4356 }
4357
4358 // Called for incoming RTP packets after successful RTP header parsing.
4359 void Channel::UpdatePacketDelay(uint32_t rtp_timestamp,
4360                                 uint16_t sequence_number) {
4361   WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
4362                "Channel::UpdatePacketDelay(timestamp=%lu, sequenceNumber=%u)",
4363                rtp_timestamp, sequence_number);
4364
4365   // Get frequency of last received payload
4366   int rtp_receive_frequency = GetPlayoutFrequency();
4367
4368   // Update the least required delay.
4369   least_required_delay_ms_ = audio_coding_->LeastRequiredDelayMs();
4370
4371   // |jitter_buffer_playout_timestamp_| is updated in UpdatePlayoutTimestamp()
4372   // for every incoming packet.
4373   uint32_t timestamp_diff_ms = (rtp_timestamp -
4374       jitter_buffer_playout_timestamp_) / (rtp_receive_frequency / 1000);
4375   if (!IsNewerTimestamp(rtp_timestamp, jitter_buffer_playout_timestamp_) ||
4376       timestamp_diff_ms > (2 * kVoiceEngineMaxMinPlayoutDelayMs)) {
4377     // If |jitter_buffer_playout_timestamp_| is newer than the incoming RTP
4378     // timestamp, the resulting difference is negative, but is set to zero.
4379     // This can happen when a network glitch causes a packet to arrive late,
4380     // and during long comfort noise periods with clock drift.
4381     timestamp_diff_ms = 0;
4382   }
4383
4384   uint16_t packet_delay_ms = (rtp_timestamp - _previousTimestamp) /
4385       (rtp_receive_frequency / 1000);
4386
4387   _previousTimestamp = rtp_timestamp;
4388
4389   if (timestamp_diff_ms == 0) return;
4390
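     // Only accept inter-packet spacings that correspond to plausible packet
     // durations (10-60 ms); values outside this range typically stem from
     // packet loss, reordering or timestamp jumps.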
4391   if (packet_delay_ms >= 10 && packet_delay_ms <= 60) {
4392     _recPacketDelayMs = packet_delay_ms;
4393   }
4394
4395   if (_average_jitter_buffer_delay_us == 0) {
4396     _average_jitter_buffer_delay_us = timestamp_diff_ms * 1000;
4397     return;
4398   }
4399
4400   // Filter average delay value using exponential filter (alpha is
4401   // 7/8). We derive 1000 *_average_jitter_buffer_delay_us here (reduces
4402   // risk of rounding error) and compensate for it in GetDelayEstimate()
4403   // later.
4404   _average_jitter_buffer_delay_us = (_average_jitter_buffer_delay_us * 7 +
4405       1000 * timestamp_diff_ms + 500) / 8;
4406 }
4407
4408 void
4409 Channel::RegisterReceiveCodecsToRTPModule()
4410 {
4411     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
4412                  "Channel::RegisterReceiveCodecsToRTPModule()");
4413
4414
4415     CodecInst codec;
4416     const uint8_t nSupportedCodecs = AudioCodingModule::NumberOfCodecs();
4417
4418     for (int idx = 0; idx < nSupportedCodecs; idx++)
4419     {
4420         // Open up the RTP/RTCP receiver for all supported codecs
4421         if ((audio_coding_->Codec(idx, &codec) == -1) ||
4422             (rtp_receiver_->RegisterReceivePayload(
4423                 codec.plname,
4424                 codec.pltype,
4425                 codec.plfreq,
4426                 codec.channels,
4427                 (codec.rate < 0) ? 0 : codec.rate) == -1))
4428         {
4429             WEBRTC_TRACE(
4430                          kTraceWarning,
4431                          kTraceVoice,
4432                          VoEId(_instanceId, _channelId),
4433                          "Channel::RegisterReceiveCodecsToRTPModule() unable"
4434                          " to register %s (%d/%d/%d/%d) to RTP/RTCP receiver",
4435                          codec.plname, codec.pltype, codec.plfreq,
4436                          codec.channels, codec.rate);
4437         }
4438         else
4439         {
4440             WEBRTC_TRACE(
4441                          kTraceInfo,
4442                          kTraceVoice,
4443                          VoEId(_instanceId, _channelId),
4444                          "Channel::RegisterReceiveCodecsToRTPModule() %s "
4445                          "(%d/%d/%d/%d) has been added to the RTP/RTCP "
4446                          "receiver",
4447                          codec.plname, codec.pltype, codec.plfreq,
4448                          codec.channels, codec.rate);
4449         }
4450     }
4451 }
4452
4453 int Channel::SetSecondarySendCodec(const CodecInst& codec,
4454                                    int red_payload_type) {
4455   // Sanity check for payload type.
4456   if (red_payload_type < 0 || red_payload_type > 127) {
4457     _engineStatisticsPtr->SetLastError(
4458         VE_PLTYPE_ERROR, kTraceError,
4459         "SetRedPayloadType() invalid RED payload type");
4460     return -1;
4461   }
4462
4463   if (SetRedPayloadType(red_payload_type) < 0) {
4464     _engineStatisticsPtr->SetLastError(
4465         VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
4466         "SetSecondarySendCodec() Failed to register RED ACM");
4467     return -1;
4468   }
4469   if (audio_coding_->RegisterSecondarySendCodec(codec) < 0) {
4470     _engineStatisticsPtr->SetLastError(
4471         VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
4472         "SetSecondarySendCodec() Failed to register secondary send codec in "
4473         "ACM");
4474     return -1;
4475   }
4476
4477   return 0;
4478 }
4479
4480 void Channel::RemoveSecondarySendCodec() {
4481   audio_coding_->UnregisterSecondarySendCodec();
4482 }
4483
4484 int Channel::GetSecondarySendCodec(CodecInst* codec) {
4485   if (audio_coding_->SecondarySendCodec(codec) < 0) {
4486     _engineStatisticsPtr->SetLastError(
4487         VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
4488         "GetSecondarySendCodec() Failed to get secondary sent codec from ACM");
4489     return -1;
4490   }
4491   return 0;
4492 }
4493
4494 // Assuming this method is called with valid payload type.
4495 int Channel::SetRedPayloadType(int red_payload_type) {
4496   CodecInst codec;
4497   bool found_red = false;
4498
4499   // Get default RED settings from the ACM database
4500   const int num_codecs = AudioCodingModule::NumberOfCodecs();
4501   for (int idx = 0; idx < num_codecs; idx++) {
4502     audio_coding_->Codec(idx, &codec);
4503     if (!STR_CASE_CMP(codec.plname, "RED")) {
4504       found_red = true;
4505       break;
4506     }
4507   }
4508
4509   if (!found_red) {
4510     _engineStatisticsPtr->SetLastError(
4511         VE_CODEC_ERROR, kTraceError,
4512         "SetRedPayloadType() RED is not supported");
4513     return -1;
4514   }
4515
4516   codec.pltype = red_payload_type;
4517   if (audio_coding_->RegisterSendCodec(codec) < 0) {
4518     _engineStatisticsPtr->SetLastError(
4519         VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
4520         "SetRedPayloadType() RED registration in ACM module failed");
4521     return -1;
4522   }
4523
4524   if (_rtpRtcpModule->SetSendREDPayloadType(red_payload_type) != 0) {
4525     _engineStatisticsPtr->SetLastError(
4526         VE_RTP_RTCP_MODULE_ERROR, kTraceError,
4527         "SetRedPayloadType() RED registration in RTP/RTCP module failed");
4528     return -1;
4529   }
4530   return 0;
4531 }
4532
4533 int Channel::SetSendRtpHeaderExtension(bool enable, RTPExtensionType type,
4534                                        unsigned char id) {
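     // Always deregister the extension first so that repeated calls, whether
     // enabling with a new id or disabling, leave the RTP module in a
     // consistent state.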
4535   int error = 0;
4536   _rtpRtcpModule->DeregisterSendRtpHeaderExtension(type);
4537   if (enable) {
4538     error = _rtpRtcpModule->RegisterSendRtpHeaderExtension(type, id);
4539   }
4540   return error;
4541 }
4542
4543 int32_t Channel::GetPlayoutFrequency() {
4544   int32_t playout_frequency = audio_coding_->PlayoutFrequency();
4545   CodecInst current_receive_codec;
4546   if (audio_coding_->ReceiveCodec(&current_receive_codec) == 0) {
4547     if (STR_CASE_CMP("G722", current_receive_codec.plname) == 0) {
4548       // Even though the actual sampling rate for G.722 audio is
4549       // 16,000 Hz, the RTP clock rate for the G722 payload format is
4550       // 8,000 Hz because that value was erroneously assigned in
4551       // RFC 1890 and must remain unchanged for backward compatibility.
4552       playout_frequency = 8000;
4553     } else if (STR_CASE_CMP("opus", current_receive_codec.plname) == 0) {
4554       // We are resampling Opus internally to 32,000 Hz until all our
4555       // DSP routines can operate at 48,000 Hz, but the RTP clock
4556       // rate for the Opus payload format is standardized to 48,000 Hz,
4557       // because that is the maximum supported decoding sampling rate.
4558       playout_frequency = 48000;
4559     }
4560   }
4561   return playout_frequency;
4562 }
4563
4564 }  // namespace voe
4565 }  // namespace webrtc