Upstream version 11.39.250.0
[platform/framework/web/crosswalk.git] / src/content/renderer/media/webrtc_local_audio_track.cc
index ce680c0..99ada98 100644
@@ -5,44 +5,29 @@
 #include "content/renderer/media/webrtc_local_audio_track.h"
 
 #include "content/public/renderer/media_stream_audio_sink.h"
+#include "content/renderer/media/media_stream_audio_level_calculator.h"
+#include "content/renderer/media/media_stream_audio_processor.h"
 #include "content/renderer/media/media_stream_audio_sink_owner.h"
 #include "content/renderer/media/media_stream_audio_track_sink.h"
 #include "content/renderer/media/peer_connection_audio_sink_owner.h"
 #include "content/renderer/media/webaudio_capturer_source.h"
+#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
 #include "content/renderer/media/webrtc_audio_capturer.h"
-#include "content/renderer/media/webrtc_local_audio_source_provider.h"
-#include "media/base/audio_fifo.h"
-#include "third_party/libjingle/source/talk/media/base/audiorenderer.h"
 
 namespace content {
 
-static const char kAudioTrackKind[] = "audio";
-
-scoped_refptr<WebRtcLocalAudioTrack> WebRtcLocalAudioTrack::Create(
-    const std::string& id,
-    const scoped_refptr<WebRtcAudioCapturer>& capturer,
-    WebAudioCapturerSource* webaudio_source,
-    webrtc::AudioSourceInterface* track_source) {
-  talk_base::RefCountedObject<WebRtcLocalAudioTrack>* track =
-      new talk_base::RefCountedObject<WebRtcLocalAudioTrack>(
-          id, capturer, webaudio_source, track_source);
-  return track;
-}
-
 WebRtcLocalAudioTrack::WebRtcLocalAudioTrack(
-    const std::string& label,
+    WebRtcLocalAudioTrackAdapter* adapter,
     const scoped_refptr<WebRtcAudioCapturer>& capturer,
-    WebAudioCapturerSource* webaudio_source,
-    webrtc::AudioSourceInterface* track_source)
-    : webrtc::MediaStreamTrack<webrtc::AudioTrackInterface>(label),
+    WebAudioCapturerSource* webaudio_source)
+    : MediaStreamTrack(adapter, true),
+      adapter_(adapter),
       capturer_(capturer),
-      webaudio_source_(webaudio_source),
-      track_source_(track_source) {
+      webaudio_source_(webaudio_source) {
   DCHECK(capturer.get() || webaudio_source);
-  if (!webaudio_source_) {
-    source_provider_.reset(new WebRtcLocalAudioSourceProvider());
-    AddSink(source_provider_.get());
-  }
+
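+  // Give the adapter a back pointer to this track.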
+  adapter_->Initialize(this);
+
   DVLOG(1) << "WebRtcLocalAudioTrack::WebRtcLocalAudioTrack()";
 }
 
@@ -57,16 +42,22 @@ void WebRtcLocalAudioTrack::Capture(const int16* audio_data,
                                     base::TimeDelta delay,
                                     int volume,
                                     bool key_pressed,
-                                    bool need_audio_processing) {
+                                    bool need_audio_processing,
+                                    bool force_report_nonzero_energy) {
   DCHECK(capture_thread_checker_.CalledOnValidThread());
+
+  // Calculate the signal level regardless of whether the track is enabled
+  // or disabled.
+  int signal_level = level_calculator_->Calculate(
+      audio_data, audio_parameters_.channels(),
+      audio_parameters_.frames_per_buffer(), force_report_nonzero_energy);
+  adapter_->SetSignalLevel(signal_level);
+
   scoped_refptr<WebRtcAudioCapturer> capturer;
-  std::vector<int> voe_channels;
   SinkList::ItemList sinks;
   SinkList::ItemList sinks_to_notify_format;
   {
     base::AutoLock auto_lock(lock_);
     capturer = capturer_;
-    voe_channels = voe_channels_;
     sinks = sinks_.Items();
     sinks_.RetrieveAndClearTags(&sinks_to_notify_format);
   }
@@ -83,6 +74,7 @@ void WebRtcLocalAudioTrack::Capture(const int16* audio_data,
   // disabled. This is currently done so to feed input to WebRTC typing
   // detection and should be changed when audio processing is moved from
   // WebRTC to the track.
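+  // The VoE channel IDs now live on the adapter rather than on this track.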
+  std::vector<int> voe_channels = adapter_->VoeChannels();
   for (SinkList::ItemList::const_iterator it = sinks.begin();
        it != sinks.end();
        ++it) {
@@ -95,7 +87,7 @@ void WebRtcLocalAudioTrack::Capture(const int16* audio_data,
                                    volume,
                                    need_audio_processing,
                                    key_pressed);
-    if (new_volume != 0 && capturer.get() && !webaudio_source_) {
+    if (new_volume != 0 && capturer.get() && !webaudio_source_.get()) {
       // Feed the new volume to WebRtc while changing the volume on the
       // browser.
       capturer->SetVolume(new_volume);
@@ -112,47 +104,21 @@ void WebRtcLocalAudioTrack::OnSetFormat(
   DCHECK(capture_thread_checker_.CalledOnValidThread());
 
   audio_parameters_ = params;
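+  // (Re)create the level calculator whenever the audio format changes.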
+  level_calculator_.reset(new MediaStreamAudioLevelCalculator());
 
   base::AutoLock auto_lock(lock_);
   // Remember to notify all sinks of the new format.
   sinks_.TagAll();
 }
 
-void WebRtcLocalAudioTrack::AddChannel(int channel_id) {
-  DVLOG(1) << "WebRtcLocalAudioTrack::AddChannel(channel_id="
-           << channel_id << ")";
-  base::AutoLock auto_lock(lock_);
-  if (std::find(voe_channels_.begin(), voe_channels_.end(), channel_id) !=
-      voe_channels_.end()) {
-    // We need to handle the case when the same channel is connected to the
-    // track more than once.
-    return;
-  }
-
-  voe_channels_.push_back(channel_id);
-}
-
-void WebRtcLocalAudioTrack::RemoveChannel(int channel_id) {
-  DVLOG(1) << "WebRtcLocalAudioTrack::RemoveChannel(channel_id="
-           << channel_id << ")";
-  base::AutoLock auto_lock(lock_);
-  std::vector<int>::iterator iter =
-      std::find(voe_channels_.begin(), voe_channels_.end(), channel_id);
-  DCHECK(iter != voe_channels_.end());
-  voe_channels_.erase(iter);
-}
-
-// webrtc::AudioTrackInterface implementation.
-webrtc::AudioSourceInterface* WebRtcLocalAudioTrack::GetSource() const {
-  return track_source_;
-}
-
-cricket::AudioRenderer* WebRtcLocalAudioTrack::GetRenderer() {
-  return this;
-}
-
-std::string WebRtcLocalAudioTrack::kind() const {
-  return kAudioTrackKind;
+void WebRtcLocalAudioTrack::SetAudioProcessor(
+    const scoped_refptr<MediaStreamAudioProcessor>& processor) {
+  // The |processor| may have no audio processing enabled, which can happen
+  // if kDisableAudioTrackProcessing is set or if all the constraints in the
+  // |processor| are turned off. In that case, pass NULL to the adapter to
+  // indicate that no stats can be obtained from the processor.
+  adapter_->SetAudioProcessor(processor->has_audio_processing() ?
+      processor : NULL);
 }
 
 void WebRtcLocalAudioTrack::AddSink(MediaStreamAudioSink* sink) {
@@ -170,7 +136,7 @@ void WebRtcLocalAudioTrack::AddSink(MediaStreamAudioSink* sink) {
   // we remember to call OnSetFormat() on the new sink.
   scoped_refptr<MediaStreamAudioTrackSink> sink_owner(
       new MediaStreamAudioSinkOwner(sink));
-  sinks_.AddAndTag(sink_owner);
+  sinks_.AddAndTag(sink_owner.get());
 }
 
 void WebRtcLocalAudioTrack::RemoveSink(MediaStreamAudioSink* sink) {
@@ -204,7 +170,7 @@ void WebRtcLocalAudioTrack::AddSink(PeerConnectionAudioSink* sink) {
   // we remember to call OnSetFormat() on the new sink.
   scoped_refptr<MediaStreamAudioTrackSink> sink_owner(
       new PeerConnectionAudioSinkOwner(sink));
-  sinks_.AddAndTag(sink_owner);
+  sinks_.AddAndTag(sink_owner.get());
 }
 
 void WebRtcLocalAudioTrack::RemoveSink(PeerConnectionAudioSink* sink) {