#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
-#include "crypto/encryptor.h"
-#include "crypto/symmetric_key.h"
+#include "base/strings/string_piece.h"
#include "media/cast/audio_receiver/audio_decoder.h"
#include "media/cast/framer/framer.h"
+#include "media/cast/rtcp/receiver_rtcp_event_subscriber.h"
#include "media/cast/rtcp/rtcp.h"
#include "media/cast/rtp_receiver/rtp_receiver.h"
+#include "media/cast/transport/cast_transport_defines.h"
+
+namespace {
+
+using media::cast::kMaxIpPacketSize;
+using media::cast::kRtcpCastLogHeaderSize;
+using media::cast::kRtcpReceiverEventLogSize;
// Max time we wait until an audio frame is due to be played out is released.
static const int64 kMaxAudioFrameWaitMs = 20;
static const int64 kMinSchedulingDelayMs = 1;
+// This is an upper bound on the number of events that can fit into a single
+// RTCP packet.
+static const int64 kMaxEventSubscriberEntries =
+ (kMaxIpPacketSize - kRtcpCastLogHeaderSize) / kRtcpReceiverEventLogSize;
+
+} // namespace
+
namespace media {
namespace cast {
DecodedAudioCallbackData::DecodedAudioCallbackData()
- : number_of_10ms_blocks(0),
- desired_frequency(0),
- callback() {}
+ : number_of_10ms_blocks(0), desired_frequency(0), callback() {}
DecodedAudioCallbackData::~DecodedAudioCallbackData() {}
explicit LocalRtpAudioData(AudioReceiver* audio_receiver)
: audio_receiver_(audio_receiver) {}
- virtual void OnReceivedPayloadData(
- const uint8* payload_data,
- size_t payload_size,
- const RtpCastHeader* rtp_header) OVERRIDE {
+ virtual void OnReceivedPayloadData(const uint8* payload_data,
+ size_t payload_size,
+ const RtpCastHeader* rtp_header) OVERRIDE {
audio_receiver_->IncomingParsedRtpPacket(payload_data, payload_size,
*rtp_header);
}
class LocalRtpAudioFeedback : public RtpPayloadFeedback {
public:
explicit LocalRtpAudioFeedback(AudioReceiver* audio_receiver)
- : audio_receiver_(audio_receiver) {
- }
+ : audio_receiver_(audio_receiver) {}
virtual void CastFeedback(const RtcpCastMessage& cast_message) OVERRIDE {
audio_receiver_->CastFeedback(cast_message);
class LocalRtpReceiverStatistics : public RtpReceiverStatistics {
public:
explicit LocalRtpReceiverStatistics(RtpReceiver* rtp_receiver)
- : rtp_receiver_(rtp_receiver) {
- }
+ : rtp_receiver_(rtp_receiver) {}
virtual void GetStatistics(uint8* fraction_lost,
uint32* cumulative_lost, // 24 bits valid.
uint32* extended_high_sequence_number,
uint32* jitter) OVERRIDE {
- rtp_receiver_->GetStatistics(fraction_lost,
- cumulative_lost,
- extended_high_sequence_number,
- jitter);
+ rtp_receiver_->GetStatistics(fraction_lost, cumulative_lost,
+ extended_high_sequence_number, jitter);
}
private:
const AudioReceiverConfig& audio_config,
transport::PacedPacketSender* const packet_sender)
: cast_environment_(cast_environment),
+ event_subscriber_(
+ kMaxEventSubscriberEntries,
+ ReceiverRtcpEventSubscriber::kAudioEventSubscriber),
codec_(audio_config.codec),
frequency_(audio_config.frequency),
audio_buffer_(),
if (audio_config.use_external_decoder) {
audio_buffer_.reset(new Framer(cast_environment->Clock(),
incoming_payload_feedback_.get(),
- audio_config.incoming_ssrc,
- true,
- 0));
+ audio_config.incoming_ssrc, true, 0));
} else {
- audio_decoder_.reset(new AudioDecoder(cast_environment,
- audio_config,
+ audio_decoder_.reset(new AudioDecoder(cast_environment, audio_config,
incoming_payload_feedback_.get()));
}
- if (audio_config.aes_iv_mask.size() == kAesKeySize &&
- audio_config.aes_key.size() == kAesKeySize) {
- iv_mask_ = audio_config.aes_iv_mask;
- decryption_key_.reset(crypto::SymmetricKey::Import(
- crypto::SymmetricKey::AES, audio_config.aes_key));
- decryptor_.reset(new crypto::Encryptor());
- decryptor_->Init(decryption_key_.get(),
- crypto::Encryptor::CTR,
- std::string());
- } else if (audio_config.aes_iv_mask.size() != 0 ||
- audio_config.aes_key.size() != 0) {
- DCHECK(false) << "Invalid crypto configuration";
- }
-
+ decryptor_.Initialize(audio_config.aes_key, audio_config.aes_iv_mask);
rtp_receiver_.reset(new RtpReceiver(cast_environment->Clock(),
&audio_config,
NULL,
new LocalRtpReceiverStatistics(rtp_receiver_.get()));
base::TimeDelta rtcp_interval_delta =
base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval);
- rtcp_.reset(new Rtcp(cast_environment,
- NULL,
- NULL,
- packet_sender,
- NULL,
+ rtcp_.reset(new Rtcp(cast_environment, NULL, NULL, packet_sender, NULL,
rtp_audio_receiver_statistics_.get(),
- audio_config.rtcp_mode,
- rtcp_interval_delta,
- audio_config.feedback_ssrc,
- audio_config.incoming_ssrc,
+ audio_config.rtcp_mode, rtcp_interval_delta,
+ audio_config.feedback_ssrc, audio_config.incoming_ssrc,
audio_config.rtcp_c_name));
+ cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber_);
}
-AudioReceiver::~AudioReceiver() {}
+AudioReceiver::~AudioReceiver() {
+ cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber_);
+}
void AudioReceiver::InitializeTimers() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- cast_environment_->Logging()->InsertPacketEvent(now, kAudioPacketReceived,
- rtp_header.webrtc.header.timestamp, rtp_header.frame_id,
- rtp_header.packet_id, rtp_header.max_packet_id, payload_size);
+ cast_environment_->Logging()->InsertPacketEvent(
+ now, kAudioPacketReceived, rtp_header.webrtc.header.timestamp,
+ rtp_header.frame_id, rtp_header.packet_id, rtp_header.max_packet_id,
+ payload_size);
// TODO(pwestin): update this as video to refresh over time.
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (audio_decoder_) {
DCHECK(!audio_buffer_) << "Invalid internal state";
- std::string plaintext(reinterpret_cast<const char*>(payload_data),
- payload_size);
- if (decryptor_) {
- plaintext.clear();
- if (!decryptor_->SetCounter(GetAesNonce(rtp_header.frame_id, iv_mask_))) {
- NOTREACHED() << "Failed to set counter";
- return;
- }
- if (!decryptor_->Decrypt(base::StringPiece(reinterpret_cast<const char*>(
- payload_data), payload_size), &plaintext)) {
- VLOG(1) << "Decryption error";
+ std::string plaintext;
+ if (decryptor_.initialized()) {
+ if (!decryptor_.Decrypt(
+ rtp_header.frame_id,
+ base::StringPiece(reinterpret_cast<const char*>(payload_data),
+ payload_size),
+ &plaintext))
return;
- }
+ } else {
+ plaintext.append(reinterpret_cast<const char*>(payload_data),
+ payload_size);
}
audio_decoder_->IncomingParsedRtpPacket(
reinterpret_cast<const uint8*>(plaintext.data()), plaintext.size(),
if (!queued_decoded_callbacks_.empty()) {
DecodedAudioCallbackData decoded_data = queued_decoded_callbacks_.front();
queued_decoded_callbacks_.pop_front();
- cast_environment_->PostTask(CastEnvironment::AUDIO_DECODER, FROM_HERE,
- base::Bind(&AudioReceiver::DecodeAudioFrameThread,
- base::Unretained(this),
- decoded_data.number_of_10ms_blocks,
- decoded_data.desired_frequency,
- decoded_data.callback));
+ cast_environment_->PostTask(
+ CastEnvironment::AUDIO_DECODER, FROM_HERE,
+ base::Bind(&AudioReceiver::DecodeAudioFrameThread,
+ base::Unretained(this), decoded_data.number_of_10ms_blocks,
+ decoded_data.desired_frequency, decoded_data.callback));
}
return;
}
bool complete = audio_buffer_->InsertPacket(payload_data, payload_size,
rtp_header, &duplicate);
if (duplicate) {
- cast_environment_->Logging()->InsertPacketEvent(now,
- kDuplicatePacketReceived,
- rtp_header.webrtc.header.timestamp, rtp_header.frame_id,
- rtp_header.packet_id, rtp_header.max_packet_id, payload_size);
+ cast_environment_->Logging()->InsertPacketEvent(
+ now, kDuplicateAudioPacketReceived, rtp_header.webrtc.header.timestamp,
+ rtp_header.frame_id, rtp_header.packet_id, rtp_header.max_packet_id,
+ payload_size);
// Duplicate packets are ignored.
return;
}
AudioFrameEncodedCallback callback = queued_encoded_callbacks_.front();
queued_encoded_callbacks_.pop_front();
cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
- base::Bind(&AudioReceiver::GetEncodedAudioFrame,
- weak_factory_.GetWeakPtr(), callback));
+ base::Bind(&AudioReceiver::GetEncodedAudioFrame,
+ weak_factory_.GetWeakPtr(), callback));
}
-void AudioReceiver::GetRawAudioFrame(int number_of_10ms_blocks,
- int desired_frequency, const AudioFrameDecodedCallback& callback) {
+void AudioReceiver::GetRawAudioFrame(
+ int number_of_10ms_blocks, int desired_frequency,
+ const AudioFrameDecodedCallback& callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK(audio_decoder_) << "Invalid function call in this configuration";
// TODO(pwestin): we can skip this function by posting direct to the decoder.
- cast_environment_->PostTask(CastEnvironment::AUDIO_DECODER, FROM_HERE,
- base::Bind(&AudioReceiver::DecodeAudioFrameThread,
- base::Unretained(this),
- number_of_10ms_blocks,
- desired_frequency,
- callback));
+ cast_environment_->PostTask(
+ CastEnvironment::AUDIO_DECODER, FROM_HERE,
+ base::Bind(&AudioReceiver::DecodeAudioFrameThread, base::Unretained(this),
+ number_of_10ms_blocks, desired_frequency, callback));
}
void AudioReceiver::DecodeAudioFrameThread(
- int number_of_10ms_blocks,
- int desired_frequency,
+ int number_of_10ms_blocks, int desired_frequency,
const AudioFrameDecodedCallback callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::AUDIO_DECODER));
// TODO(mikhal): Allow the application to allocate this memory.
uint32 rtp_timestamp = 0;
if (!audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
- desired_frequency,
- audio_frame.get(),
+ desired_frequency, audio_frame.get(),
&rtp_timestamp)) {
DecodedAudioCallbackData callback_data;
callback_data.number_of_10ms_blocks = number_of_10ms_blocks;
queued_decoded_callbacks_.push_back(callback_data);
return;
}
- base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN, FROM_HERE,
base::Bind(&AudioReceiver::ReturnDecodedFrameWithPlayoutDelay,
- base::Unretained(this), base::Passed(&audio_frame), rtp_timestamp,
- callback));
+ base::Unretained(this), base::Passed(&audio_frame),
+ rtp_timestamp, callback));
}
void AudioReceiver::ReturnDecodedFrameWithPlayoutDelay(
const AudioFrameDecodedCallback callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- cast_environment_->Logging()->InsertFrameEvent(now, kAudioFrameDecoded,
- rtp_timestamp, kFrameIdUnknown);
+ cast_environment_->Logging()->InsertFrameEvent(
+ now, kAudioFrameDecoded, rtp_timestamp, kFrameIdUnknown);
base::TimeTicks playout_time = GetPlayoutTime(now, rtp_timestamp);
- cast_environment_->Logging()->InsertFrameEventWithDelay(now,
- kAudioPlayoutDelay, rtp_timestamp, kFrameIdUnknown, playout_time - now);
+ cast_environment_->Logging()->InsertFrameEventWithDelay(
+ now, kAudioPlayoutDelay, rtp_timestamp, kFrameIdUnknown,
+ playout_time - now);
// Frame is ready - Send back to the caller.
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN, FROM_HERE,
base::Bind(callback, base::Passed(&audio_frame), playout_time));
}
// Already released by incoming packet.
return;
}
- uint32 rtp_timestamp = 0;
bool next_frame = false;
scoped_ptr<transport::EncodedAudioFrame> encoded_frame(
new transport::EncodedAudioFrame());
- if (!audio_buffer_->GetEncodedAudioFrame(encoded_frame.get(),
- &rtp_timestamp, &next_frame)) {
+ if (!audio_buffer_->GetEncodedAudioFrame(encoded_frame.get(), &next_frame)) {
// We have no audio frames. Wait for new packet(s).
// Since the application can post multiple AudioFrameEncodedCallback and
// we only check the next frame to play out we might have multiple timeout
return;
}
- if (decryptor_ && !DecryptAudioFrame(&encoded_frame)) {
+ if (decryptor_.initialized() && !DecryptAudioFrame(&encoded_frame)) {
// Logging already done.
return;
}
- if (PostEncodedAudioFrame(queued_encoded_callbacks_.front(), rtp_timestamp,
- next_frame, &encoded_frame)) {
+ if (PostEncodedAudioFrame(
+ queued_encoded_callbacks_.front(), next_frame, &encoded_frame)) {
// Call succeed remove callback from list.
queued_encoded_callbacks_.pop_front();
}
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK(audio_buffer_) << "Invalid function call in this configuration";
- uint32 rtp_timestamp = 0;
bool next_frame = false;
scoped_ptr<transport::EncodedAudioFrame> encoded_frame(
new transport::EncodedAudioFrame());
- if (!audio_buffer_->GetEncodedAudioFrame(encoded_frame.get(),
- &rtp_timestamp, &next_frame)) {
+ if (!audio_buffer_->GetEncodedAudioFrame(encoded_frame.get(), &next_frame)) {
// We have no audio frames. Wait for new packet(s).
VLOG(1) << "Wait for more audio packets in frame";
queued_encoded_callbacks_.push_back(callback);
return;
}
- if (decryptor_ && !DecryptAudioFrame(&encoded_frame)) {
+ if (decryptor_.initialized() && !DecryptAudioFrame(&encoded_frame)) {
// Logging already done.
queued_encoded_callbacks_.push_back(callback);
return;
}
- if (!PostEncodedAudioFrame(callback, rtp_timestamp, next_frame,
- &encoded_frame)) {
+ if (!PostEncodedAudioFrame(callback, next_frame, &encoded_frame)) {
// We have an audio frame; however we are missing packets and we have time
// to wait for new packet(s).
queued_encoded_callbacks_.push_back(callback);
bool AudioReceiver::PostEncodedAudioFrame(
const AudioFrameEncodedCallback& callback,
- uint32 rtp_timestamp,
bool next_frame,
scoped_ptr<transport::EncodedAudioFrame>* encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK(audio_buffer_) << "Invalid function call in this configuration";
+ DCHECK(encoded_frame) << "Invalid encoded_frame";
base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- base::TimeTicks playout_time = GetPlayoutTime(now, rtp_timestamp);
+ base::TimeTicks playout_time =
+ GetPlayoutTime(now, (*encoded_frame)->rtp_timestamp);
base::TimeDelta time_until_playout = playout_time - now;
base::TimeDelta min_wait_delta =
base::TimeDelta::FromMilliseconds(kMaxAudioFrameWaitMs);
- if (!next_frame && (time_until_playout > min_wait_delta)) {
+ if (!next_frame && (time_until_playout > min_wait_delta)) {
base::TimeDelta time_until_release = time_until_playout - min_wait_delta;
- cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
+ cast_environment_->PostDelayedTask(
+ CastEnvironment::MAIN, FROM_HERE,
base::Bind(&AudioReceiver::PlayoutTimeout, weak_factory_.GetWeakPtr()),
time_until_release);
VLOG(1) << "Wait until time to playout:"
(*encoded_frame)->codec = codec_;
audio_buffer_->ReleaseFrame((*encoded_frame)->frame_id);
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN, FROM_HERE,
base::Bind(callback, base::Passed(encoded_frame), playout_time));
return true;
}
-void AudioReceiver::IncomingPacket(const uint8* packet, size_t length,
- const base::Closure callback) {
+void AudioReceiver::IncomingPacket(scoped_ptr<Packet> packet) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- bool rtcp_packet = Rtcp::IsRtcpPacket(packet, length);
+ bool rtcp_packet = Rtcp::IsRtcpPacket(&packet->front(), packet->size());
if (!rtcp_packet) {
- rtp_receiver_->ReceivedPacket(packet, length);
+ rtp_receiver_->ReceivedPacket(&packet->front(), packet->size());
} else {
- rtcp_->IncomingRtcpPacket(packet, length);
+ rtcp_->IncomingRtcpPacket(&packet->front(), packet->size());
}
- cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE, callback);
}
void AudioReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
- RtcpReceiverLogMessage receiver_log;
- AudioRtcpRawMap audio_logs =
- cast_environment_->Logging()->GetAudioRtcpRawData();
-
- while (!audio_logs.empty()) {
- AudioRtcpRawMap::iterator it = audio_logs.begin();
- uint32 rtp_timestamp = it->first;
- std::pair<AudioRtcpRawMap::iterator, AudioRtcpRawMap::iterator>
- frame_range = audio_logs.equal_range(rtp_timestamp);
-
- RtcpReceiverFrameLogMessage frame_log(rtp_timestamp);
-
- AudioRtcpRawMap::const_iterator event_it = frame_range.first;
- for (; event_it != frame_range.second; ++event_it) {
- RtcpReceiverEventLogMessage event_log_message;
- event_log_message.type = event_it->second.type;
- event_log_message.event_timestamp = event_it->second.timestamp;
- event_log_message.delay_delta = event_it->second.delay_delta;
- event_log_message.packet_id = event_it->second.packet_id;
- frame_log.event_log_messages_.push_back(event_log_message);
- }
- receiver_log.push_back(frame_log);
- audio_logs.erase(rtp_timestamp);
- }
-
base::TimeTicks now = cast_environment_->Clock()->NowTicks();
cast_environment_->Logging()->InsertGenericEvent(now, kAudioAckSent,
- cast_message.ack_frame_id_);
+ cast_message.ack_frame_id_);
- rtcp_->SendRtcpFromRtpReceiver(&cast_message, &receiver_log);
+ rtcp_->SendRtcpFromRtpReceiver(&cast_message, &event_subscriber_);
}
base::TimeTicks AudioReceiver::GetPlayoutTime(base::TimeTicks now,
base::TimeDelta::FromMilliseconds(rtp_timestamp_diff / frequency_khz);
base::TimeDelta time_diff_delta = now - time_first_incoming_packet_;
- playout_time = now + std::max(rtp_time_diff_delta - time_diff_delta,
- base::TimeDelta());
+ playout_time = now + std::max(rtp_time_diff_delta - time_diff_delta,
+ base::TimeDelta());
}
}
if (playout_time.is_null()) {
// This can fail if we have not received any RTCP packets in a long time.
- playout_time = rtcp_->RtpTimestampInSenderTime(frequency_, rtp_timestamp,
- &rtp_timestamp_in_ticks) ?
- rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_ : now;
+ if (rtcp_->RtpTimestampInSenderTime(frequency_, rtp_timestamp,
+ &rtp_timestamp_in_ticks)) {
+ playout_time =
+ rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_;
+ } else {
+ playout_time = now;
+ }
}
// Don't allow the playout time to go backwards.
- if (last_playout_time_ > playout_time)
- playout_time = last_playout_time_;
+ if (last_playout_time_ > playout_time) playout_time = last_playout_time_;
last_playout_time_ = playout_time;
return playout_time;
}
bool AudioReceiver::DecryptAudioFrame(
scoped_ptr<transport::EncodedAudioFrame>* audio_frame) {
- DCHECK(decryptor_) << "Invalid state";
-
- if (!decryptor_->SetCounter(GetAesNonce((*audio_frame)->frame_id,
- iv_mask_))) {
- NOTREACHED() << "Failed to set counter";
+ if (!decryptor_.initialized())
return false;
- }
+
std::string decrypted_audio_data;
- if (!decryptor_->Decrypt((*audio_frame)->data, &decrypted_audio_data)) {
- VLOG(1) << "Decryption error";
- // Give up on this frame, release it from jitter buffer.
+ if (!decryptor_.Decrypt((*audio_frame)->frame_id,
+ (*audio_frame)->data,
+ &decrypted_audio_data)) {
+ // Give up on this frame, release it from the jitter buffer.
audio_buffer_->ReleaseFrame((*audio_frame)->frame_id);
return false;
}
void AudioReceiver::ScheduleNextRtcpReport() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeDelta time_to_send = rtcp_->TimeToSendNextRtcpReport() -
- cast_environment_->Clock()->NowTicks();
+ cast_environment_->Clock()->NowTicks();
- time_to_send = std::max(time_to_send,
- base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+ time_to_send = std::max(
+ time_to_send, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
- cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
+ cast_environment_->PostDelayedTask(
+ CastEnvironment::MAIN, FROM_HERE,
base::Bind(&AudioReceiver::SendNextRtcpReport,
- weak_factory_.GetWeakPtr()), time_to_send);
+ weak_factory_.GetWeakPtr()),
+ time_to_send);
}
void AudioReceiver::SendNextRtcpReport() {
} else {
NOTREACHED();
}
- base::TimeDelta time_to_send = send_time -
- cast_environment_->Clock()->NowTicks();
- time_to_send = std::max(time_to_send,
- base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
- cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
+ base::TimeDelta time_to_send =
+ send_time - cast_environment_->Clock()->NowTicks();
+ time_to_send = std::max(
+ time_to_send, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+ cast_environment_->PostDelayedTask(
+ CastEnvironment::MAIN, FROM_HERE,
base::Bind(&AudioReceiver::SendNextCastMessage,
- weak_factory_.GetWeakPtr()), time_to_send);
+ weak_factory_.GetWeakPtr()),
+ time_to_send);
}
void AudioReceiver::SendNextCastMessage() {