/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "webrtc/modules/video_coding/main/source/receiver.h"

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
#include "webrtc/modules/video_coding/main/source/internal_defines.h"
#include "webrtc/modules/video_coding/main/source/media_opt_util.h"
#include "webrtc/system_wrappers/interface/clock.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/system_wrappers/interface/trace_event.h"
// Upper bound accepted by SetMinReceiverDelay(), in milliseconds.
enum { kMaxReceiverDelayMs = 10000 };
26 VCMReceiver::VCMReceiver(VCMTiming* timing,
28 EventFactory* event_factory,
32 : crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
35 receiver_id_(receiver_id),
37 jitter_buffer_(clock_, event_factory, vcm_id, receiver_id, master),
39 render_wait_event_(event_factory->CreateEvent()),
41 max_video_delay_ms_(kMaxVideoDelayMs) {}
43 VCMReceiver::~VCMReceiver() {
44 render_wait_event_->Set();
48 void VCMReceiver::Reset() {
49 CriticalSectionScoped cs(crit_sect_);
50 if (!jitter_buffer_.Running()) {
51 jitter_buffer_.Start();
53 jitter_buffer_.Flush();
55 render_wait_event_->Reset();
63 int32_t VCMReceiver::Initialize() {
65 CriticalSectionScoped cs(crit_sect_);
67 SetNackMode(kNoNack, -1, -1);
72 void VCMReceiver::UpdateRtt(uint32_t rtt) {
73 jitter_buffer_.UpdateRtt(rtt);
76 int32_t VCMReceiver::InsertPacket(const VCMPacket& packet,
78 uint16_t frame_height) {
79 if (packet.frameType == kVideoFrameKey) {
80 WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideoCoding,
81 VCMId(vcm_id_, receiver_id_),
82 "Inserting key frame packet seqnum=%u, timestamp=%u",
83 packet.seqNum, packet.timestamp);
86 // Insert the packet into the jitter buffer. The packet can either be empty or
87 // contain media at this point.
88 bool retransmitted = false;
89 const VCMFrameBufferEnum ret = jitter_buffer_.InsertPacket(packet,
91 if (ret == kOldPacket) {
93 } else if (ret == kFlushIndicator) {
94 return VCM_FLUSH_INDICATOR;
96 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding,
97 VCMId(vcm_id_, receiver_id_),
98 "Error inserting packet seqnum=%u, timestamp=%u",
99 packet.seqNum, packet.timestamp);
100 return VCM_JITTER_BUFFER_ERROR;
102 if (ret == kCompleteSession && !retransmitted) {
103 // We don't want to include timestamps which have suffered from
104 // retransmission here, since we compensate with extra retransmission
105 // delay within the jitter estimate.
106 timing_->IncomingTimestamp(packet.timestamp, clock_->TimeInMilliseconds());
109 // Only trace the primary receiver to make it possible to parse and plot
111 WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
112 VCMId(vcm_id_, receiver_id_),
113 "Packet seqnum=%u timestamp=%u inserted at %u",
114 packet.seqNum, packet.timestamp,
115 MaskWord64ToUWord32(clock_->TimeInMilliseconds()));
120 VCMEncodedFrame* VCMReceiver::FrameForDecoding(
121 uint16_t max_wait_time_ms,
122 int64_t& next_render_time_ms,
124 VCMReceiver* dual_receiver) {
125 const int64_t start_time_ms = clock_->TimeInMilliseconds();
126 uint32_t frame_timestamp = 0;
127 // Exhaust wait time to get a complete frame for decoding.
128 bool found_frame = jitter_buffer_.NextCompleteTimestamp(
129 max_wait_time_ms, &frame_timestamp);
132 // Get an incomplete frame when enabled.
133 const bool dual_receiver_enabled_and_passive = (dual_receiver != NULL &&
134 dual_receiver->State() == kPassive &&
135 dual_receiver->NackMode() == kNack);
136 if (dual_receiver_enabled_and_passive &&
137 !jitter_buffer_.CompleteSequenceWithNextFrame()) {
138 // Jitter buffer state might get corrupt with this frame.
139 dual_receiver->CopyJitterBufferStateFromReceiver(*this);
141 found_frame = jitter_buffer_.NextMaybeIncompleteTimestamp(
149 // We have a frame - Set timing and render timestamp.
150 timing_->SetJitterDelay(jitter_buffer_.EstimatedJitterMs());
151 const int64_t now_ms = clock_->TimeInMilliseconds();
152 timing_->UpdateCurrentDelay(frame_timestamp);
153 next_render_time_ms = timing_->RenderTimeMs(frame_timestamp, now_ms);
154 // Check render timing.
155 bool timing_error = false;
156 // Assume that render timing errors are due to changes in the video stream.
157 if (next_render_time_ms < 0) {
159 } else if (abs(next_render_time_ms - now_ms) > max_video_delay_ms_) {
160 WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
161 VCMId(vcm_id_, receiver_id_),
162 "This frame is out of our delay bounds, resetting jitter "
164 static_cast<int>(abs(next_render_time_ms - now_ms)),
165 max_video_delay_ms_);
167 } else if (static_cast<int>(timing_->TargetVideoDelay()) >
168 max_video_delay_ms_) {
169 WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
170 VCMId(vcm_id_, receiver_id_),
171 "More than %u ms target delay. Flushing jitter buffer and"
172 "resetting timing.", max_video_delay_ms_);
177 // Timing error => reset timing and flush the jitter buffer.
178 jitter_buffer_.Flush();
183 if (!render_timing) {
184 // Decode frame as close as possible to the render timestamp.
185 const int32_t available_wait_time = max_wait_time_ms -
186 static_cast<int32_t>(clock_->TimeInMilliseconds() - start_time_ms);
187 uint16_t new_max_wait_time = static_cast<uint16_t>(
188 VCM_MAX(available_wait_time, 0));
189 uint32_t wait_time_ms = timing_->MaxWaitingTime(
190 next_render_time_ms, clock_->TimeInMilliseconds());
191 if (new_max_wait_time < wait_time_ms) {
192 // We're not allowed to wait until the frame is supposed to be rendered,
193 // waiting as long as we're allowed to avoid busy looping, and then return
194 // NULL. Next call to this function might return the frame.
195 render_wait_event_->Wait(max_wait_time_ms);
198 // Wait until it's time to render.
199 render_wait_event_->Wait(wait_time_ms);
202 // Extract the frame from the jitter buffer and set the render time.
203 VCMEncodedFrame* frame = jitter_buffer_.ExtractAndSetDecode(frame_timestamp);
207 frame->SetRenderTime(next_render_time_ms);
208 TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->TimeStamp(),
209 "SetRenderTS", "render_time", next_render_time_ms);
210 if (dual_receiver != NULL) {
211 dual_receiver->UpdateState(*frame);
213 if (!frame->Complete()) {
214 // Update stats for incomplete frames.
215 bool retransmitted = false;
216 const int64_t last_packet_time_ms =
217 jitter_buffer_.LastPacketTime(frame, &retransmitted);
218 if (last_packet_time_ms >= 0 && !retransmitted) {
219 // We don't want to include timestamps which have suffered from
220 // retransmission here, since we compensate with extra retransmission
221 // delay within the jitter estimate.
222 timing_->IncomingTimestamp(frame_timestamp, last_packet_time_ms);
228 void VCMReceiver::ReleaseFrame(VCMEncodedFrame* frame) {
229 jitter_buffer_.ReleaseFrame(frame);
232 void VCMReceiver::ReceiveStatistics(uint32_t* bitrate,
233 uint32_t* framerate) {
236 jitter_buffer_.IncomingRateStatistics(framerate, bitrate);
239 void VCMReceiver::ReceivedFrameCount(VCMFrameCount* frame_count) const {
241 jitter_buffer_.FrameStatistics(&frame_count->numDeltaFrames,
242 &frame_count->numKeyFrames);
245 uint32_t VCMReceiver::DiscardedPackets() const {
246 return jitter_buffer_.num_discarded_packets();
249 void VCMReceiver::SetNackMode(VCMNackMode nackMode,
250 int low_rtt_nack_threshold_ms,
251 int high_rtt_nack_threshold_ms) {
252 CriticalSectionScoped cs(crit_sect_);
253 // Default to always having NACK enabled in hybrid mode.
254 jitter_buffer_.SetNackMode(nackMode, low_rtt_nack_threshold_ms,
255 high_rtt_nack_threshold_ms);
257 state_ = kPassive; // The dual decoder defaults to passive.
261 void VCMReceiver::SetNackSettings(size_t max_nack_list_size,
262 int max_packet_age_to_nack,
263 int max_incomplete_time_ms) {
264 jitter_buffer_.SetNackSettings(max_nack_list_size,
265 max_packet_age_to_nack,
266 max_incomplete_time_ms);
269 VCMNackMode VCMReceiver::NackMode() const {
270 CriticalSectionScoped cs(crit_sect_);
271 return jitter_buffer_.nack_mode();
274 VCMNackStatus VCMReceiver::NackList(uint16_t* nack_list,
276 uint16_t* nack_list_length) {
277 bool request_key_frame = false;
278 uint16_t* internal_nack_list = jitter_buffer_.GetNackList(
279 nack_list_length, &request_key_frame);
280 if (*nack_list_length > size) {
281 *nack_list_length = 0;
282 return kNackNeedMoreMemory;
284 if (internal_nack_list != NULL && *nack_list_length > 0) {
285 memcpy(nack_list, internal_nack_list, *nack_list_length * sizeof(uint16_t));
287 if (request_key_frame) {
288 return kNackKeyFrameRequest;
293 // Decide whether we should change decoder state. This should be done if the
294 // dual decoder has caught up with the decoder decoding with packet losses.
295 bool VCMReceiver::DualDecoderCaughtUp(VCMEncodedFrame* dual_frame,
296 VCMReceiver& dual_receiver) const {
297 if (dual_frame == NULL) {
300 if (jitter_buffer_.LastDecodedTimestamp() == dual_frame->TimeStamp()) {
301 dual_receiver.UpdateState(kWaitForPrimaryDecode);
307 void VCMReceiver::CopyJitterBufferStateFromReceiver(
308 const VCMReceiver& receiver) {
309 jitter_buffer_.CopyFrom(receiver.jitter_buffer_);
312 VCMReceiverState VCMReceiver::State() const {
313 CriticalSectionScoped cs(crit_sect_);
317 void VCMReceiver::SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) {
318 jitter_buffer_.SetDecodeErrorMode(decode_error_mode);
321 VCMDecodeErrorMode VCMReceiver::DecodeErrorMode() const {
322 return jitter_buffer_.decode_error_mode();
325 int VCMReceiver::SetMinReceiverDelay(int desired_delay_ms) {
326 CriticalSectionScoped cs(crit_sect_);
327 if (desired_delay_ms < 0 || desired_delay_ms > kMaxReceiverDelayMs) {
330 max_video_delay_ms_ = desired_delay_ms + kMaxVideoDelayMs;
331 // Initializing timing to the desired delay.
332 timing_->set_min_playout_delay(desired_delay_ms);
336 int VCMReceiver::RenderBufferSizeMs() {
337 uint32_t timestamp_start = 0u;
338 uint32_t timestamp_end = 0u;
339 // Render timestamps are computed just prior to decoding. Therefore this is
340 // only an estimate based on frames' timestamps and current timing state.
341 jitter_buffer_.RenderBufferSize(×tamp_start, ×tamp_end);
342 if (timestamp_start == timestamp_end) {
346 const int64_t now_ms = clock_->TimeInMilliseconds();
347 timing_->SetJitterDelay(jitter_buffer_.EstimatedJitterMs());
348 // Get render timestamps.
349 uint32_t render_start = timing_->RenderTimeMs(timestamp_start, now_ms);
350 uint32_t render_end = timing_->RenderTimeMs(timestamp_end, now_ms);
351 return render_end - render_start;
354 void VCMReceiver::UpdateState(VCMReceiverState new_state) {
355 CriticalSectionScoped cs(crit_sect_);
356 assert(!(state_ == kPassive && new_state == kWaitForPrimaryDecode));
360 void VCMReceiver::UpdateState(const VCMEncodedFrame& frame) {
361 if (jitter_buffer_.nack_mode() == kNoNack) {
362 // Dual decoder mode has not been enabled.
365 // Update the dual receiver state.
366 if (frame.Complete() && frame.FrameType() == kVideoFrameKey) {
367 UpdateState(kPassive);
369 if (State() == kWaitForPrimaryDecode &&
370 frame.Complete() && !frame.MissingFrame()) {
371 UpdateState(kPassive);
373 if (frame.MissingFrame() || !frame.Complete()) {
374 // State was corrupted, enable dual receiver.
375 UpdateState(kReceiving);
378 } // namespace webrtc