/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/video_coding/main/source/receiver.h"

#include <assert.h>

#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
#include "webrtc/modules/video_coding/main/source/internal_defines.h"
#include "webrtc/modules/video_coding/main/source/media_opt_util.h"
#include "webrtc/system_wrappers/interface/clock.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/system_wrappers/interface/trace_event.h"

namespace webrtc {

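// Upper bound on the extra delay accepted by SetMinReceiverDelay(), in
// milliseconds.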
enum { kMaxReceiverDelayMs = 10000 };

VCMReceiver::VCMReceiver(VCMTiming* timing,
                         Clock* clock,
                         EventFactory* event_factory,
                         int32_t vcm_id,
                         int32_t receiver_id,
                         bool master)
    : crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
      vcm_id_(vcm_id),
      clock_(clock),
      receiver_id_(receiver_id),
      master_(master),
      jitter_buffer_(clock_, event_factory, vcm_id, receiver_id, master),
      timing_(timing),
      render_wait_event_(event_factory->CreateEvent()),
      state_(kPassive),
      max_video_delay_ms_(kMaxVideoDelayMs) {}

VCMReceiver::~VCMReceiver() {
  render_wait_event_->Set();
  delete crit_sect_;
}

void VCMReceiver::Reset() {
  CriticalSectionScoped cs(crit_sect_);
  if (!jitter_buffer_.Running()) {
    jitter_buffer_.Start();
  } else {
    jitter_buffer_.Flush();
  }
  render_wait_event_->Reset();
  if (master_) {
    state_ = kReceiving;
  } else {
    state_ = kPassive;
  }
}

int32_t VCMReceiver::Initialize() {
  Reset();
  CriticalSectionScoped cs(crit_sect_);
  if (!master_) {
    SetNackMode(kNoNack, -1, -1);
  }
  return VCM_OK;
}

void VCMReceiver::UpdateRtt(uint32_t rtt) {
  jitter_buffer_.UpdateRtt(rtt);
}

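// Inserts a received packet into the jitter buffer. Old packets are dropped
// silently, jitter buffer errors are propagated, and the timestamps of
// complete, non-retransmitted frames are forwarded to the timing module.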
int32_t VCMReceiver::InsertPacket(const VCMPacket& packet,
                                  uint16_t frame_width,
                                  uint16_t frame_height) {
  if (packet.frameType == kVideoFrameKey) {
    WEBRTC_TRACE(webrtc::kTraceInfo, webrtc::kTraceVideoCoding,
                 VCMId(vcm_id_, receiver_id_),
                 "Inserting key frame packet seqnum=%u, timestamp=%u",
                 packet.seqNum, packet.timestamp);
  }

  // Insert the packet into the jitter buffer. The packet can either be empty
  // or contain media at this point.
  bool retransmitted = false;
  const VCMFrameBufferEnum ret = jitter_buffer_.InsertPacket(packet,
                                                             &retransmitted);
  if (ret == kOldPacket) {
    return VCM_OK;
  } else if (ret == kFlushIndicator) {
    return VCM_FLUSH_INDICATOR;
  } else if (ret < 0) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding,
                 VCMId(vcm_id_, receiver_id_),
                 "Error inserting packet seqnum=%u, timestamp=%u",
                 packet.seqNum, packet.timestamp);
    return VCM_JITTER_BUFFER_ERROR;
  }
  if (ret == kCompleteSession && !retransmitted) {
    // We don't want to include timestamps which have suffered from
    // retransmission here, since we compensate with extra retransmission
    // delay within the jitter estimate.
    timing_->IncomingTimestamp(packet.timestamp, clock_->TimeInMilliseconds());
  }
  if (master_) {
    // Only trace the primary receiver to make it possible to parse and plot
    // the trace file.
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
                 VCMId(vcm_id_, receiver_id_),
                 "Packet seqnum=%u timestamp=%u inserted at %u",
                 packet.seqNum, packet.timestamp,
                 MaskWord64ToUWord32(clock_->TimeInMilliseconds()));
  }
  return VCM_OK;
}

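// Returns the next frame available for decoding, or NULL if no frame could be
// retrieved within max_wait_time_ms. Looks first for a complete frame and,
// failing that, for an incomplete one when the jitter buffer allows it; also
// updates the timing module and, in dual-decoder mode, the dual receiver.
//
// Rough usage sketch from an owning module (hypothetical caller code, not
// part of this file):
//
//   int64_t render_time_ms = 0;
//   VCMEncodedFrame* frame =
//       receiver.FrameForDecoding(50, render_time_ms, true, NULL);
//   if (frame != NULL) {
//     // ... decode |frame|, then ...
//     receiver.ReleaseFrame(frame);
//   }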
VCMEncodedFrame* VCMReceiver::FrameForDecoding(
    uint16_t max_wait_time_ms,
    int64_t& next_render_time_ms,
    bool render_timing,
    VCMReceiver* dual_receiver) {
  const int64_t start_time_ms = clock_->TimeInMilliseconds();
  uint32_t frame_timestamp = 0;
  // Exhaust wait time to get a complete frame for decoding.
  bool found_frame = jitter_buffer_.NextCompleteTimestamp(
      max_wait_time_ms, &frame_timestamp);

  if (!found_frame) {
    // Get an incomplete frame when enabled.
    const bool dual_receiver_enabled_and_passive = (dual_receiver != NULL &&
        dual_receiver->State() == kPassive &&
        dual_receiver->NackMode() == kNack);
    if (dual_receiver_enabled_and_passive &&
        !jitter_buffer_.CompleteSequenceWithNextFrame()) {
      // The jitter buffer state might get corrupted by this frame.
      dual_receiver->CopyJitterBufferStateFromReceiver(*this);
    }
    found_frame = jitter_buffer_.NextMaybeIncompleteTimestamp(
        &frame_timestamp);
  }

  if (!found_frame) {
    return NULL;
  }

  // We have a frame. Set timing and the render timestamp.
  timing_->SetJitterDelay(jitter_buffer_.EstimatedJitterMs());
  const int64_t now_ms = clock_->TimeInMilliseconds();
  timing_->UpdateCurrentDelay(frame_timestamp);
  next_render_time_ms = timing_->RenderTimeMs(frame_timestamp, now_ms);
  // Check render timing.
  bool timing_error = false;
  // Assume that render timing errors are due to changes in the video stream.
  if (next_render_time_ms < 0) {
    timing_error = true;
  } else if (abs(next_render_time_ms - now_ms) > max_video_delay_ms_) {
    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
                 VCMId(vcm_id_, receiver_id_),
                 "This frame is out of our delay bounds, resetting jitter "
                 "buffer: %d > %d",
                 static_cast<int>(abs(next_render_time_ms - now_ms)),
                 max_video_delay_ms_);
    timing_error = true;
  } else if (static_cast<int>(timing_->TargetVideoDelay()) >
             max_video_delay_ms_) {
    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
                 VCMId(vcm_id_, receiver_id_),
                 "More than %u ms target delay. Flushing jitter buffer and "
                 "resetting timing.", max_video_delay_ms_);
    timing_error = true;
  }

  if (timing_error) {
    // Timing error => reset timing and flush the jitter buffer.
    jitter_buffer_.Flush();
    timing_->Reset();
    return NULL;
  }

  if (!render_timing) {
    // Decode frame as close as possible to the render timestamp.
    const int32_t available_wait_time = max_wait_time_ms -
        static_cast<int32_t>(clock_->TimeInMilliseconds() - start_time_ms);
    uint16_t new_max_wait_time = static_cast<uint16_t>(
        VCM_MAX(available_wait_time, 0));
    uint32_t wait_time_ms = timing_->MaxWaitingTime(
        next_render_time_ms, clock_->TimeInMilliseconds());
    if (new_max_wait_time < wait_time_ms) {
      // We're not allowed to wait until the frame is supposed to be rendered.
      // Wait as long as we're allowed to, in order to avoid busy looping, and
      // then return NULL. The next call to this function might return the
      // frame.
      render_wait_event_->Wait(new_max_wait_time);
      return NULL;
    }
    // Wait until it's time to render.
    render_wait_event_->Wait(wait_time_ms);
  }

  // Extract the frame from the jitter buffer and set the render time.
  VCMEncodedFrame* frame = jitter_buffer_.ExtractAndSetDecode(frame_timestamp);
  if (frame == NULL) {
    return NULL;
  }
  frame->SetRenderTime(next_render_time_ms);
  TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->TimeStamp(),
                          "SetRenderTS", "render_time", next_render_time_ms);
  if (dual_receiver != NULL) {
    dual_receiver->UpdateState(*frame);
  }
  if (!frame->Complete()) {
    // Update stats for incomplete frames.
    bool retransmitted = false;
    const int64_t last_packet_time_ms =
        jitter_buffer_.LastPacketTime(frame, &retransmitted);
    if (last_packet_time_ms >= 0 && !retransmitted) {
      // We don't want to include timestamps which have suffered from
      // retransmission here, since we compensate with extra retransmission
      // delay within the jitter estimate.
      timing_->IncomingTimestamp(frame_timestamp, last_packet_time_ms);
    }
  }
  return frame;
}

void VCMReceiver::ReleaseFrame(VCMEncodedFrame* frame) {
  jitter_buffer_.ReleaseFrame(frame);
}

void VCMReceiver::ReceiveStatistics(uint32_t* bitrate,
                                    uint32_t* framerate) {
  assert(bitrate);
  assert(framerate);
  jitter_buffer_.IncomingRateStatistics(framerate, bitrate);
}

void VCMReceiver::ReceivedFrameCount(VCMFrameCount* frame_count) const {
  assert(frame_count);
  jitter_buffer_.FrameStatistics(&frame_count->numDeltaFrames,
                                 &frame_count->numKeyFrames);
}

uint32_t VCMReceiver::DiscardedPackets() const {
  return jitter_buffer_.num_discarded_packets();
}

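// Configures the jitter buffer's NACK mode and its low/high RTT thresholds.
// A non-master (dual) receiver is reset to the passive state.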
void VCMReceiver::SetNackMode(VCMNackMode nackMode,
                              int low_rtt_nack_threshold_ms,
                              int high_rtt_nack_threshold_ms) {
  CriticalSectionScoped cs(crit_sect_);
  // Default to always having NACK enabled in hybrid mode.
  jitter_buffer_.SetNackMode(nackMode, low_rtt_nack_threshold_ms,
                             high_rtt_nack_threshold_ms);
  if (!master_) {
    state_ = kPassive;  // The dual decoder defaults to passive.
  }
}

void VCMReceiver::SetNackSettings(size_t max_nack_list_size,
                                  int max_packet_age_to_nack,
                                  int max_incomplete_time_ms) {
  jitter_buffer_.SetNackSettings(max_nack_list_size,
                                 max_packet_age_to_nack,
                                 max_incomplete_time_ms);
}

VCMNackMode VCMReceiver::NackMode() const {
  CriticalSectionScoped cs(crit_sect_);
  return jitter_buffer_.nack_mode();
}

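// Copies the jitter buffer's current NACK list into nack_list (at most size
// entries). Returns kNackNeedMoreMemory if the list does not fit,
// kNackKeyFrameRequest if a key frame should be requested instead, and
// kNackOk otherwise.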
VCMNackStatus VCMReceiver::NackList(uint16_t* nack_list,
                                    uint16_t size,
                                    uint16_t* nack_list_length) {
  bool request_key_frame = false;
  uint16_t* internal_nack_list = jitter_buffer_.GetNackList(
      nack_list_length, &request_key_frame);
  if (*nack_list_length > size) {
    *nack_list_length = 0;
    return kNackNeedMoreMemory;
  }
  if (internal_nack_list != NULL && *nack_list_length > 0) {
    memcpy(nack_list, internal_nack_list, *nack_list_length * sizeof(uint16_t));
  }
  if (request_key_frame) {
    return kNackKeyFrameRequest;
  }
  return kNackOk;
}

// Decides whether the decoder state should change. A change is warranted when
// the dual decoder has caught up with the primary decoder, which is decoding
// with packet losses.
bool VCMReceiver::DualDecoderCaughtUp(VCMEncodedFrame* dual_frame,
                                      VCMReceiver& dual_receiver) const {
  if (dual_frame == NULL) {
    return false;
  }
  if (jitter_buffer_.LastDecodedTimestamp() == dual_frame->TimeStamp()) {
    dual_receiver.UpdateState(kWaitForPrimaryDecode);
    return true;
  }
  return false;
}

void VCMReceiver::CopyJitterBufferStateFromReceiver(
    const VCMReceiver& receiver) {
  jitter_buffer_.CopyFrom(receiver.jitter_buffer_);
}

VCMReceiverState VCMReceiver::State() const {
  CriticalSectionScoped cs(crit_sect_);
  return state_;
}

void VCMReceiver::SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) {
  jitter_buffer_.SetDecodeErrorMode(decode_error_mode);
}

VCMDecodeErrorMode VCMReceiver::DecodeErrorMode() const {
  return jitter_buffer_.decode_error_mode();
}

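// Sets the minimum playout delay in milliseconds. Returns -1 for values
// outside [0, kMaxReceiverDelayMs]; otherwise extends the allowed end-to-end
// video delay by the requested amount and forwards the value to the timing
// module.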
int VCMReceiver::SetMinReceiverDelay(int desired_delay_ms) {
  CriticalSectionScoped cs(crit_sect_);
  if (desired_delay_ms < 0 || desired_delay_ms > kMaxReceiverDelayMs) {
    return -1;
  }
  max_video_delay_ms_ = desired_delay_ms + kMaxVideoDelayMs;
  // Initialize timing to the desired delay.
  timing_->set_min_playout_delay(desired_delay_ms);
  return 0;
}

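// Estimates how many milliseconds of renderable video the jitter buffer
// currently holds, computed as the difference between the render times of its
// first and last frames.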
int VCMReceiver::RenderBufferSizeMs() {
  uint32_t timestamp_start = 0u;
  uint32_t timestamp_end = 0u;
  // Render timestamps are computed just prior to decoding. Therefore this is
  // only an estimate based on frames' timestamps and current timing state.
  jitter_buffer_.RenderBufferSize(&timestamp_start, &timestamp_end);
  if (timestamp_start == timestamp_end) {
    return 0;
  }
  // Update timing.
  const int64_t now_ms = clock_->TimeInMilliseconds();
  timing_->SetJitterDelay(jitter_buffer_.EstimatedJitterMs());
  // Get render timestamps.
  uint32_t render_start = timing_->RenderTimeMs(timestamp_start, now_ms);
  uint32_t render_end = timing_->RenderTimeMs(timestamp_end, now_ms);
  return render_end - render_start;
}

void VCMReceiver::UpdateState(VCMReceiverState new_state) {
  CriticalSectionScoped cs(crit_sect_);
  assert(!(state_ == kPassive && new_state == kWaitForPrimaryDecode));
  state_ = new_state;
}

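// Updates the dual receiver's state based on a frame extracted by the primary
// receiver: become passive after a complete key frame (or any complete,
// non-missing frame while waiting for the primary decode), and go back to
// receiving when a frame is missing or incomplete.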
void VCMReceiver::UpdateState(const VCMEncodedFrame& frame) {
  if (jitter_buffer_.nack_mode() == kNoNack) {
    // Dual decoder mode has not been enabled.
    return;
  }
  // Update the dual receiver state.
  if (frame.Complete() && frame.FrameType() == kVideoFrameKey) {
    UpdateState(kPassive);
  }
  if (State() == kWaitForPrimaryDecode &&
      frame.Complete() && !frame.MissingFrame()) {
    UpdateState(kPassive);
  }
  if (frame.MissingFrame() || !frame.Complete()) {
    // The state was corrupted; enable the dual receiver.
    UpdateState(kReceiving);
  }
}
}  // namespace webrtc