Upstream version 5.34.104.0
[platform/framework/web/crosswalk.git] / src / content / common / gpu / gpu_channel.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if defined(OS_WIN)
6 #include <windows.h>
7 #endif
8
9 #include "content/common/gpu/gpu_channel.h"
10
11 #include <queue>
12 #include <vector>
13
14 #include "base/bind.h"
15 #include "base/command_line.h"
16 #include "base/debug/trace_event.h"
17 #include "base/message_loop/message_loop_proxy.h"
18 #include "base/strings/string_util.h"
19 #include "base/timer/timer.h"
20 #include "content/common/gpu/devtools_gpu_agent.h"
21 #include "content/common/gpu/gpu_channel_manager.h"
22 #include "content/common/gpu/gpu_messages.h"
23 #include "content/common/gpu/media/gpu_video_encode_accelerator.h"
24 #include "content/common/gpu/sync_point_manager.h"
25 #include "content/public/common/content_switches.h"
26 #include "gpu/command_buffer/common/mailbox.h"
27 #include "gpu/command_buffer/service/gpu_scheduler.h"
28 #include "gpu/command_buffer/service/image_manager.h"
29 #include "gpu/command_buffer/service/mailbox_manager.h"
30 #include "ipc/ipc_channel.h"
31 #include "ipc/ipc_channel_proxy.h"
32 #include "ui/gl/gl_context.h"
33 #include "ui/gl/gl_image.h"
34 #include "ui/gl/gl_surface.h"
35
36 #if defined(OS_POSIX)
37 #include "ipc/ipc_channel_posix.h"
38 #endif
39
40 namespace content {
namespace {

// Tuning constants for renderer preemption, all in milliseconds. These feed
// GpuChannelMessageFilter's preemption state machine below.

// Number of milliseconds between successive vsync. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;

}  // anonymous namespace
61
// This filter does three things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting and we limit the amount of time that we can preempt in
//   one shot (see constants above).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
// - it generates mailbox names for clients of the GPU process on the IO thread.
//   NOTE(review): no mailbox-name generation is visible in this class; that
//   last bullet looks stale -- confirm against the current implementation.
//
// All methods except the static main-thread helpers run on the IO thread.
class GpuChannelMessageFilter : public IPC::ChannelProxy::MessageFilter {
 public:
  // Takes ownership of gpu_channel (see below).
  GpuChannelMessageFilter(base::WeakPtr<GpuChannel>* gpu_channel,
                          scoped_refptr<SyncPointManager> sync_point_manager,
                          scoped_refptr<base::MessageLoopProxy> message_loop)
      : preemption_state_(IDLE),
        gpu_channel_(gpu_channel),
        channel_(NULL),
        sync_point_manager_(sync_point_manager),
        message_loop_(message_loop),
        messages_forwarded_to_channel_(0),
        a_stub_is_descheduled_(false) {
  }

  virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
    DCHECK(!channel_);
    channel_ = channel;
  }

  virtual void OnFilterRemoved() OVERRIDE {
    DCHECK(channel_);
    channel_ = NULL;
  }

  // Inspects each incoming message on the IO thread. Returns true when the
  // message was fully handled here (rogue RetireSyncPoint, or
  // InsertSyncPoint); otherwise the message continues on to the GpuChannel.
  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE {
    DCHECK(channel_);

    bool handled = false;
    if (message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) {
      // This message should not be sent explicitly by the renderer.
      DLOG(ERROR) << "Client should not send "
                     "GpuCommandBufferMsg_RetireSyncPoint message";
      handled = true;
    }

    // All other messages get processed by the GpuChannel.
    if (!handled) {
      // Record the message (number + timestamp) so preemption can measure
      // how long the main thread takes to catch up.
      messages_forwarded_to_channel_++;
      if (preempting_flag_.get())
        pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
      UpdatePreemptionState();
    }

    if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
      // Generate the sync point and reply immediately from the IO thread,
      // then post a task so the retirement is queued in order on the main
      // thread.
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      message_loop_->PostTask(FROM_HERE, base::Bind(
          &GpuChannelMessageFilter::InsertSyncPointOnMainThread,
          gpu_channel_,
          sync_point_manager_,
          message.routing_id(),
          sync_point));
      handled = true;
    }
    return handled;
  }

  // Posted from GpuChannel::MessageProcessed() on the main thread. Drops all
  // tracked messages up to |messages_processed| and re-evaluates preemption.
  void MessageProcessed(uint64 messages_processed) {
    while (!pending_messages_.empty() &&
           pending_messages_.front().message_number <= messages_processed)
      pending_messages_.pop();
    UpdatePreemptionState();
  }

  // Posted from GpuChannel::GetPreemptionFlag() when preemption is first
  // enabled for this channel.
  void SetPreemptingFlagAndSchedulingState(
      gpu::PreemptionFlag* preempting_flag,
      bool a_stub_is_descheduled) {
    preempting_flag_ = preempting_flag;
    a_stub_is_descheduled_ = a_stub_is_descheduled;
  }

  // Posted from GpuChannel::StubSchedulingChanged() whenever the channel's
  // descheduled-stub count crosses zero.
  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

  // Sends directly on the IO-thread channel. There is no NULL check;
  // NOTE(review): callers appear to only invoke this between OnFilterAdded
  // and OnFilterRemoved (see DCHECK in OnMessageReceived) -- confirm.
  bool Send(IPC::Message* message) {
    return channel_->Send(message);
  }

 protected:
  // The owned WeakPtr must be deleted on the main thread; hand it back via
  // DeleteWeakPtrOnMainThread.
  virtual ~GpuChannelMessageFilter() {
    message_loop_->PostTask(FROM_HERE, base::Bind(
        &GpuChannelMessageFilter::DeleteWeakPtrOnMainThread, gpu_channel_));
  }

 private:
  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };

  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  // Sequence number and arrival time of one message forwarded to the
  // channel; used to measure how far behind the main thread is.
  struct PendingMessage {
    uint64 message_number;
    base::TimeTicks time_received;

    explicit PendingMessage(uint64 message_number)
        : message_number(message_number),
          time_received(base::TimeTicks::Now()) {
    }
  };

  // Central state-machine step; every event (new message, message processed,
  // scheduling change, timer fire) funnels through here.
  void UpdatePreemptionState() {
    switch (preemption_state_) {
      case IDLE:
        if (preempting_flag_.get() && !pending_messages_.empty())
          TransitionToWaiting();
        break;
      case WAITING:
        // A timer will transition us to CHECKING.
        DCHECK(timer_.IsRunning());
        break;
      case CHECKING:
        if (!pending_messages_.empty()) {
          base::TimeDelta time_elapsed =
              base::TimeTicks::Now() - pending_messages_.front().time_received;
          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
            // Schedule another check for when the IPC may go long.
            timer_.Start(
                FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                    time_elapsed,
                this, &GpuChannelMessageFilter::UpdatePreemptionState);
          } else {
            if (a_stub_is_descheduled_)
              TransitionToWouldPreemptDescheduled();
            else
              TransitionToPreempting();
          }
        }
        break;
      case PREEMPTING:
        // A TransitionToIdle() timer should always be running in this state.
        DCHECK(timer_.IsRunning());
        if (a_stub_is_descheduled_)
          TransitionToWouldPreemptDescheduled();
        else
          TransitionToIdleIfCaughtUp();
        break;
      case WOULD_PREEMPT_DESCHEDULED:
        // A TransitionToIdle() timer should never be running in this state.
        DCHECK(!timer_.IsRunning());
        if (!a_stub_is_descheduled_)
          TransitionToPreempting();
        else
          TransitionToIdleIfCaughtUp();
        break;
      default:
        NOTREACHED();
    }
  }

  // Returns to IDLE once the queue is empty or the oldest pending IPC is
  // fresher than kStopPreemptThresholdMs.
  void TransitionToIdleIfCaughtUp() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    if (pending_messages_.empty()) {
      TransitionToIdle();
    } else {
      base::TimeDelta time_elapsed =
          base::TimeTicks::Now() - pending_messages_.front().time_received;
      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
        TransitionToIdle();
    }
  }

  void TransitionToIdle() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
    timer_.Stop();

    preemption_state_ = IDLE;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  void TransitionToWaiting() {
    DCHECK_EQ(preemption_state_, IDLE);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = WAITING;
    timer_.Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
        this, &GpuChannelMessageFilter::TransitionToChecking);
  }

  void TransitionToChecking() {
    DCHECK_EQ(preemption_state_, WAITING);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = CHECKING;
    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
    UpdatePreemptionState();
  }

  // Raises the preemption flag and arms the timer that bounds how long we
  // may stay in PREEMPTING (max_preemption_time_).
  void TransitionToPreempting() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    DCHECK(!a_stub_is_descheduled_);

    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    if (preemption_state_ == CHECKING)
      timer_.Stop();

    preemption_state_ = PREEMPTING;
    preempting_flag_->Set();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

    timer_.Start(
       FROM_HERE,
       max_preemption_time_,
       this, &GpuChannelMessageFilter::TransitionToIdle);

    UpdatePreemptionState();
  }

  // Lowers the preemption flag while a stub is descheduled, banking the
  // remaining preemption budget in max_preemption_time_ for later.
  void TransitionToWouldPreemptDescheduled() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == PREEMPTING);
    DCHECK(a_stub_is_descheduled_);

    if (preemption_state_ == CHECKING) {
      // Stop any pending state update checks that we may have queued
      // while CHECKING.
      timer_.Stop();
    } else {
      // Stop any TransitionToIdle() timers that we may have queued
      // while PREEMPTING.
      timer_.Stop();
      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
      if (max_preemption_time_ < base::TimeDelta()) {
        TransitionToIdle();
        return;
      }
    }

    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  static void InsertSyncPointOnMainThread(
      base::WeakPtr<GpuChannel>* gpu_channel,
      scoped_refptr<SyncPointManager> manager,
      int32 routing_id,
      uint32 sync_point) {
    // This function must ensure that the sync point will be retired. Normally
    // we'll find the stub based on the routing ID, and associate the sync point
    // with it, but if that fails for any reason (channel or stub already
    // deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (gpu_channel->get()) {
      GpuCommandBufferStub* stub = gpu_channel->get()->LookupCommandBuffer(
          routing_id);
      if (stub) {
        stub->AddSyncPoint(sync_point);
        GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
        gpu_channel->get()->OnMessageReceived(message);
        return;
      } else {
        gpu_channel->get()->MessageProcessed();
      }
    }
    manager->RetireSyncPoint(sync_point);
  }

  static void DeleteWeakPtrOnMainThread(
      base::WeakPtr<GpuChannel>* gpu_channel) {
    delete gpu_channel;
  }

  // NOTE: this is a pointer to a weak pointer. It is never dereferenced on the
  // IO thread, it's only passed through - therefore the WeakPtr assumptions are
  // respected.
  base::WeakPtr<GpuChannel>* gpu_channel_;
  // Not owned; valid between OnFilterAdded and OnFilterRemoved.
  IPC::Channel* channel_;
  scoped_refptr<SyncPointManager> sync_point_manager_;
  // The GpuChannel's (main) message loop, used for posting back tasks.
  scoped_refptr<base::MessageLoopProxy> message_loop_;
  // NULL until SetPreemptingFlagAndSchedulingState(); preemption is inert
  // before then.
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::queue<PendingMessage> pending_messages_;

  // Count of the number of IPCs forwarded to the GpuChannel.
  uint64 messages_forwarded_to_channel_;

  // Single timer shared by all state transitions; at most one pending
  // transition exists at a time.
  base::OneShotTimer<GpuChannelMessageFilter> timer_;

  bool a_stub_is_descheduled_;
};
388
// Constructs a channel for one renderer client. |share_group| and |mailbox|
// may be NULL, in which case fresh per-channel instances are created.
// |client_id| must be non-zero (it identifies the renderer process).
GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       int client_id,
                       bool software)
    : gpu_channel_manager_(gpu_channel_manager),
      messages_processed_(0),
      client_id_(client_id),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager),
      image_manager_(new gpu::gles2::ImageManager),
      watchdog_(watchdog),
      software_(software),
      handle_messages_scheduled_(false),
      processed_get_state_fast_(false),
      currently_processing_message_(NULL),
      weak_factory_(this),
      num_stubs_descheduled_(0) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
  const CommandLine* command_line = CommandLine::ForCurrentProcess();
  log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
}
415
416
// Creates the IPC server channel on the IO thread and installs the
// GpuChannelMessageFilter that implements preemption accounting and
// IO-thread sync point insertion. Always returns true.
bool GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
                      base::WaitableEvent* shutdown_event) {
  DCHECK(!channel_.get());

  // Map renderer ID to a (single) channel to that process.
  channel_.reset(new IPC::SyncChannel(
      channel_id_,
      IPC::Channel::MODE_SERVER,
      this,
      io_message_loop,
      false,
      shutdown_event));

  // The filter takes ownership of this heap-allocated WeakPtr and deletes it
  // back on this (main) thread when the filter is destroyed.
  base::WeakPtr<GpuChannel>* weak_ptr(new base::WeakPtr<GpuChannel>(
      weak_factory_.GetWeakPtr()));

  filter_ = new GpuChannelMessageFilter(
      weak_ptr,
      gpu_channel_manager_->sync_point_manager(),
      base::MessageLoopProxy::current());
  io_message_loop_ = io_message_loop;
  channel_->AddFilter(filter_.get());

  devtools_gpu_agent_.reset(new DevToolsGpuAgent(this));

  return true;
}
444
// Returns the verified channel ID generated in the constructor.
std::string GpuChannel::GetChannelName() {
  return channel_id_;
}
448
#if defined(OS_POSIX)
// Transfers ownership of the client end of the channel's socketpair to the
// caller (to be handed to the renderer). Returns -1 if the channel was never
// initialized, which is a programming error (NOTREACHED).
int GpuChannel::TakeRendererFileDescriptor() {
  if (!channel_) {
    NOTREACHED();
    return -1;
  }
  return channel_->TakeClientFileDescriptor();
}
#endif  // defined(OS_POSIX)
458
// Defers every incoming message into deferred_messages_ (HandleMessage drains
// the queue as a posted task). GetStateFast messages jump the queue so the
// renderer's busy-wait is serviced quickly, but two GetStateFast in a row are
// not allowed to starve other messages.
bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  if (log_messages_) {
    DVLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
  }

  if (message.type() == GpuCommandBufferMsg_GetStateFast::ID) {
    if (processed_get_state_fast_) {
      // Require a non-GetStateFast message in between two GetStateFast
      // messages, to ensure progress is made.
      std::deque<IPC::Message*>::iterator point = deferred_messages_.begin();

      // Skip the run of GetStateFast messages at the head of the queue...
      while (point != deferred_messages_.end() &&
             (*point)->type() == GpuCommandBufferMsg_GetStateFast::ID) {
        ++point;
      }

      // ...then step past one non-GetStateFast message before inserting.
      if (point != deferred_messages_.end()) {
        ++point;
      }

      deferred_messages_.insert(point, new IPC::Message(message));
    } else {
      // Move GetStateFast commands to the head of the queue, so the renderer
      // doesn't have to wait any longer than necessary.
      deferred_messages_.push_front(new IPC::Message(message));
    }
  } else {
    deferred_messages_.push_back(new IPC::Message(message));
  }

  OnScheduled();

  return true;
}
494
// The renderer went away (or the pipe broke): tear this channel down.
// RemoveChannel ultimately destroys |this|.
void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}
498
// Sends |message| to the renderer. Takes ownership of |message| in all
// paths: it is deleted here if the channel is gone, otherwise ownership
// passes to the IPC channel. Returns false on failure.
bool GpuChannel::Send(IPC::Message* message) {
  // The GPU process must never send a synchronous IPC message to the renderer
  // process. This could result in deadlock.
  DCHECK(!message->is_sync());
  if (log_messages_) {
    DVLOG(1) << "sending message @" << message << " on channel @" << this
             << " with type " << message->type();
  }

  if (!channel_) {
    delete message;
    return false;
  }

  return channel_->Send(message);
}
515
// Puts a copy of the message currently being dispatched back at the head of
// the queue so it is retried, and rolls back the processed count so the IO
// thread's preemption accounting stays consistent.
void GpuChannel::RequeueMessage() {
  DCHECK(currently_processing_message_);
  deferred_messages_.push_front(
      new IPC::Message(*currently_processing_message_));
  messages_processed_--;
  currently_processing_message_ = NULL;
}
523
// Schedules a single HandleMessage task (coalesced via
// handle_messages_scheduled_) to drain the deferred message queue.
void GpuChannel::OnScheduled() {
  if (handle_messages_scheduled_)
    return;
  // Post a task to handle any deferred messages. The deferred message queue is
  // not emptied here, which ensures that OnMessageReceived will continue to
  // defer newly received messages until the ones in the queue have all been
  // handled by HandleMessage. HandleMessage is invoked as a
  // task to prevent reentrancy.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
  handle_messages_scheduled_ = true;
}
537
// Tracks how many stubs are descheduled. When the any-stub-descheduled bit
// flips, the IO-thread filter is told so it can suspend/resume preemption of
// other channels.
void GpuChannel::StubSchedulingChanged(bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    // A stub became runnable again; make sure queued messages get drained.
    OnScheduled();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_,
                     a_stub_is_descheduled));
    }
  }
}
559
// Creates an onscreen command buffer stub bound to |window|/|surface_id|.
// On success *route_id is the new route; it stays MSG_ROUTING_NONE only if
// an early return were added (currently all paths assign a new ID).
void GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32* route_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  *route_id = MSG_ROUTING_NONE;

  // May be NULL if the client did not request context sharing.
  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  // Virtualize compositor contexts on OS X to prevent performance regressions
  // when enabling FCM.
  // http://crbug.com/180463
  bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
  use_virtualized_gl_context = true;
#endif

  *route_id = GenerateRouteID();
  scoped_ptr<GpuCommandBufferStub> stub(
      new GpuCommandBufferStub(this,
                               share_group,
                               window,
                               mailbox_manager_.get(),
                               image_manager_.get(),
                               gfx::Size(),
                               disallowed_features_,
                               init_params.attribs,
                               init_params.gpu_preference,
                               use_virtualized_gl_context,
                               *route_id,
                               surface_id,
                               watchdog_,
                               software_,
                               init_params.active_url));
  // Propagate any preemption flag another channel set on us.
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  router_.AddRoute(*route_id, stub.get());
  stubs_.AddWithID(stub.release(), *route_id);
}
604
// Returns the stub registered under |route_id|, or NULL if none exists.
GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.Lookup(route_id);
}
608
// Wraps |window| in a GLImage registered under |image_id|. On failure
// (duplicate id, or GLImage creation failed) *size is left empty as the
// failure signal to the caller.
void GpuChannel::CreateImage(
    gfx::PluginWindowHandle window,
    int32 image_id,
    gfx::Size* size) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateImage",
               "image_id",
               image_id);

  *size = gfx::Size();

  if (image_manager_->LookupImage(image_id)) {
    LOG(ERROR) << "CreateImage failed, image_id already in use.";
    return;
  }

  scoped_refptr<gfx::GLImage> image = gfx::GLImage::CreateGLImage(window);
  if (!image.get())
    return;

  image_manager_->AddImage(image.get(), image_id);
  *size = image->GetSize();
}
632
// Unregisters the GLImage created by CreateImage for |image_id|.
void GpuChannel::DeleteImage(int32 image_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::DeleteImage",
               "image_id",
               image_id);

  image_manager_->RemoveImage(image_id);
}
641
// Escalates a context loss to the manager, which handles all channels.
void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}
645
// Marks every stub's context lost so clients are notified and recreate.
void GpuChannel::MarkAllContextsLost() {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->MarkContextLost();
  }
}
652
// Schedules asynchronous destruction of this channel (via OnDestroy ->
// GpuChannelManager::RemoveChannel), avoiding deletion from within the
// current call stack.
void GpuChannel::DestroySoon() {
  base::MessageLoop::current()->PostTask(
      FROM_HERE, base::Bind(&GpuChannel::OnDestroy, this));
}
657
658 int32 GpuChannel::GenerateRouteID() {
659   static int32 last_id = 0;
660   return ++last_id;
661 }
662
// Registers |listener| to receive messages addressed to |route_id|.
void GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  router_.AddRoute(route_id, listener);
}
666
// Unregisters the listener previously added for |route_id|.
void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}
670
// Lazily creates the flag other channels check to yield to this one, and
// hands it (plus the current scheduling state) to the IO-thread filter so
// preemption accounting starts.
gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_message_loop_->PostTask(
        FROM_HERE, base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}
681
// Stores the flag this channel must yield on and pushes it to all existing
// stubs (newly created stubs pick it up in Create*CommandBuffer).
void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
  }
}
691
// Clears our preemption flag on the way out so other channels are not left
// permanently preempted by a dead channel.
GpuChannel::~GpuChannel() {
  if (preempting_flag_.get())
    preempting_flag_->Reset();
}
696
// Posted by DestroySoon(); asks the manager to remove (and thereby delete)
// this channel.
void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}
701
// Dispatches control (non-routed) messages to their On* handlers. Every
// control message reaching this channel is expected to be handled (DCHECK).
bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateVideoEncoder, OnCreateVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyVideoEncoder,
                        OnDestroyVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording,
                        OnDevToolsStartEventsRecording)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording,
                        OnDevToolsStopEventsRecording)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}
721
// Drains deferred_messages_, dispatching each message to the control handler
// or the router. Stops early if the target stub is unscheduled or preempted
// (leaving remaining messages queued), synthesizes a Rescheduled message when
// a stub still has unprocessed commands, and keeps looping only to fast-track
// an Echo ack immediately after a SwapBuffers.
void GpuChannel::HandleMessage() {
  handle_messages_scheduled_ = false;
  if (deferred_messages_.empty())
    return;

  bool should_fast_track_ack = false;
  IPC::Message* m = deferred_messages_.front();
  GpuCommandBufferStub* stub = stubs_.Lookup(m->routing_id());

  do {
    if (stub) {
      // Unscheduled: StubSchedulingChanged() will call OnScheduled() later.
      if (!stub->IsScheduled())
        return;
      // Preempted: yield now but reschedule ourselves immediately.
      if (stub->IsPreempted()) {
        OnScheduled();
        return;
      }
    }

    scoped_ptr<IPC::Message> message(m);
    deferred_messages_.pop_front();
    bool message_processed = true;

    processed_get_state_fast_ =
        (message->type() == GpuCommandBufferMsg_GetStateFast::ID);

    // Expose the in-flight message so RequeueMessage() can copy it.
    currently_processing_message_ = message.get();
    bool result;
    if (message->routing_id() == MSG_ROUTING_CONTROL)
      result = OnControlMessageReceived(*message);
    else
      result = router_.RouteMessage(*message);
    currently_processing_message_ = NULL;

    if (!result) {
      // Respond to sync messages even if router failed to route.
      if (message->is_sync()) {
        IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
        reply->set_reply_error();
        Send(reply);
      }
    } else {
      // If the command buffer becomes unscheduled as a result of handling the
      // message but still has more commands to process, synthesize an IPC
      // message to flush that command buffer.
      if (stub) {
        if (stub->HasUnprocessedCommands()) {
          deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
              stub->route_id()));
          // Don't count this as processed yet: the synthesized Rescheduled
          // stands in for the remaining work of this message.
          message_processed = false;
        }
      }
    }
    if (message_processed)
      MessageProcessed();

    // We want the EchoACK following the SwapBuffers to be sent as close as
    // possible, avoiding scheduling other channels in the meantime.
    should_fast_track_ack = false;
    if (!deferred_messages_.empty()) {
      m = deferred_messages_.front();
      stub = stubs_.Lookup(m->routing_id());
      should_fast_track_ack =
          (m->type() == GpuCommandBufferMsg_Echo::ID) &&
          stub && stub->IsScheduled();
    }
  } while (should_fast_track_ack);

  if (!deferred_messages_.empty()) {
    OnScheduled();
  }
}
794
795 void GpuChannel::OnCreateOffscreenCommandBuffer(
796     const gfx::Size& size,
797     const GPUCreateCommandBufferConfig& init_params,
798     int32* route_id) {
799   TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
800   GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);
801
802   *route_id = GenerateRouteID();
803
804   scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
805       this,
806       share_group,
807       gfx::GLSurfaceHandle(),
808       mailbox_manager_.get(),
809       image_manager_.get(),
810       size,
811       disallowed_features_,
812       init_params.attribs,
813       init_params.gpu_preference,
814       false,
815       *route_id,
816       0,
817       watchdog_,
818       software_,
819       init_params.active_url));
820   if (preempted_flag_.get())
821     stub->SetPreemptByFlag(preempted_flag_);
822   router_.AddRoute(*route_id, stub.get());
823   stubs_.AddWithID(stub.release(), *route_id);
824   TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
825                "route_id", route_id);
826 }
827
828 void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
829   TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
830                "route_id", route_id);
831
832   GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
833   if (!stub)
834     return;
835   bool need_reschedule = (stub && !stub->IsScheduled());
836   router_.RemoveRoute(route_id);
837   stubs_.Remove(route_id);
838   // In case the renderer is currently blocked waiting for a sync reply from the
839   // stub, we need to make sure to reschedule the GpuChannel here.
840   if (need_reschedule) {
841     // This stub won't get a chance to reschedule, so update the count now.
842     StubSchedulingChanged(true);
843   }
844 }
845
// Creates a video encode accelerator and returns its route ID via |route_id|.
// Ownership is held by video_encoders_ (IDMap), keyed by the route ID.
void GpuChannel::OnCreateVideoEncoder(int32* route_id) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateVideoEncoder");

  *route_id = GenerateRouteID();
  GpuVideoEncodeAccelerator* encoder =
      new GpuVideoEncodeAccelerator(this, *route_id);
  router_.AddRoute(*route_id, encoder);
  video_encoders_.AddWithID(encoder, *route_id);
}
855
// Destroys the video encoder registered under |route_id|; no-op if unknown.
void GpuChannel::OnDestroyVideoEncoder(int32 route_id) {
  TRACE_EVENT1(
      "gpu", "GpuChannel::OnDestroyVideoEncoder", "route_id", route_id);
  GpuVideoEncodeAccelerator* encoder = video_encoders_.Lookup(route_id);
  if (!encoder)
    return;
  router_.RemoveRoute(route_id);
  video_encoders_.Remove(route_id);
}
865
// Starts DevTools GPU event recording; the agent fills in |route_id|.
void GpuChannel::OnDevToolsStartEventsRecording(int32* route_id) {
  devtools_gpu_agent_->StartEventsRecording(route_id);
}
869
// Stops DevTools GPU event recording started above.
void GpuChannel::OnDevToolsStopEventsRecording() {
  devtools_gpu_agent_->StopEventsRecording();
}
873
// Bumps the processed-message counter and forwards the new value to the
// IO-thread filter so it can drop acknowledged entries from its pending
// queue and re-evaluate preemption.
void GpuChannel::MessageProcessed() {
  messages_processed_++;
  if (preempting_flag_.get()) {
    io_message_loop_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannelMessageFilter::MessageProcessed,
                   filter_,
                   messages_processed_));
  }
}
884
// Forwards a compiled shader to the browser process for on-disk caching,
// tagged with this channel's client ID.
void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}
890
// Installs an additional IO-thread message filter on the underlying channel.
void GpuChannel::AddFilter(IPC::ChannelProxy::MessageFilter* filter) {
  channel_->AddFilter(filter);
}
894
// Removes a filter previously installed via AddFilter().
void GpuChannel::RemoveFilter(IPC::ChannelProxy::MessageFilter* filter) {
  channel_->RemoveFilter(filter);
}
898
899 }  // namespace content