// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if defined(OS_WIN)
#include <windows.h>
#endif

#include "content/common/gpu/gpu_channel.h"

#include <queue>
#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/timer/timer.h"
#include "content/common/gpu/devtools_gpu_agent.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "ipc/ipc_channel.h"
#include "ipc/message_filter.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_surface.h"

#if defined(OS_POSIX)
#include "ipc/ipc_channel_posix.h"
#endif
namespace content {
namespace {

// Number of milliseconds between successive vsyncs. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;

}  // anonymous namespace

// This filter does three things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting and we limit the amount of time that we can preempt in
//   one shot (see constants above).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
// - it rejects GpuCommandBufferMsg_RetireSyncPoint messages sent by clients;
//   sync points may only be retired by the GPU process itself.
class GpuChannelMessageFilter : public IPC::MessageFilter {
 public:
  GpuChannelMessageFilter(base::WeakPtr<GpuChannel> gpu_channel,
                          scoped_refptr<SyncPointManager> sync_point_manager,
                          scoped_refptr<base::MessageLoopProxy> message_loop)
      : preemption_state_(IDLE),
        gpu_channel_(gpu_channel),
        sender_(NULL),
        sync_point_manager_(sync_point_manager),
        message_loop_(message_loop),
        messages_forwarded_to_channel_(0),
        a_stub_is_descheduled_(false) {}

  virtual void OnFilterAdded(IPC::Sender* sender) OVERRIDE {
    DCHECK(!sender_);
    sender_ = sender;
  }

  virtual void OnFilterRemoved() OVERRIDE {
    DCHECK(sender_);
    sender_ = NULL;
  }

  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE {
    DCHECK(sender_);

    bool handled = false;
    if (message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) {
      // This message should not be sent explicitly by the renderer.
      DLOG(ERROR) << "Client should not send "
                     "GpuCommandBufferMsg_RetireSyncPoint message";
      handled = true;
    }

    // All other messages get processed by the GpuChannel.
    if (!handled) {
      messages_forwarded_to_channel_++;
      if (preempting_flag_.get())
        pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
      UpdatePreemptionState();
    }

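    // GpuCommandBufferMsg_InsertSyncPoint is handled here on the IO thread so
    // the reply reaches the client without waiting behind the channel's
    // deferred message queue. The message was still counted above: the task
    // posted below eventually results in a retire message or a
    // MessageProcessed() call on the main thread, which balances the count.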
    if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      message_loop_->PostTask(FROM_HERE, base::Bind(
          &GpuChannelMessageFilter::InsertSyncPointOnMainThread,
          gpu_channel_,
          sync_point_manager_,
          message.routing_id(),
          sync_point));
      handled = true;
    }
    return handled;
  }

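  // Called on the IO thread whenever the channel's main thread finishes
  // processing a message, so entries that no longer matter for preemption
  // can be dropped.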
  void MessageProcessed(uint64 messages_processed) {
    while (!pending_messages_.empty() &&
           pending_messages_.front().message_number <= messages_processed)
      pending_messages_.pop();
    UpdatePreemptionState();
  }

  void SetPreemptingFlagAndSchedulingState(
      gpu::PreemptionFlag* preempting_flag,
      bool a_stub_is_descheduled) {
    preempting_flag_ = preempting_flag;
    a_stub_is_descheduled_ = a_stub_is_descheduled;
  }

  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

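  // Sends directly on the underlying channel from the IO thread, e.g. the
  // immediate reply to GpuCommandBufferMsg_InsertSyncPoint.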
  bool Send(IPC::Message* message) {
    return sender_->Send(message);
  }

 protected:
  virtual ~GpuChannelMessageFilter() {}

 private:
  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };

  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  struct PendingMessage {
    uint64 message_number;
    base::TimeTicks time_received;

    explicit PendingMessage(uint64 message_number)
        : message_number(message_number),
          time_received(base::TimeTicks::Now()) {
    }
  };

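  // Central state machine, invoked whenever an input that can affect
  // preemption changes: a new pending message, a processed message, a stub
  // scheduling change, or a timer firing.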
  void UpdatePreemptionState() {
    switch (preemption_state_) {
      case IDLE:
        if (preempting_flag_.get() && !pending_messages_.empty())
          TransitionToWaiting();
        break;
      case WAITING:
        // A timer will transition us to CHECKING.
        DCHECK(timer_.IsRunning());
        break;
      case CHECKING:
        if (!pending_messages_.empty()) {
          base::TimeDelta time_elapsed =
              base::TimeTicks::Now() - pending_messages_.front().time_received;
          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
            // Schedule another check for when the oldest pending IPC will
            // have been waiting long enough to trigger preemption.
            timer_.Start(
                FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                    time_elapsed,
                this, &GpuChannelMessageFilter::UpdatePreemptionState);
          } else {
            if (a_stub_is_descheduled_)
              TransitionToWouldPreemptDescheduled();
            else
              TransitionToPreempting();
          }
        }
        break;
      case PREEMPTING:
        // A TransitionToIdle() timer should always be running in this state.
        DCHECK(timer_.IsRunning());
        if (a_stub_is_descheduled_)
          TransitionToWouldPreemptDescheduled();
        else
          TransitionToIdleIfCaughtUp();
        break;
      case WOULD_PREEMPT_DESCHEDULED:
        // A TransitionToIdle() timer should never be running in this state.
        DCHECK(!timer_.IsRunning());
        if (!a_stub_is_descheduled_)
          TransitionToPreempting();
        else
          TransitionToIdleIfCaughtUp();
        break;
      default:
        NOTREACHED();
    }
  }

  void TransitionToIdleIfCaughtUp() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    if (pending_messages_.empty()) {
      TransitionToIdle();
    } else {
      base::TimeDelta time_elapsed =
          base::TimeTicks::Now() - pending_messages_.front().time_received;
      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
        TransitionToIdle();
    }
  }

  void TransitionToIdle() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
    timer_.Stop();

    preemption_state_ = IDLE;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  void TransitionToWaiting() {
    DCHECK_EQ(preemption_state_, IDLE);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = WAITING;
    timer_.Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
        this, &GpuChannelMessageFilter::TransitionToChecking);
  }

  void TransitionToChecking() {
    DCHECK_EQ(preemption_state_, WAITING);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = CHECKING;
    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
    UpdatePreemptionState();
  }

  void TransitionToPreempting() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    DCHECK(!a_stub_is_descheduled_);

    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    if (preemption_state_ == CHECKING)
      timer_.Stop();

    preemption_state_ = PREEMPTING;
    preempting_flag_->Set();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

    timer_.Start(
        FROM_HERE,
        max_preemption_time_,
        this, &GpuChannelMessageFilter::TransitionToIdle);

    UpdatePreemptionState();
  }

  void TransitionToWouldPreemptDescheduled() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == PREEMPTING);
    DCHECK(a_stub_is_descheduled_);

    if (preemption_state_ == CHECKING) {
      // Stop any pending state update checks that we may have queued
      // while CHECKING.
      timer_.Stop();
    } else {
      // Stop any TransitionToIdle() timers that we may have queued while
      // PREEMPTING, and bank whatever preemption budget remains.
      timer_.Stop();
      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
      if (max_preemption_time_ < base::TimeDelta()) {
        TransitionToIdle();
        return;
      }
    }

    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  static void InsertSyncPointOnMainThread(
      base::WeakPtr<GpuChannel> gpu_channel,
      scoped_refptr<SyncPointManager> manager,
      int32 routing_id,
      uint32 sync_point) {
    // This function must ensure that the sync point will be retired. Normally
    // we'll find the stub based on the routing ID, and associate the sync point
    // with it, but if that fails for any reason (channel or stub already
    // deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (gpu_channel) {
      GpuCommandBufferStub* stub = gpu_channel->LookupCommandBuffer(routing_id);
      if (stub) {
        stub->AddSyncPoint(sync_point);
        GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
        gpu_channel->OnMessageReceived(message);
        return;
      } else {
        // The stub is gone. Count the InsertSyncPoint as processed so the
        // IO-thread filter's pending-message queue stays balanced.
        gpu_channel->MessageProcessed();
      }
    }
    manager->RetireSyncPoint(sync_point);
  }

  // NOTE: this weak pointer is never dereferenced on the IO thread, it's only
  // passed through - therefore the WeakPtr assumptions are respected.
  base::WeakPtr<GpuChannel> gpu_channel_;
  IPC::Sender* sender_;
  scoped_refptr<SyncPointManager> sync_point_manager_;
  scoped_refptr<base::MessageLoopProxy> message_loop_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::queue<PendingMessage> pending_messages_;

  // Count of the number of IPCs forwarded to the GpuChannel.
  uint64 messages_forwarded_to_channel_;

  base::OneShotTimer<GpuChannelMessageFilter> timer_;

  bool a_stub_is_descheduled_;
};

GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       int client_id,
                       bool software)
    : gpu_channel_manager_(gpu_channel_manager),
      messages_processed_(0),
      client_id_(client_id),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager),
      image_manager_(new gpu::gles2::ImageManager),
      watchdog_(watchdog),
      software_(software),
      handle_messages_scheduled_(false),
      currently_processing_message_(NULL),
      weak_factory_(this),
      num_stubs_descheduled_(0) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
  const CommandLine* command_line = CommandLine::ForCurrentProcess();
  log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
}

GpuChannel::~GpuChannel() {
  STLDeleteElements(&deferred_messages_);
  if (preempting_flag_.get())
    preempting_flag_->Reset();
}

void GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
                      base::WaitableEvent* shutdown_event) {
  DCHECK(!channel_.get());

  // Map renderer ID to a (single) channel to that process.
  channel_ = IPC::SyncChannel::Create(channel_id_,
                                      IPC::Channel::MODE_SERVER,
                                      this,
                                      io_message_loop,
                                      false,
                                      shutdown_event);

  filter_ =
      new GpuChannelMessageFilter(weak_factory_.GetWeakPtr(),
                                  gpu_channel_manager_->sync_point_manager(),
                                  base::MessageLoopProxy::current());
  io_message_loop_ = io_message_loop;
  channel_->AddFilter(filter_.get());

  devtools_gpu_agent_.reset(new DevToolsGpuAgent(this));
}

std::string GpuChannel::GetChannelName() {
  return channel_id_;
}

#if defined(OS_POSIX)
int GpuChannel::TakeRendererFileDescriptor() {
  if (!channel_) {
    NOTREACHED();
    return -1;
  }
  return channel_->TakeClientFileDescriptor();
}
#endif  // defined(OS_POSIX)

bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  if (log_messages_) {
    DVLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
  }

  if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
      message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
    // Move Wait commands to the head of the queue, so the renderer
    // doesn't have to wait any longer than necessary.
    deferred_messages_.push_front(new IPC::Message(message));
  } else {
    deferred_messages_.push_back(new IPC::Message(message));
  }

  OnScheduled();

  return true;
}

void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::Send(IPC::Message* message) {
  // The GPU process must never send a synchronous IPC message to the renderer
  // process. This could result in deadlock.
  DCHECK(!message->is_sync());
  if (log_messages_) {
    DVLOG(1) << "sending message @" << message << " on channel @" << this
             << " with type " << message->type();
  }

  if (!channel_) {
    delete message;
    return false;
  }

  return channel_->Send(message);
}

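// Puts the message currently being processed back at the head of the queue
// and rolls back the processed count, so it will be handled again the next
// time the channel is scheduled.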
void GpuChannel::RequeueMessage() {
  DCHECK(currently_processing_message_);
  deferred_messages_.push_front(
      new IPC::Message(*currently_processing_message_));
  messages_processed_--;
  currently_processing_message_ = NULL;
}

void GpuChannel::OnScheduled() {
  if (handle_messages_scheduled_)
    return;
  // Post a task to handle any deferred messages. The deferred message queue is
  // not emptied here, which ensures that OnMessageReceived will continue to
  // defer newly received messages until the ones in the queue have all been
  // handled by HandleMessage. HandleMessage is invoked as a
  // task to prevent reentrancy.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
  handle_messages_scheduled_ = true;
}

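// Called by a stub when its scheduling status changes. Tracks how many stubs
// are descheduled and, when that count transitions between zero and non-zero,
// notifies the IO-thread filter so preemption can be suppressed while any
// stub is descheduled.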
void GpuChannel::StubSchedulingChanged(bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    OnScheduled();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_,
                     a_stub_is_descheduled));
    }
  }
}

CreateCommandBufferResult GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  // Virtualize compositor contexts on OS X to prevent performance regressions
  // when enabling FCM.
  // http://crbug.com/180463
  bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
  use_virtualized_gl_context = true;
#endif

  scoped_ptr<GpuCommandBufferStub> stub(
      new GpuCommandBufferStub(this,
                               share_group,
                               window,
                               mailbox_manager_.get(),
                               image_manager_.get(),
                               gfx::Size(),
                               disallowed_features_,
                               init_params.attribs,
                               init_params.gpu_preference,
                               use_virtualized_gl_context,
                               route_id,
                               surface_id,
                               watchdog_,
                               software_,
                               init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::CreateViewCommandBuffer(): "
                   "failed to add route";
    return CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST;
  }
  stubs_.AddWithID(stub.release(), route_id);
  return CREATE_COMMAND_BUFFER_SUCCEEDED;
}

GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.Lookup(route_id);
}

void GpuChannel::CreateImage(
    gfx::PluginWindowHandle window,
    int32 image_id,
    gfx::Size* size) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateImage",
               "image_id",
               image_id);

  *size = gfx::Size();

  if (image_manager_->LookupImage(image_id)) {
    LOG(ERROR) << "CreateImage failed, image_id already in use.";
    return;
  }

  scoped_refptr<gfx::GLImage> image = gfx::GLImage::CreateGLImage(window);
  if (!image.get())
    return;

  image_manager_->AddImage(image.get(), image_id);
  *size = image->GetSize();
}

void GpuChannel::DeleteImage(int32 image_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::DeleteImage",
               "image_id",
               image_id);

  image_manager_->RemoveImage(image_id);
}

void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}

void GpuChannel::MarkAllContextsLost() {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->MarkContextLost();
  }
}

bool GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  return router_.AddRoute(route_id, listener);
}

void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}

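// Lazily creates the flag this channel uses to preempt other channels. The
// IO-thread filter sets and resets it based on how long the oldest pending
// message has been waiting.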
gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_message_loop_->PostTask(
        FROM_HERE, base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}

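// Installs the flag that tells this channel's stubs when another channel is
// preempting them. The flag is propagated to all existing stubs here and to
// new stubs as they are created.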
void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
  }
}

void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording,
                        OnDevToolsStartEventsRecording)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording,
                        OnDevToolsStopEventsRecording)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}

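// Drains the deferred message queue. Normally one message is handled per
// task; the loop below only continues directly into the next message to
// fast-track an Echo ACK, otherwise HandleMessage re-posts itself via
// OnScheduled() so other work gets a turn.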
void GpuChannel::HandleMessage() {
  handle_messages_scheduled_ = false;
  if (deferred_messages_.empty())
    return;

  bool should_fast_track_ack = false;
  IPC::Message* m = deferred_messages_.front();
  GpuCommandBufferStub* stub = stubs_.Lookup(m->routing_id());

  do {
    if (stub) {
      if (!stub->IsScheduled())
        return;
      if (stub->IsPreempted()) {
        OnScheduled();
        return;
      }
    }

    scoped_ptr<IPC::Message> message(m);
    deferred_messages_.pop_front();
    bool message_processed = true;

    currently_processing_message_ = message.get();
    bool result;
    if (message->routing_id() == MSG_ROUTING_CONTROL)
      result = OnControlMessageReceived(*message);
    else
      result = router_.RouteMessage(*message);
    currently_processing_message_ = NULL;

    if (!result) {
      // Respond to sync messages even if router failed to route.
      if (message->is_sync()) {
        IPC::Message* reply = IPC::SyncMessage::GenerateReply(message.get());
        reply->set_reply_error();
        Send(reply);
      }
    } else {
      // If the command buffer becomes unscheduled as a result of handling the
      // message but still has more commands to process, synthesize an IPC
      // message to flush that command buffer.
      if (stub) {
        if (stub->HasUnprocessedCommands()) {
          deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
              stub->route_id()));
          message_processed = false;
        }
      }
    }
    if (message_processed)
      MessageProcessed();

    // We want the EchoACK following a SwapBuffers to be sent as soon as
    // possible, without scheduling other channels in the meantime.
    should_fast_track_ack = false;
    if (!deferred_messages_.empty()) {
      m = deferred_messages_.front();
      stub = stubs_.Lookup(m->routing_id());
      should_fast_track_ack =
          (m->type() == GpuCommandBufferMsg_Echo::ID) &&
          stub && stub->IsScheduled();
    }
  } while (should_fast_track_ack);

  if (!deferred_messages_.empty()) {
    OnScheduled();
  }
}

void GpuChannel::OnCreateOffscreenCommandBuffer(
    const gfx::Size& size,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id,
    bool* succeeded) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this,
      share_group,
      gfx::GLSurfaceHandle(),
      mailbox_manager_.get(),
      image_manager_.get(),
      size,
      disallowed_features_,
      init_params.attribs,
      init_params.gpu_preference,
      false,
      route_id,
      0,
      watchdog_,
      software_,
      init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::OnCreateOffscreenCommandBuffer(): "
                   "failed to add route";
    *succeeded = false;
    return;
  }
  stubs_.AddWithID(stub.release(), route_id);
  TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
               "route_id", route_id);
  *succeeded = true;
}

void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
  TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
               "route_id", route_id);

  GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
  if (!stub)
    return;
  bool need_reschedule = !stub->IsScheduled();
  router_.RemoveRoute(route_id);
  stubs_.Remove(route_id);
  // In case the renderer is currently blocked waiting for a sync reply from
  // the stub, make sure to reschedule the GpuChannel here.
  if (need_reschedule) {
    // This stub won't get a chance to reschedule, so update the count now.
    StubSchedulingChanged(true);
  }
}

void GpuChannel::OnDevToolsStartEventsRecording(int32 route_id,
                                                bool* succeeded) {
  *succeeded = devtools_gpu_agent_->StartEventsRecording(route_id);
}

void GpuChannel::OnDevToolsStopEventsRecording() {
  devtools_gpu_agent_->StopEventsRecording();
}

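// Bumps the processed-message count and mirrors it to the IO-thread filter,
// which uses it to retire entries from its pending-message queue.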
void GpuChannel::MessageProcessed() {
  messages_processed_++;
  if (preempting_flag_.get()) {
    io_message_loop_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannelMessageFilter::MessageProcessed,
                   filter_,
                   messages_processed_));
  }
}

void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}

void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
  channel_->AddFilter(filter);
}

void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
  channel_->RemoveFilter(filter);
}

uint64 GpuChannel::GetMemoryUsage() {
  uint64 size = 0;
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    size += it.GetCurrentValue()->GetMemoryUsage();
  }
  return size;
}

}  // namespace content