Upstream version 6.35.121.0
[platform/framework/web/crosswalk.git] / src / content / common / gpu / gpu_channel.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if defined(OS_WIN)
6 #include <windows.h>
7 #endif
8
9 #include "content/common/gpu/gpu_channel.h"
10
11 #include <queue>
12 #include <vector>
13
14 #include "base/bind.h"
15 #include "base/command_line.h"
16 #include "base/debug/trace_event.h"
17 #include "base/message_loop/message_loop_proxy.h"
18 #include "base/strings/string_util.h"
19 #include "base/timer/timer.h"
20 #include "content/common/gpu/devtools_gpu_agent.h"
21 #include "content/common/gpu/gpu_channel_manager.h"
22 #include "content/common/gpu/gpu_messages.h"
23 #include "content/common/gpu/media/gpu_video_encode_accelerator.h"
24 #include "content/common/gpu/sync_point_manager.h"
25 #include "content/public/common/content_switches.h"
26 #include "gpu/command_buffer/common/mailbox.h"
27 #include "gpu/command_buffer/service/gpu_scheduler.h"
28 #include "gpu/command_buffer/service/image_manager.h"
29 #include "gpu/command_buffer/service/mailbox_manager.h"
30 #include "ipc/ipc_channel.h"
31 #include "ipc/ipc_channel_proxy.h"
32 #include "ui/gl/gl_context.h"
33 #include "ui/gl/gl_image.h"
34 #include "ui/gl/gl_surface.h"
35
36 #if defined(OS_POSIX)
37 #include "ipc/ipc_channel_posix.h"
38 #endif
39
40 namespace content {
namespace {

// Preemption tuning constants. All values are in milliseconds and are
// expressed relative to the vsync interval, since many GL commands block
// on vsync and preemption thresholds should therefore be multiples of it.

// Number of milliseconds between successive vsync. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;

}  // anonymous namespace
61
62 // This filter does three things:
63 // - it counts and timestamps each message forwarded to the channel
64 //   so that we can preempt other channels if a message takes too long to
65 //   process. To guarantee fairness, we must wait a minimum amount of time
66 //   before preempting and we limit the amount of time that we can preempt in
67 //   one shot (see constants above).
68 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
69 //   thread, generating the sync point ID and responding immediately, and then
70 //   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
71 //   into the channel's queue.
72 // - it generates mailbox names for clients of the GPU process on the IO thread.
class GpuChannelMessageFilter : public IPC::ChannelProxy::MessageFilter {
 public:
  // Takes ownership of gpu_channel (see below). |gpu_channel| is a
  // heap-allocated WeakPtr that is only ever dereferenced on the main thread
  // (see the comment on the gpu_channel_ member); this filter runs on the IO
  // thread and merely passes it through.
  GpuChannelMessageFilter(base::WeakPtr<GpuChannel>* gpu_channel,
                          scoped_refptr<SyncPointManager> sync_point_manager,
                          scoped_refptr<base::MessageLoopProxy> message_loop)
      : preemption_state_(IDLE),
        gpu_channel_(gpu_channel),
        channel_(NULL),
        sync_point_manager_(sync_point_manager),
        message_loop_(message_loop),
        messages_forwarded_to_channel_(0),
        a_stub_is_descheduled_(false) {
  }

  // IPC::ChannelProxy::MessageFilter implementation. Called on the IO thread
  // when the filter is attached to / detached from the channel.
  virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
    DCHECK(!channel_);
    channel_ = channel;
  }

  virtual void OnFilterRemoved() OVERRIDE {
    DCHECK(channel_);
    channel_ = NULL;
  }

  // Called on the IO thread for every message arriving on the channel.
  // Returns true if the message was fully handled here and must not be
  // forwarded to the GpuChannel.
  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE {
    DCHECK(channel_);

    bool handled = false;
    if (message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) {
      // This message should not be sent explicitly by the renderer.
      DLOG(ERROR) << "Client should not send "
                     "GpuCommandBufferMsg_RetireSyncPoint message";
      handled = true;
    }

    // All other messages get processed by the GpuChannel. Count each one and
    // timestamp it (via PendingMessage) so UpdatePreemptionState() can decide
    // whether processing has fallen far enough behind to preempt others.
    if (!handled) {
      messages_forwarded_to_channel_++;
      if (preempting_flag_.get())
        pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
      UpdatePreemptionState();
    }

    // InsertSyncPoint is answered immediately on the IO thread: generate the
    // sync point id, send the sync reply, then post a task so the matching
    // RetireSyncPoint is queued on the main thread in channel order. Note the
    // reply is sent before the retire is actually queued.
    if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      message_loop_->PostTask(FROM_HERE, base::Bind(
          &GpuChannelMessageFilter::InsertSyncPointOnMainThread,
          gpu_channel_,
          sync_point_manager_,
          message.routing_id(),
          sync_point));
      handled = true;
    }
    return handled;
  }

  // Called (via PostTask from the main thread) when the GpuChannel has
  // processed messages up to |messages_processed|; drops the corresponding
  // entries from the pending queue and re-evaluates preemption.
  void MessageProcessed(uint64 messages_processed) {
    while (!pending_messages_.empty() &&
           pending_messages_.front().message_number <= messages_processed)
      pending_messages_.pop();
    UpdatePreemptionState();
  }

  void SetPreemptingFlagAndSchedulingState(
      gpu::PreemptionFlag* preempting_flag,
      bool a_stub_is_descheduled) {
    preempting_flag_ = preempting_flag;
    a_stub_is_descheduled_ = a_stub_is_descheduled;
  }

  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

  // Sends |message| directly on the underlying channel (IO thread only).
  bool Send(IPC::Message* message) {
    return channel_->Send(message);
  }

 protected:
  // Destroyed on the IO thread; the owned WeakPtr must be deleted on the
  // main thread, so its deletion is posted there.
  virtual ~GpuChannelMessageFilter() {
    message_loop_->PostTask(FROM_HERE, base::Bind(
        &GpuChannelMessageFilter::DeleteWeakPtrOnMainThread, gpu_channel_));
  }

 private:
  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };

  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  // A forwarded message's sequence number plus the time it arrived on the
  // IO thread; used to measure how long the oldest message has been waiting.
  struct PendingMessage {
    uint64 message_number;
    base::TimeTicks time_received;

    explicit PendingMessage(uint64 message_number)
        : message_number(message_number),
          time_received(base::TimeTicks::Now()) {
    }
  };

  // Drives the IDLE -> WAITING -> CHECKING -> PREEMPTING /
  // WOULD_PREEMPT_DESCHEDULED state machine. Re-entered from the transition
  // helpers below and from timer_ callbacks.
  void UpdatePreemptionState() {
    switch (preemption_state_) {
      case IDLE:
        if (preempting_flag_.get() && !pending_messages_.empty())
          TransitionToWaiting();
        break;
      case WAITING:
        // A timer will transition us to CHECKING.
        DCHECK(timer_.IsRunning());
        break;
      case CHECKING:
        if (!pending_messages_.empty()) {
          base::TimeDelta time_elapsed =
              base::TimeTicks::Now() - pending_messages_.front().time_received;
          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
            // Schedule another check for when the IPC may go long.
            timer_.Start(
                FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                    time_elapsed,
                this, &GpuChannelMessageFilter::UpdatePreemptionState);
          } else {
            if (a_stub_is_descheduled_)
              TransitionToWouldPreemptDescheduled();
            else
              TransitionToPreempting();
          }
        }
        break;
      case PREEMPTING:
        // A TransitionToIdle() timer should always be running in this state.
        DCHECK(timer_.IsRunning());
        if (a_stub_is_descheduled_)
          TransitionToWouldPreemptDescheduled();
        else
          TransitionToIdleIfCaughtUp();
        break;
      case WOULD_PREEMPT_DESCHEDULED:
        // A TransitionToIdle() timer should never be running in this state.
        DCHECK(!timer_.IsRunning());
        if (!a_stub_is_descheduled_)
          TransitionToPreempting();
        else
          TransitionToIdleIfCaughtUp();
        break;
      default:
        NOTREACHED();
    }
  }

  // Leaves PREEMPTING/WOULD_PREEMPT_DESCHEDULED once the oldest pending IPC
  // is younger than kStopPreemptThresholdMs (or the queue is empty).
  void TransitionToIdleIfCaughtUp() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    if (pending_messages_.empty()) {
      TransitionToIdle();
    } else {
      base::TimeDelta time_elapsed =
          base::TimeTicks::Now() - pending_messages_.front().time_received;
      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
        TransitionToIdle();
    }
  }

  void TransitionToIdle() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
    timer_.Stop();

    preemption_state_ = IDLE;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  void TransitionToWaiting() {
    DCHECK_EQ(preemption_state_, IDLE);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = WAITING;
    timer_.Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
        this, &GpuChannelMessageFilter::TransitionToChecking);
  }

  void TransitionToChecking() {
    DCHECK_EQ(preemption_state_, WAITING);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = CHECKING;
    // A fresh preemption window: we may preempt for at most kMaxPreemptTimeMs
    // before being forced back to IDLE.
    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
    UpdatePreemptionState();
  }

  void TransitionToPreempting() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    DCHECK(!a_stub_is_descheduled_);

    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    if (preemption_state_ == CHECKING)
      timer_.Stop();

    preemption_state_ = PREEMPTING;
    preempting_flag_->Set();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

    timer_.Start(
       FROM_HERE,
       max_preemption_time_,
       this, &GpuChannelMessageFilter::TransitionToIdle);

    UpdatePreemptionState();
  }

  void TransitionToWouldPreemptDescheduled() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == PREEMPTING);
    DCHECK(a_stub_is_descheduled_);

    if (preemption_state_ == CHECKING) {
      // Stop any pending state update checks that we may have queued
      // while CHECKING.
      timer_.Stop();
    } else {
      // Stop any TransitionToIdle() timers that we may have queued
      // while PREEMPTING. Bank the unused portion of the preemption window
      // so it can resume later; if it is already exhausted, go idle now.
      timer_.Stop();
      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
      if (max_preemption_time_ < base::TimeDelta()) {
        TransitionToIdle();
        return;
      }
    }

    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  // Runs on the main thread. Associates |sync_point| with the stub for
  // |routing_id| and queues the retire message; if the channel or stub is
  // gone, retires the sync point immediately so waiters are not stuck.
  static void InsertSyncPointOnMainThread(
      base::WeakPtr<GpuChannel>* gpu_channel,
      scoped_refptr<SyncPointManager> manager,
      int32 routing_id,
      uint32 sync_point) {
    // This function must ensure that the sync point will be retired. Normally
    // we'll find the stub based on the routing ID, and associate the sync point
    // with it, but if that fails for any reason (channel or stub already
    // deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (gpu_channel->get()) {
      GpuCommandBufferStub* stub = gpu_channel->get()->LookupCommandBuffer(
          routing_id);
      if (stub) {
        stub->AddSyncPoint(sync_point);
        GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
        gpu_channel->get()->OnMessageReceived(message);
        return;
      } else {
        // No stub: the filter already counted this IPC, so keep the
        // processed-message bookkeeping balanced.
        gpu_channel->get()->MessageProcessed();
      }
    }
    manager->RetireSyncPoint(sync_point);
  }

  // Runs on the main thread: the WeakPtr must be destroyed where it was
  // created.
  static void DeleteWeakPtrOnMainThread(
      base::WeakPtr<GpuChannel>* gpu_channel) {
    delete gpu_channel;
  }

  // NOTE: this is a pointer to a weak pointer. It is never dereferenced on the
  // IO thread, it's only passed through - therefore the WeakPtr assumptions are
  // respected.
  base::WeakPtr<GpuChannel>* gpu_channel_;
  IPC::Channel* channel_;
  scoped_refptr<SyncPointManager> sync_point_manager_;
  scoped_refptr<base::MessageLoopProxy> message_loop_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::queue<PendingMessage> pending_messages_;

  // Count of the number of IPCs forwarded to the GpuChannel.
  uint64 messages_forwarded_to_channel_;

  base::OneShotTimer<GpuChannelMessageFilter> timer_;

  bool a_stub_is_descheduled_;
};
388
// Constructs a channel for one renderer client. |share_group| and |mailbox|
// may be NULL, in which case fresh ones are created; |watchdog| may be NULL.
// A unique verified IPC channel id is generated here; the actual IPC channel
// is not created until Init() is called.
GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       int client_id,
                       bool software)
    : gpu_channel_manager_(gpu_channel_manager),
      messages_processed_(0),
      client_id_(client_id),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager),
      image_manager_(new gpu::gles2::ImageManager),
      watchdog_(watchdog),
      software_(software),
      handle_messages_scheduled_(false),
      processed_get_state_fast_(false),
      currently_processing_message_(NULL),
      weak_factory_(this),
      num_stubs_descheduled_(0) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
  const CommandLine* command_line = CommandLine::ForCurrentProcess();
  log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
}
415
416
// Creates the IPC server channel and attaches the IO-thread message filter.
// Must be called exactly once (DCHECKs that no channel exists yet). The
// heap-allocated WeakPtr is handed to the filter, which takes ownership and
// deletes it back on this (main) thread when the filter is destroyed.
void GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
                      base::WaitableEvent* shutdown_event) {
  DCHECK(!channel_.get());

  // Map renderer ID to a (single) channel to that process.
  channel_.reset(new IPC::SyncChannel(
      channel_id_,
      IPC::Channel::MODE_SERVER,
      this,
      io_message_loop,
      false,
      shutdown_event));

  base::WeakPtr<GpuChannel>* weak_ptr(new base::WeakPtr<GpuChannel>(
      weak_factory_.GetWeakPtr()));

  filter_ = new GpuChannelMessageFilter(
      weak_ptr,
      gpu_channel_manager_->sync_point_manager(),
      base::MessageLoopProxy::current());
  io_message_loop_ = io_message_loop;
  channel_->AddFilter(filter_.get());

  devtools_gpu_agent_.reset(new DevToolsGpuAgent(this));
}
442
// Returns the verified channel id generated in the constructor.
std::string GpuChannel::GetChannelName() {
  return channel_id_;
}
446
#if defined(OS_POSIX)
// Hands the client end of the channel's socket pair to the caller so it can
// be sent to the renderer. Returns -1 (and asserts) if Init() has not
// created the channel yet.
int GpuChannel::TakeRendererFileDescriptor() {
  if (channel_)
    return channel_->TakeClientFileDescriptor();
  NOTREACHED();
  return -1;
}
#endif  // defined(OS_POSIX)
456
// Entry point for messages forwarded by the IO-thread filter. Messages are
// never handled inline; they are copied onto deferred_messages_ and drained
// later by HandleMessage(), which keeps processing non-reentrant.
bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  if (log_messages_) {
    DVLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
  }

  if (message.type() == GpuCommandBufferMsg_GetStateFast::ID) {
    if (processed_get_state_fast_) {
      // Require a non-GetStateFast message in between two GetStateFast
      // messages, to ensure progress is made.
      // Skip the run of GetStateFast messages at the head of the queue,
      // then insert after the first non-GetStateFast message.
      std::deque<IPC::Message*>::iterator point = deferred_messages_.begin();

      while (point != deferred_messages_.end() &&
             (*point)->type() == GpuCommandBufferMsg_GetStateFast::ID) {
        ++point;
      }

      if (point != deferred_messages_.end()) {
        ++point;
      }

      deferred_messages_.insert(point, new IPC::Message(message));
    } else {
      // Move GetStateFast commands to the head of the queue, so the renderer
      // doesn't have to wait any longer than necessary.
      deferred_messages_.push_front(new IPC::Message(message));
    }
  } else {
    deferred_messages_.push_back(new IPC::Message(message));
  }

  OnScheduled();

  return true;
}
492
// The renderer side of the channel went away; tear this channel down.
// RemoveChannel releases the manager's reference to this object.
void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}
496
497 bool GpuChannel::Send(IPC::Message* message) {
498   // The GPU process must never send a synchronous IPC message to the renderer
499   // process. This could result in deadlock.
500   DCHECK(!message->is_sync());
501   if (log_messages_) {
502     DVLOG(1) << "sending message @" << message << " on channel @" << this
503              << " with type " << message->type();
504   }
505
506   if (!channel_) {
507     delete message;
508     return false;
509   }
510
511   return channel_->Send(message);
512 }
513
// Puts a copy of the message currently being dispatched back at the head of
// the deferred queue so it is retried, and rolls back the processed count
// that HandleMessage will have incremented for it.
void GpuChannel::RequeueMessage() {
  DCHECK(currently_processing_message_);
  deferred_messages_.push_front(
      new IPC::Message(*currently_processing_message_));
  messages_processed_--;
  currently_processing_message_ = NULL;
}
521
522 void GpuChannel::OnScheduled() {
523   if (handle_messages_scheduled_)
524     return;
525   // Post a task to handle any deferred messages. The deferred message queue is
526   // not emptied here, which ensures that OnMessageReceived will continue to
527   // defer newly received messages until the ones in the queue have all been
528   // handled by HandleMessage. HandleMessage is invoked as a
529   // task to prevent reentrancy.
530   base::MessageLoop::current()->PostTask(
531       FROM_HERE,
532       base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
533   handle_messages_scheduled_ = true;
534 }
535
// Tracks how many stubs are descheduled and, when the any-descheduled bit
// flips, forwards the new state to the IO-thread filter so it can adjust
// preemption. Also kicks message handling when a stub becomes scheduled.
void GpuChannel::StubSchedulingChanged(bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    OnScheduled();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_,
                     a_stub_is_descheduled));
    }
  }
}
557
// Creates an onscreen command buffer stub bound to |window| / |surface_id|.
// On success *route_id receives the new route; it stays MSG_ROUTING_NONE
// only until the id is generated (this path currently always creates the
// stub). Ownership of the stub passes to stubs_.
void GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32* route_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  *route_id = MSG_ROUTING_NONE;

  // May be NULL if the client did not request context sharing.
  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  // Virtualize compositor contexts on OS X to prevent performance regressions
  // when enabling FCM.
  // http://crbug.com/180463
  bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
  use_virtualized_gl_context = true;
#endif

  *route_id = GenerateRouteID();
  scoped_ptr<GpuCommandBufferStub> stub(
      new GpuCommandBufferStub(this,
                               share_group,
                               window,
                               mailbox_manager_.get(),
                               image_manager_.get(),
                               gfx::Size(),
                               disallowed_features_,
                               init_params.attribs,
                               init_params.gpu_preference,
                               use_virtualized_gl_context,
                               *route_id,
                               surface_id,
                               watchdog_,
                               software_,
                               init_params.active_url));
  // If another channel already asked us to honor a preemption flag, apply it
  // to the new stub as well.
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  router_.AddRoute(*route_id, stub.get());
  stubs_.AddWithID(stub.release(), *route_id);
}
602
// Returns the stub registered under |route_id|, or NULL if none exists.
GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.Lookup(route_id);
}
606
607 void GpuChannel::CreateImage(
608     gfx::PluginWindowHandle window,
609     int32 image_id,
610     gfx::Size* size) {
611   TRACE_EVENT1("gpu",
612                "GpuChannel::CreateImage",
613                "image_id",
614                image_id);
615
616   *size = gfx::Size();
617
618   if (image_manager_->LookupImage(image_id)) {
619     LOG(ERROR) << "CreateImage failed, image_id already in use.";
620     return;
621   }
622
623   scoped_refptr<gfx::GLImage> image = gfx::GLImage::CreateGLImage(window);
624   if (!image.get())
625     return;
626
627   image_manager_->AddImage(image.get(), image_id);
628   *size = image->GetSize();
629 }
630
// Unregisters the image created by CreateImage(). Safe to call with an
// unknown id (RemoveImage is a lookup-and-erase on the manager's map).
void GpuChannel::DeleteImage(int32 image_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::DeleteImage",
               "image_id",
               image_id);

  image_manager_->RemoveImage(image_id);
}
639
// Delegates to the channel manager, which loses contexts across all
// channels, not just this one.
void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}
643
// Marks the context of every stub on this channel as lost.
void GpuChannel::MarkAllContextsLost() {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->MarkContextLost();
  }
}
650
// Posts self-destruction to the current loop; OnDestroy() asks the manager
// to remove this channel. NOTE(review): this binds |this| rather than a
// WeakPtr — presumably the Bind retains a reference keeping the channel
// alive until the task runs; confirm against GpuChannel's ownership model.
void GpuChannel::DestroySoon() {
  base::MessageLoop::current()->PostTask(
      FROM_HERE, base::Bind(&GpuChannel::OnDestroy, this));
}
655
// Returns a process-wide (static counter) route id, starting at 1. Main
// thread only: the counter is not synchronized.
int32 GpuChannel::GenerateRouteID() {
  static int32 last_id = 0;
  return ++last_id;
}
660
// Registers |listener| to receive messages routed to |route_id|.
void GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  router_.AddRoute(route_id, listener);
}
664
// Removes the routing entry added by AddRoute().
void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}
668
// Lazily creates this channel's preemption flag. On first creation, the
// IO-thread filter is told about the flag and the current scheduling state
// so it can start driving preemption.
gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_message_loop_->PostTask(
        FROM_HERE, base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}
679
// Stores the flag by which this channel can be preempted (set by another,
// higher-priority channel) and propagates it to every existing stub. New
// stubs pick it up in the Create*CommandBuffer paths.
void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
  }
}
689
// Clears our preemption flag so channels we were preempting resume.
GpuChannel::~GpuChannel() {
  if (preempting_flag_.get())
    preempting_flag_->Reset();
}
694
// Deferred-destruction task posted by DestroySoon(); the manager drops its
// reference to this channel.
void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}
699
// Dispatches channel-level (MSG_ROUTING_CONTROL) messages to their handlers.
// Every control message is expected to be handled; an unhandled type trips
// the DCHECK below.
bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateVideoEncoder, OnCreateVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyVideoEncoder,
                        OnDestroyVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording,
                        OnDevToolsStartEventsRecording)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording,
                        OnDevToolsStopEventsRecording)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}
719
// Drains deferred_messages_, dispatching each message to the control handler
// or the router. Stops (without consuming) when the target stub is
// unscheduled, and reschedules itself when the stub is preempted or messages
// remain. The loop only repeats immediately for Echo messages so the
// EchoACK after a SwapBuffers goes out with minimal delay.
void GpuChannel::HandleMessage() {
  handle_messages_scheduled_ = false;
  if (deferred_messages_.empty())
    return;

  bool should_fast_track_ack = false;
  IPC::Message* m = deferred_messages_.front();
  GpuCommandBufferStub* stub = stubs_.Lookup(m->routing_id());

  do {
    if (stub) {
      // Leave the message queued; StubSchedulingChanged() will call
      // OnScheduled() when the stub becomes runnable again.
      if (!stub->IsScheduled())
        return;
      if (stub->IsPreempted()) {
        OnScheduled();
        return;
      }
    }

    // Take ownership of the front message; it is deleted at scope exit.
    scoped_ptr<IPC::Message> message(m);
    deferred_messages_.pop_front();
    bool message_processed = true;

    processed_get_state_fast_ =
        (message->type() == GpuCommandBufferMsg_GetStateFast::ID);

    // Expose the in-flight message so RequeueMessage() can copy it.
    currently_processing_message_ = message.get();
    bool result;
    if (message->routing_id() == MSG_ROUTING_CONTROL)
      result = OnControlMessageReceived(*message);
    else
      result = router_.RouteMessage(*message);
    currently_processing_message_ = NULL;

    if (!result) {
      // Respond to sync messages even if router failed to route.
      if (message->is_sync()) {
        IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
        reply->set_reply_error();
        Send(reply);
      }
    } else {
      // If the command buffer becomes unscheduled as a result of handling the
      // message but still has more commands to process, synthesize an IPC
      // message to flush that command buffer.
      if (stub) {
        if (stub->HasUnprocessedCommands()) {
          deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
              stub->route_id()));
          message_processed = false;
        }
      }
    }
    if (message_processed)
      MessageProcessed();

    // We want the EchoACK following the SwapBuffers to be sent as close as
    // possible, avoiding scheduling other channels in the meantime.
    should_fast_track_ack = false;
    if (!deferred_messages_.empty()) {
      m = deferred_messages_.front();
      stub = stubs_.Lookup(m->routing_id());
      should_fast_track_ack =
          (m->type() == GpuCommandBufferMsg_Echo::ID) &&
          stub && stub->IsScheduled();
    }
  } while (should_fast_track_ack);

  if (!deferred_messages_.empty()) {
    OnScheduled();
  }
}
792
793 void GpuChannel::OnCreateOffscreenCommandBuffer(
794     const gfx::Size& size,
795     const GPUCreateCommandBufferConfig& init_params,
796     int32* route_id) {
797   TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
798   GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);
799
800   *route_id = GenerateRouteID();
801
802   scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
803       this,
804       share_group,
805       gfx::GLSurfaceHandle(),
806       mailbox_manager_.get(),
807       image_manager_.get(),
808       size,
809       disallowed_features_,
810       init_params.attribs,
811       init_params.gpu_preference,
812       false,
813       *route_id,
814       0,
815       watchdog_,
816       software_,
817       init_params.active_url));
818   if (preempted_flag_.get())
819     stub->SetPreemptByFlag(preempted_flag_);
820   router_.AddRoute(*route_id, stub.get());
821   stubs_.AddWithID(stub.release(), *route_id);
822   TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
823                "route_id", route_id);
824 }
825
826 void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
827   TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
828                "route_id", route_id);
829
830   GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
831   if (!stub)
832     return;
833   bool need_reschedule = (stub && !stub->IsScheduled());
834   router_.RemoveRoute(route_id);
835   stubs_.Remove(route_id);
836   // In case the renderer is currently blocked waiting for a sync reply from the
837   // stub, we need to make sure to reschedule the GpuChannel here.
838   if (need_reschedule) {
839     // This stub won't get a chance to reschedule, so update the count now.
840     StubSchedulingChanged(true);
841   }
842 }
843
// Creates a video encode accelerator routed on this channel. *route_id
// receives the new route; ownership stays with video_encoders_.
void GpuChannel::OnCreateVideoEncoder(int32* route_id) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateVideoEncoder");

  *route_id = GenerateRouteID();
  GpuVideoEncodeAccelerator* encoder =
      new GpuVideoEncodeAccelerator(this, *route_id);
  router_.AddRoute(*route_id, encoder);
  video_encoders_.AddWithID(encoder, *route_id);
}
853
// Destroys the encoder created by OnCreateVideoEncoder; no-op for unknown
// route ids.
void GpuChannel::OnDestroyVideoEncoder(int32 route_id) {
  TRACE_EVENT1(
      "gpu", "GpuChannel::OnDestroyVideoEncoder", "route_id", route_id);
  GpuVideoEncodeAccelerator* encoder = video_encoders_.Lookup(route_id);
  if (!encoder)
    return;
  router_.RemoveRoute(route_id);
  video_encoders_.Remove(route_id);
}
863
// Forwards to the DevTools agent created in Init(); it fills in *route_id.
void GpuChannel::OnDevToolsStartEventsRecording(int32* route_id) {
  devtools_gpu_agent_->StartEventsRecording(route_id);
}
867
// Forwards to the DevTools agent created in Init().
void GpuChannel::OnDevToolsStopEventsRecording() {
  devtools_gpu_agent_->StopEventsRecording();
}
871
// Advances the processed-message counter and mirrors it to the IO-thread
// filter (only needed once preemption is active) so the filter can drop the
// corresponding entries from its pending-message queue.
void GpuChannel::MessageProcessed() {
  messages_processed_++;
  if (preempting_flag_.get()) {
    io_message_loop_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannelMessageFilter::MessageProcessed,
                   filter_,
                   messages_processed_));
  }
}
882
// Forwards a compiled shader to the browser process (via the channel
// manager) for on-disk caching, tagged with this channel's client id.
void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}
888
// Attaches an additional IO-thread message filter to the IPC channel.
void GpuChannel::AddFilter(IPC::ChannelProxy::MessageFilter* filter) {
  channel_->AddFilter(filter);
}
892
// Detaches a filter previously added with AddFilter().
void GpuChannel::RemoveFilter(IPC::ChannelProxy::MessageFilter* filter) {
  channel_->RemoveFilter(filter);
}
896
897 uint64 GpuChannel::GetMemoryUsage() {
898   uint64 size = 0;
899   for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
900        !it.IsAtEnd(); it.Advance()) {
901     size += it.GetCurrentValue()->GetMemoryUsage();
902   }
903   return size;
904 }
905
906 }  // namespace content