f02a69fdddf44ebeecba983e972ff603034d6c68
[platform/framework/web/crosswalk.git] / src / content / common / gpu / gpu_channel.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if defined(OS_WIN)
6 #include <windows.h>
7 #endif
8
9 #include "content/common/gpu/gpu_channel.h"
10
11 #include <queue>
12 #include <vector>
13
14 #include "base/bind.h"
15 #include "base/command_line.h"
16 #include "base/debug/trace_event.h"
17 #include "base/message_loop/message_loop_proxy.h"
18 #include "base/rand_util.h"
19 #include "base/strings/string_util.h"
20 #include "base/timer/timer.h"
21 #include "content/common/gpu/devtools_gpu_agent.h"
22 #include "content/common/gpu/gpu_channel_manager.h"
23 #include "content/common/gpu/gpu_messages.h"
24 #include "content/common/gpu/media/gpu_video_encode_accelerator.h"
25 #include "content/common/gpu/sync_point_manager.h"
26 #include "content/public/common/content_switches.h"
27 #include "crypto/hmac.h"
28 #include "gpu/command_buffer/common/mailbox.h"
29 #include "gpu/command_buffer/service/gpu_scheduler.h"
30 #include "gpu/command_buffer/service/image_manager.h"
31 #include "gpu/command_buffer/service/mailbox_manager.h"
32 #include "ipc/ipc_channel.h"
33 #include "ipc/ipc_channel_proxy.h"
34 #include "ui/gl/gl_context.h"
35 #include "ui/gl/gl_image.h"
36 #include "ui/gl/gl_surface.h"
37
38 #if defined(OS_POSIX)
39 #include "ipc/ipc_channel_posix.h"
40 #endif
41
namespace content {
namespace {

// Number of milliseconds between successive vsync. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;

}  // anonymous namespace
63
// This filter does three things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting and we limit the amount of time that we can preempt in
//   one shot (see constants above).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
// - it generates mailbox names for clients of the GPU process on the IO thread.
//
// All methods except the constructor/destructor helpers run on the IO thread;
// work that must touch the GpuChannel is posted to |message_loop_|.
class GpuChannelMessageFilter : public IPC::ChannelProxy::MessageFilter {
 public:
  // Takes ownership of gpu_channel (see below).
  // |private_key| seeds the HMAC used to sign generated mailbox names.
  GpuChannelMessageFilter(const std::string& private_key,
                          base::WeakPtr<GpuChannel>* gpu_channel,
                          scoped_refptr<SyncPointManager> sync_point_manager,
                          scoped_refptr<base::MessageLoopProxy> message_loop)
      : preemption_state_(IDLE),
        gpu_channel_(gpu_channel),
        channel_(NULL),
        sync_point_manager_(sync_point_manager),
        message_loop_(message_loop),
        messages_forwarded_to_channel_(0),
        a_stub_is_descheduled_(false),
        hmac_(crypto::HMAC::SHA256) {
    bool success = hmac_.Init(base::StringPiece(private_key));
    DCHECK(success);
  }

  virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
    DCHECK(!channel_);
    channel_ = channel;
  }

  virtual void OnFilterRemoved() OVERRIDE {
    DCHECK(channel_);
    channel_ = NULL;
  }

  // Handles mailbox-name generation and sync-point insertion directly on the
  // IO thread; everything else is counted/timestamped and forwarded to the
  // GpuChannel (by returning false).
  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE {
    DCHECK(channel_);

    bool handled = true;
    IPC_BEGIN_MESSAGE_MAP(GpuChannelMessageFilter, message)
      IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNames,
                          OnGenerateMailboxNames)
      IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNamesAsync,
                          OnGenerateMailboxNamesAsync)
      IPC_MESSAGE_UNHANDLED(handled = false)
    IPC_END_MESSAGE_MAP()

    if (message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) {
      // This message should not be sent explicitly by the renderer.
      NOTREACHED();
      handled = true;
    }

    // All other messages get processed by the GpuChannel.
    if (!handled) {
      messages_forwarded_to_channel_++;
      // Only track pending messages when a preempting flag has been installed
      // via SetPreemptingFlagAndSchedulingState().
      if (preempting_flag_.get())
        pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
      UpdatePreemptionState();
    }

    if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
      // Reply with the new sync point immediately from the IO thread, then
      // post the retirement onto the channel's (main) thread queue.
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      message_loop_->PostTask(FROM_HERE, base::Bind(
          &GpuChannelMessageFilter::InsertSyncPointOnMainThread,
          gpu_channel_,
          sync_point_manager_,
          message.routing_id(),
          sync_point));
      handled = true;
    }
    return handled;
  }

  // Called (via PostTask from the main thread) when the GpuChannel has
  // processed messages up to |messages_processed|; drops the corresponding
  // pending entries and re-evaluates preemption.
  void MessageProcessed(uint64 messages_processed) {
    while (!pending_messages_.empty() &&
           pending_messages_.front().message_number <= messages_processed)
      pending_messages_.pop();
    UpdatePreemptionState();
  }

  void SetPreemptingFlagAndSchedulingState(
      gpu::PreemptionFlag* preempting_flag,
      bool a_stub_is_descheduled) {
    preempting_flag_ = preempting_flag;
    a_stub_is_descheduled_ = a_stub_is_descheduled;
  }

  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

  bool Send(IPC::Message* message) {
    return channel_->Send(message);
  }

 protected:
  virtual ~GpuChannelMessageFilter() {
    // The heap-allocated WeakPtr handed to the constructor must be deleted on
    // the thread it was created on (the main thread).
    message_loop_->PostTask(FROM_HERE, base::Bind(
        &GpuChannelMessageFilter::DeleteWeakPtrOnMainThread, gpu_channel_));
  }

 private:
  // Message handlers.
  // Fills |result| with |num| mailbox names: first half random bytes, second
  // half the HMAC signature of the first half.
  void OnGenerateMailboxNames(unsigned num, std::vector<gpu::Mailbox>* result) {
    TRACE_EVENT1("gpu", "OnGenerateMailboxNames", "num", num);

    result->resize(num);

    for (unsigned i = 0; i < num; ++i) {
      char name[GL_MAILBOX_SIZE_CHROMIUM];
      base::RandBytes(name, sizeof(name) / 2);

      bool success = hmac_.Sign(
          base::StringPiece(name, sizeof(name) / 2),
          reinterpret_cast<unsigned char*>(name) + sizeof(name) / 2,
          sizeof(name) / 2);
      DCHECK(success);

      (*result)[i].SetName(reinterpret_cast<int8*>(name));
    }
  }

  // Async variant: generates the names and sends them back in a reply message
  // instead of a sync-IPC out-parameter.
  void OnGenerateMailboxNamesAsync(unsigned num) {
    std::vector<gpu::Mailbox> names;
    OnGenerateMailboxNames(num, &names);
    Send(new GpuChannelMsg_GenerateMailboxNamesReply(names));
  }

  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };

  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  // A forwarded message's sequence number plus the time it arrived on the IO
  // thread; used to measure how long the oldest unprocessed IPC has waited.
  struct PendingMessage {
    uint64 message_number;
    base::TimeTicks time_received;

    explicit PendingMessage(uint64 message_number)
        : message_number(message_number),
          time_received(base::TimeTicks::Now()) {
    }
  };

  // Central state machine step; every event (new message, message processed,
  // scheduling change, timer fire) funnels through here.
  void UpdatePreemptionState() {
    switch (preemption_state_) {
      case IDLE:
        if (preempting_flag_.get() && !pending_messages_.empty())
          TransitionToWaiting();
        break;
      case WAITING:
        // A timer will transition us to CHECKING.
        DCHECK(timer_.IsRunning());
        break;
      case CHECKING:
        if (!pending_messages_.empty()) {
          base::TimeDelta time_elapsed =
              base::TimeTicks::Now() - pending_messages_.front().time_received;
          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
            // Schedule another check for when the IPC may go long.
            timer_.Start(
                FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                    time_elapsed,
                this, &GpuChannelMessageFilter::UpdatePreemptionState);
          } else {
            if (a_stub_is_descheduled_)
              TransitionToWouldPreemptDescheduled();
            else
              TransitionToPreempting();
          }
        }
        break;
      case PREEMPTING:
        // A TransitionToIdle() timer should always be running in this state.
        DCHECK(timer_.IsRunning());
        if (a_stub_is_descheduled_)
          TransitionToWouldPreemptDescheduled();
        else
          TransitionToIdleIfCaughtUp();
        break;
      case WOULD_PREEMPT_DESCHEDULED:
        // A TransitionToIdle() timer should never be running in this state.
        DCHECK(!timer_.IsRunning());
        if (!a_stub_is_descheduled_)
          TransitionToPreempting();
        else
          TransitionToIdleIfCaughtUp();
        break;
      default:
        NOTREACHED();
    }
  }

  // Leaves PREEMPTING/WOULD_PREEMPT_DESCHEDULED once the queue is empty or
  // the oldest pending IPC is younger than kStopPreemptThresholdMs.
  void TransitionToIdleIfCaughtUp() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    if (pending_messages_.empty()) {
      TransitionToIdle();
    } else {
      base::TimeDelta time_elapsed =
          base::TimeTicks::Now() - pending_messages_.front().time_received;
      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
        TransitionToIdle();
    }
  }

  void TransitionToIdle() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
    timer_.Stop();

    preemption_state_ = IDLE;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  void TransitionToWaiting() {
    DCHECK_EQ(preemption_state_, IDLE);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = WAITING;
    timer_.Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
        this, &GpuChannelMessageFilter::TransitionToChecking);
  }

  void TransitionToChecking() {
    DCHECK_EQ(preemption_state_, WAITING);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = CHECKING;
    // Reset the preemption budget for this round.
    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
    UpdatePreemptionState();
  }

  void TransitionToPreempting() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    DCHECK(!a_stub_is_descheduled_);

    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    if (preemption_state_ == CHECKING)
      timer_.Stop();

    preemption_state_ = PREEMPTING;
    preempting_flag_->Set();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

    // Bound the preemption: fall back to IDLE after the remaining budget.
    timer_.Start(
       FROM_HERE,
       max_preemption_time_,
       this, &GpuChannelMessageFilter::TransitionToIdle);

    UpdatePreemptionState();
  }

  void TransitionToWouldPreemptDescheduled() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == PREEMPTING);
    DCHECK(a_stub_is_descheduled_);

    if (preemption_state_ == CHECKING) {
      // Stop any pending state update checks that we may have queued
      // while CHECKING.
      timer_.Stop();
    } else {
      // Stop any TransitionToIdle() timers that we may have queued
      // while PREEMPTING.
      timer_.Stop();
      // Bank the unused portion of the preemption budget for later.
      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
      if (max_preemption_time_ < base::TimeDelta()) {
        TransitionToIdle();
        return;
      }
    }

    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  static void InsertSyncPointOnMainThread(
      base::WeakPtr<GpuChannel>* gpu_channel,
      scoped_refptr<SyncPointManager> manager,
      int32 routing_id,
      uint32 sync_point) {
    // This function must ensure that the sync point will be retired. Normally
    // we'll find the stub based on the routing ID, and associate the sync point
    // with it, but if that fails for any reason (channel or stub already
    // deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (gpu_channel->get()) {
      GpuCommandBufferStub* stub = gpu_channel->get()->LookupCommandBuffer(
          routing_id);
      if (stub) {
        stub->AddSyncPoint(sync_point);
        GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
        gpu_channel->get()->OnMessageReceived(message);
        return;
      } else {
        // Keep the channel's processed-message count in step even though the
        // stub is gone.
        gpu_channel->get()->MessageProcessed();
      }
    }
    manager->RetireSyncPoint(sync_point);
  }

  static void DeleteWeakPtrOnMainThread(
      base::WeakPtr<GpuChannel>* gpu_channel) {
    delete gpu_channel;
  }

  // NOTE: this is a pointer to a weak pointer. It is never dereferenced on the
  // IO thread, it's only passed through - therefore the WeakPtr assumptions are
  // respected.
  base::WeakPtr<GpuChannel>* gpu_channel_;
  IPC::Channel* channel_;
  scoped_refptr<SyncPointManager> sync_point_manager_;
  scoped_refptr<base::MessageLoopProxy> message_loop_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::queue<PendingMessage> pending_messages_;

  // Count of the number of IPCs forwarded to the GpuChannel.
  uint64 messages_forwarded_to_channel_;

  base::OneShotTimer<GpuChannelMessageFilter> timer_;

  bool a_stub_is_descheduled_;

  // Signs mailbox names; keyed with the private key passed at construction.
  crypto::HMAC hmac_;
};
429
// Constructs a channel for one client of the GPU process. |share_group| and
// |mailbox| may be NULL, in which case fresh per-channel instances are
// created. |client_id| must be non-zero.
GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       int client_id,
                       bool software)
    : gpu_channel_manager_(gpu_channel_manager),
      messages_processed_(0),
      client_id_(client_id),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager),
      image_manager_(new gpu::gles2::ImageManager),
      watchdog_(watchdog),
      software_(software),
      handle_messages_scheduled_(false),
      processed_get_state_fast_(false),
      currently_processing_message_(NULL),
      weak_factory_(this),
      num_stubs_descheduled_(0) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  // The channel name is generated here; the IPC channel itself is created
  // later in Init().
  channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
  const CommandLine* command_line = CommandLine::ForCurrentProcess();
  log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
  disallowed_features_.multisampling =
      command_line->HasSwitch(switches::kDisableGLMultisampling);
}
458
459
// Creates the IPC channel and installs the IO-thread message filter.
// Must be called exactly once, before any messages are exchanged.
bool GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
                      base::WaitableEvent* shutdown_event) {
  DCHECK(!channel_.get());

  // Map renderer ID to a (single) channel to that process.
  channel_.reset(new IPC::SyncChannel(
      channel_id_,
      IPC::Channel::MODE_SERVER,
      this,
      io_message_loop,
      false,
      shutdown_event));

  // Heap-allocated WeakPtr: ownership is transferred to the filter, which
  // passes it through to the IO thread without dereferencing it there and
  // deletes it back on this thread in its destructor.
  base::WeakPtr<GpuChannel>* weak_ptr(new base::WeakPtr<GpuChannel>(
      weak_factory_.GetWeakPtr()));

  filter_ = new GpuChannelMessageFilter(
      mailbox_manager_->private_key(),
      weak_ptr,
      gpu_channel_manager_->sync_point_manager(),
      base::MessageLoopProxy::current());
  io_message_loop_ = io_message_loop;
  channel_->AddFilter(filter_.get());

  devtools_gpu_agent_.reset(new DevToolsGpuAgent(this));

  return true;
}
488
// Returns the channel ID generated in the constructor.
std::string GpuChannel::GetChannelName() {
  return channel_id_;
}
492
#if defined(OS_POSIX)
// Hands the renderer-side end of the channel's socket pair to the caller.
// Returns -1 (after a NOTREACHED) if called before Init().
int GpuChannel::TakeRendererFileDescriptor() {
  if (!channel_) {
    NOTREACHED();
    return -1;
  }
  return channel_->TakeClientFileDescriptor();
}
#endif  // defined(OS_POSIX)
502
// All incoming messages are deferred (copied into deferred_messages_) and
// handled later by HandleMessage(), which is scheduled via OnScheduled().
// GetStateFast messages jump the queue, subject to a fairness rule.
bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  if (log_messages_) {
    DVLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
  }

  if (message.type() == GpuCommandBufferMsg_GetStateFast::ID) {
    if (processed_get_state_fast_) {
      // Require a non-GetStateFast message in between two GetStateFast
      // messages, to ensure progress is made.
      std::deque<IPC::Message*>::iterator point = deferred_messages_.begin();

      // Skip past the run of GetStateFast messages at the head of the queue.
      while (point != deferred_messages_.end() &&
             (*point)->type() == GpuCommandBufferMsg_GetStateFast::ID) {
        ++point;
      }

      // Insert after one non-GetStateFast message, if any.
      if (point != deferred_messages_.end()) {
        ++point;
      }

      deferred_messages_.insert(point, new IPC::Message(message));
    } else {
      // Move GetStateFast commands to the head of the queue, so the renderer
      // doesn't have to wait any longer than necessary.
      deferred_messages_.push_front(new IPC::Message(message));
    }
  } else {
    deferred_messages_.push_back(new IPC::Message(message));
  }

  OnScheduled();

  return true;
}
538
// A channel error (e.g. the client went away) tears down this channel.
// NOTE(review): RemoveChannel is expected to destroy |this| — confirm before
// adding code after this call.
void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}
542
543 bool GpuChannel::Send(IPC::Message* message) {
544   // The GPU process must never send a synchronous IPC message to the renderer
545   // process. This could result in deadlock.
546   DCHECK(!message->is_sync());
547   if (log_messages_) {
548     DVLOG(1) << "sending message @" << message << " on channel @" << this
549              << " with type " << message->type();
550   }
551
552   if (!channel_) {
553     delete message;
554     return false;
555   }
556
557   return channel_->Send(message);
558 }
559
// Puts the message currently being dispatched back at the head of the queue
// so it is retried, and rolls back the processed-message counter so the IO
// filter's accounting stays consistent.
void GpuChannel::RequeueMessage() {
  DCHECK(currently_processing_message_);
  deferred_messages_.push_front(
      new IPC::Message(*currently_processing_message_));
  messages_processed_--;
  currently_processing_message_ = NULL;
}
567
// Schedules (at most one) HandleMessage task on the current message loop.
void GpuChannel::OnScheduled() {
  if (handle_messages_scheduled_)
    return;
  // Post a task to handle any deferred messages. The deferred message queue is
  // not emptied here, which ensures that OnMessageReceived will continue to
  // defer newly received messages until the ones in the queue have all been
  // handled by HandleMessage. HandleMessage is invoked as a
  // task to prevent reentrancy.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
  handle_messages_scheduled_ = true;
}
581
// Tracks how many stubs are descheduled and, when the any-descheduled bit
// flips, forwards the new state to the IO-thread filter so it can adjust
// preemption.
void GpuChannel::StubSchedulingChanged(bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    // A stub became runnable again; pump the deferred queue.
    OnScheduled();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_,
                     a_stub_is_descheduled));
    }
  }
}
603
// Creates an onscreen command buffer stub bound to |window|/|surface_id| and
// returns its new route ID through |route_id| (MSG_ROUTING_NONE is only left
// there transiently; a route is always generated below).
void GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32* route_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  *route_id = MSG_ROUTING_NONE;

  // May be NULL if the client did not request context sharing.
  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  // Virtualize compositor contexts on OS X to prevent performance regressions
  // when enabling FCM.
  // http://crbug.com/180463
  bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
  use_virtualized_gl_context = true;
#endif

  *route_id = GenerateRouteID();
  scoped_ptr<GpuCommandBufferStub> stub(
      new GpuCommandBufferStub(this,
                               share_group,
                               window,
                               mailbox_manager_.get(),
                               image_manager_.get(),
                               gfx::Size(),
                               disallowed_features_,
                               init_params.attribs,
                               init_params.gpu_preference,
                               use_virtualized_gl_context,
                               *route_id,
                               surface_id,
                               watchdog_,
                               software_,
                               init_params.active_url));
  // Propagate any preemption flag already configured for this channel.
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  router_.AddRoute(*route_id, stub.get());
  stubs_.AddWithID(stub.release(), *route_id);
}
648
// Returns the stub registered under |route_id|, or NULL if none.
GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.Lookup(route_id);
}
652
// Wraps |window| in a GLImage and registers it under |image_id|. On success
// *size is the image's size; on any failure *size stays gfx::Size() (empty).
void GpuChannel::CreateImage(
    gfx::PluginWindowHandle window,
    int32 image_id,
    gfx::Size* size) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateImage",
               "image_id",
               image_id);

  *size = gfx::Size();

  // IDs are client-chosen; reject duplicates rather than overwrite.
  if (image_manager_->LookupImage(image_id)) {
    LOG(ERROR) << "CreateImage failed, image_id already in use.";
    return;
  }

  scoped_refptr<gfx::GLImage> image = gfx::GLImage::CreateGLImage(window);
  if (!image.get())
    return;

  image_manager_->AddImage(image.get(), image_id);
  *size = image->GetSize();
}
676
// Unregisters the image created by CreateImage for |image_id|.
void GpuChannel::DeleteImage(int32 image_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::DeleteImage",
               "image_id",
               image_id);

  image_manager_->RemoveImage(image_id);
}
685
// Escalates context loss to the manager, which handles all channels.
void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}
689
// Marks every stub on this channel as having a lost context.
void GpuChannel::MarkAllContextsLost() {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->MarkContextLost();
  }
}
696
// Posts self-destruction (via the manager) to the current message loop.
// NOTE(review): binds a raw |this|, not a WeakPtr — assumes the channel
// outlives the posted task; confirm against the manager's teardown path.
void GpuChannel::DestroySoon() {
  base::MessageLoop::current()->PostTask(
      FROM_HERE, base::Bind(&GpuChannel::OnDestroy, this));
}
701
702 int32 GpuChannel::GenerateRouteID() {
703   static int32 last_id = 0;
704   return ++last_id;
705 }
706
// Registers |listener| to receive messages addressed to |route_id|.
void GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  router_.AddRoute(route_id, listener);
}
710
// Unregisters the listener previously added for |route_id|.
void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}
714
// Lazily creates the flag this channel raises to preempt others, and tells
// the IO-thread filter to start driving it with the current scheduling state.
gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_message_loop_->PostTask(
        FROM_HERE, base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}
725
// Records the flag by which THIS channel can be preempted and propagates it
// to all existing stubs (new stubs pick it up at creation time).
void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
  }
}
735
GpuChannel::~GpuChannel() {
  // Make sure no other channel is left preempted by this (dying) one.
  if (preempting_flag_.get())
    preempting_flag_->Reset();
}
740
// Deferred destruction target posted by DestroySoon(); removing the channel
// from the manager is what actually tears it down.
void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}
745
// Dispatches channel-level (MSG_ROUTING_CONTROL) messages. Every control
// message must be handled here — an unhandled one trips the DCHECK below.
bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateVideoEncoder, OnCreateVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyVideoEncoder,
                        OnDestroyVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording,
                        OnDevToolsStartEventsRecording)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording,
                        OnDevToolsStopEventsRecording)
    IPC_MESSAGE_HANDLER(
        GpuChannelMsg_CollectRenderingStatsForSurface,
        OnCollectRenderingStatsForSurface)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}
768
// Drains deferred_messages_ one message at a time, stopping (and leaving the
// queue intact) whenever the target stub is unscheduled or preempted. Echo
// messages following a processed message are fast-tracked in the same task.
void GpuChannel::HandleMessage() {
  handle_messages_scheduled_ = false;
  if (deferred_messages_.empty())
    return;

  bool should_fast_track_ack = false;
  IPC::Message* m = deferred_messages_.front();
  // NULL for control messages (no per-route stub).
  GpuCommandBufferStub* stub = stubs_.Lookup(m->routing_id());

  do {
    if (stub) {
      // Leave the message queued; StubSchedulingChanged() reschedules us.
      if (!stub->IsScheduled())
        return;
      if (stub->IsPreempted()) {
        OnScheduled();
        return;
      }
    }

    scoped_ptr<IPC::Message> message(m);
    deferred_messages_.pop_front();
    bool message_processed = true;

    processed_get_state_fast_ =
        (message->type() == GpuCommandBufferMsg_GetStateFast::ID);

    // Exposed so RequeueMessage() can re-queue the in-flight message.
    currently_processing_message_ = message.get();
    bool result;
    if (message->routing_id() == MSG_ROUTING_CONTROL)
      result = OnControlMessageReceived(*message);
    else
      result = router_.RouteMessage(*message);
    currently_processing_message_ = NULL;

    if (!result) {
      // Respond to sync messages even if router failed to route.
      if (message->is_sync()) {
        IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
        reply->set_reply_error();
        Send(reply);
      }
    } else {
      // If the command buffer becomes unscheduled as a result of handling the
      // message but still has more commands to process, synthesize an IPC
      // message to flush that command buffer.
      if (stub) {
        if (stub->HasUnprocessedCommands()) {
          deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
              stub->route_id()));
          // Don't bump the processed count; the synthesized Rescheduled
          // message continues this message's work.
          message_processed = false;
        }
      }
    }
    if (message_processed)
      MessageProcessed();

    // We want the EchoACK following the SwapBuffers to be sent as close as
    // possible, avoiding scheduling other channels in the meantime.
    should_fast_track_ack = false;
    if (!deferred_messages_.empty()) {
      m = deferred_messages_.front();
      stub = stubs_.Lookup(m->routing_id());
      should_fast_track_ack =
          (m->type() == GpuCommandBufferMsg_Echo::ID) &&
          stub && stub->IsScheduled();
    }
  } while (should_fast_track_ack);

  if (!deferred_messages_.empty()) {
    OnScheduled();
  }
}
841
842 void GpuChannel::OnCreateOffscreenCommandBuffer(
843     const gfx::Size& size,
844     const GPUCreateCommandBufferConfig& init_params,
845     int32* route_id) {
846   TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
847   GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);
848
849   *route_id = GenerateRouteID();
850
851   scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
852       this,
853       share_group,
854       gfx::GLSurfaceHandle(),
855       mailbox_manager_.get(),
856       image_manager_.get(),
857       size,
858       disallowed_features_,
859       init_params.attribs,
860       init_params.gpu_preference,
861       false,
862       *route_id,
863       0,
864       watchdog_,
865       software_,
866       init_params.active_url));
867   if (preempted_flag_.get())
868     stub->SetPreemptByFlag(preempted_flag_);
869   router_.AddRoute(*route_id, stub.get());
870   stubs_.AddWithID(stub.release(), *route_id);
871   TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
872                "route_id", route_id);
873 }
874
875 void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
876   TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
877                "route_id", route_id);
878
879   GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
880   if (!stub)
881     return;
882   bool need_reschedule = (stub && !stub->IsScheduled());
883   router_.RemoveRoute(route_id);
884   stubs_.Remove(route_id);
885   // In case the renderer is currently blocked waiting for a sync reply from the
886   // stub, we need to make sure to reschedule the GpuChannel here.
887   if (need_reschedule) {
888     // This stub won't get a chance to reschedule, so update the count now.
889     StubSchedulingChanged(true);
890   }
891 }
892
// Creates a video-encode accelerator, routes it, and returns its route ID.
void GpuChannel::OnCreateVideoEncoder(int32* route_id) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateVideoEncoder");

  *route_id = GenerateRouteID();
  GpuVideoEncodeAccelerator* encoder =
      new GpuVideoEncodeAccelerator(this, *route_id);
  router_.AddRoute(*route_id, encoder);
  // video_encoders_ takes over the raw pointer added here.
  video_encoders_.AddWithID(encoder, *route_id);
}
902
// Tears down the encoder created by OnCreateVideoEncoder; ignores unknown
// route IDs.
void GpuChannel::OnDestroyVideoEncoder(int32 route_id) {
  TRACE_EVENT1(
      "gpu", "GpuChannel::OnDestroyVideoEncoder", "route_id", route_id);
  GpuVideoEncodeAccelerator* encoder = video_encoders_.Lookup(route_id);
  if (!encoder)
    return;
  router_.RemoveRoute(route_id);
  video_encoders_.Remove(route_id);
}
912
// Forwards to the DevTools agent created in Init().
void GpuChannel::OnDevToolsStartEventsRecording(int32* route_id) {
  devtools_gpu_agent_->StartEventsRecording(route_id);
}
916
// Forwards to the DevTools agent created in Init().
void GpuChannel::OnDevToolsStopEventsRecording() {
  devtools_gpu_agent_->StopEventsRecording();
}
920
// Aggregates per-stub decoder counters into |stats|: global_* fields sum over
// every stub on this channel, while the non-global fields count only the stub
// whose surface matches |surface_id|. Also attaches GPU-wide video memory use.
void GpuChannel::OnCollectRenderingStatsForSurface(
    int32 surface_id, GpuRenderingStats* stats) {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    int texture_upload_count =
        it.GetCurrentValue()->decoder()->GetTextureUploadCount();
    base::TimeDelta total_texture_upload_time =
        it.GetCurrentValue()->decoder()->GetTotalTextureUploadTime();
    base::TimeDelta total_processing_commands_time =
        it.GetCurrentValue()->decoder()->GetTotalProcessingCommandsTime();

    stats->global_texture_upload_count += texture_upload_count;
    stats->global_total_texture_upload_time += total_texture_upload_time;
    stats->global_total_processing_commands_time +=
        total_processing_commands_time;
    if (it.GetCurrentValue()->surface_id() == surface_id) {
      stats->texture_upload_count += texture_upload_count;
      stats->total_texture_upload_time += total_texture_upload_time;
      stats->total_processing_commands_time += total_processing_commands_time;
    }
  }

  GPUVideoMemoryUsageStats usage_stats;
  gpu_channel_manager_->gpu_memory_manager()->GetVideoMemoryUsageStats(
      &usage_stats);
  stats->global_video_memory_bytes_allocated = usage_stats.bytes_allocated;
}
948
// Bumps the processed-message counter and mirrors it to the IO-thread filter
// so it can retire the matching PendingMessage entries.
void GpuChannel::MessageProcessed() {
  messages_processed_++;
  if (preempting_flag_.get()) {
    io_message_loop_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannelMessageFilter::MessageProcessed,
                   filter_,
                   messages_processed_));
  }
}
959
// Forwards a compiled shader to the browser process for on-disk caching,
// tagged with this channel's client id.
void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}
965
// Installs an additional message filter on the underlying IPC channel.
void GpuChannel::AddFilter(IPC::ChannelProxy::MessageFilter* filter) {
  channel_->AddFilter(filter);
}
969
// Removes a filter previously installed with AddFilter().
void GpuChannel::RemoveFilter(IPC::ChannelProxy::MessageFilter* filter) {
  channel_->RemoveFilter(filter);
}
973
974 }  // namespace content