- add sources.
[platform/framework/web/crosswalk.git] / src / content / common / gpu / gpu_channel.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if defined(OS_WIN)
6 #include <windows.h>
7 #endif
8
9 #include "content/common/gpu/gpu_channel.h"
10
11 #include <queue>
12 #include <vector>
13
14 #include "base/bind.h"
15 #include "base/command_line.h"
16 #include "base/debug/trace_event.h"
17 #include "base/message_loop/message_loop_proxy.h"
18 #include "base/rand_util.h"
19 #include "base/strings/string_util.h"
20 #include "base/timer/timer.h"
21 #include "content/common/gpu/gpu_channel_manager.h"
22 #include "content/common/gpu/gpu_messages.h"
23 #include "content/common/gpu/media/gpu_video_encode_accelerator.h"
24 #include "content/common/gpu/sync_point_manager.h"
25 #include "content/public/common/content_switches.h"
26 #include "crypto/hmac.h"
27 #include "gpu/command_buffer/common/mailbox.h"
28 #include "gpu/command_buffer/service/gpu_scheduler.h"
29 #include "gpu/command_buffer/service/image_manager.h"
30 #include "gpu/command_buffer/service/mailbox_manager.h"
31 #include "ipc/ipc_channel.h"
32 #include "ipc/ipc_channel_proxy.h"
33 #include "ui/gl/gl_context.h"
34 #include "ui/gl/gl_image.h"
35 #include "ui/gl/gl_surface.h"
36
37 #if defined(OS_POSIX)
38 #include "ipc/ipc_channel_posix.h"
39 #endif
40
41 #if defined(OS_ANDROID)
42 #include "content/common/gpu/stream_texture_manager_android.h"
43 #endif
44
45 namespace content {
namespace {

// Preemption tuning constants. All are expressed in terms of the vsync
// interval so that preemption thresholds line up with frame boundaries.

// Number of milliseconds between successive vsync. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;

}  // anonymous namespace
66
67 // This filter does three things:
68 // - it counts and timestamps each message forwarded to the channel
69 //   so that we can preempt other channels if a message takes too long to
70 //   process. To guarantee fairness, we must wait a minimum amount of time
71 //   before preempting and we limit the amount of time that we can preempt in
72 //   one shot (see constants above).
73 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
74 //   thread, generating the sync point ID and responding immediately, and then
75 //   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
76 //   into the channel's queue.
77 // - it generates mailbox names for clients of the GPU process on the IO thread.
class GpuChannelMessageFilter : public IPC::ChannelProxy::MessageFilter {
 public:
  // Takes ownership of gpu_channel (see below).
  //
  // |private_key| seeds the HMAC used to sign generated mailbox names.
  // |gpu_channel| is a heap-allocated WeakPtr; it is only ever dereferenced
  // via tasks posted to |message_loop| (see the NOTE near gpu_channel_ below).
  GpuChannelMessageFilter(const std::string& private_key,
                          base::WeakPtr<GpuChannel>* gpu_channel,
                          scoped_refptr<SyncPointManager> sync_point_manager,
                          scoped_refptr<base::MessageLoopProxy> message_loop)
      : preemption_state_(IDLE),
        gpu_channel_(gpu_channel),
        channel_(NULL),
        sync_point_manager_(sync_point_manager),
        message_loop_(message_loop),
        messages_forwarded_to_channel_(0),
        a_stub_is_descheduled_(false),
        hmac_(crypto::HMAC::SHA256) {
    bool success = hmac_.Init(base::StringPiece(private_key));
    DCHECK(success);
  }

  virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
    DCHECK(!channel_);
    channel_ = channel;
  }

  virtual void OnFilterRemoved() OVERRIDE {
    DCHECK(channel_);
    channel_ = NULL;
  }

  // Runs on the IO thread for every message arriving on the channel.
  // Handles the mailbox-name messages directly, answers InsertSyncPoint
  // immediately (see class comment), and counts every message that is
  // forwarded on to the GpuChannel so preemption can be tracked.
  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE {
    DCHECK(channel_);

    bool handled = true;
    IPC_BEGIN_MESSAGE_MAP(GpuChannelMessageFilter, message)
      IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNames,
                          OnGenerateMailboxNames)
      IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNamesAsync,
                          OnGenerateMailboxNamesAsync)
      IPC_MESSAGE_UNHANDLED(handled = false)
    IPC_END_MESSAGE_MAP()

    if (message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) {
      // This message should not be sent explicitly by the renderer.
      NOTREACHED();
      handled = true;
    }

    // All other messages get processed by the GpuChannel.
    if (!handled) {
      messages_forwarded_to_channel_++;
      if (preempting_flag_.get())
        pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
      UpdatePreemptionState();
    }

    if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
      // Generate the sync point and reply right here on the IO thread so the
      // renderer is not blocked behind the channel's deferred-message queue;
      // retiring is then queued on the main thread in order with other
      // messages.
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      message_loop_->PostTask(FROM_HERE, base::Bind(
          &GpuChannelMessageFilter::InsertSyncPointOnMainThread,
          gpu_channel_,
          sync_point_manager_,
          message.routing_id(),
          sync_point));
      handled = true;
    }
    return handled;
  }

  // Called (via a task from the main thread) when the GpuChannel has finished
  // processing messages up to |messages_processed|; drops the corresponding
  // entries from the pending queue and re-evaluates preemption.
  void MessageProcessed(uint64 messages_processed) {
    while (!pending_messages_.empty() &&
           pending_messages_.front().message_number <= messages_processed)
      pending_messages_.pop();
    UpdatePreemptionState();
  }

  void SetPreemptingFlagAndSchedulingState(
      gpu::PreemptionFlag* preempting_flag,
      bool a_stub_is_descheduled) {
    preempting_flag_ = preempting_flag;
    a_stub_is_descheduled_ = a_stub_is_descheduled;
  }

  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

  bool Send(IPC::Message* message) {
    return channel_->Send(message);
  }

 protected:
  virtual ~GpuChannelMessageFilter() {
    // The WeakPtr we own must be destroyed on the thread it is bound to
    // (the GPU main thread), so hand it off rather than deleting it here.
    message_loop_->PostTask(FROM_HERE, base::Bind(
        &GpuChannelMessageFilter::DeleteWeakPtrOnMainThread, gpu_channel_));
  }

 private:
  // Message handlers.

  // Fills |result| with |num| freshly generated mailbox names. Each name is
  // half random bytes and half an HMAC signature of those bytes, so the GPU
  // process can later verify that a name was produced by itself.
  void OnGenerateMailboxNames(unsigned num, std::vector<gpu::Mailbox>* result) {
    TRACE_EVENT1("gpu", "OnGenerateMailboxNames", "num", num);

    result->resize(num);

    for (unsigned i = 0; i < num; ++i) {
      char name[GL_MAILBOX_SIZE_CHROMIUM];
      base::RandBytes(name, sizeof(name) / 2);

      bool success = hmac_.Sign(
          base::StringPiece(name, sizeof(name) / 2),
          reinterpret_cast<unsigned char*>(name) + sizeof(name) / 2,
          sizeof(name) / 2);
      DCHECK(success);

      (*result)[i].SetName(reinterpret_cast<int8*>(name));
    }
  }

  void OnGenerateMailboxNamesAsync(unsigned num) {
    std::vector<gpu::Mailbox> names;
    OnGenerateMailboxNames(num, &names);
    Send(new GpuChannelMsg_GenerateMailboxNamesReply(names));
  }

  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };

  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  // A forwarded message's sequence number plus the time it arrived on the
  // IO thread; used to measure how long the oldest unprocessed IPC has been
  // waiting.
  struct PendingMessage {
    uint64 message_number;
    base::TimeTicks time_received;

    explicit PendingMessage(uint64 message_number)
        : message_number(message_number),
          time_received(base::TimeTicks::Now()) {
    }
  };

  // Central dispatcher of the preemption state machine; every event
  // (message arrival, message processed, timer fire, scheduling change)
  // funnels through here.
  void UpdatePreemptionState() {
    switch (preemption_state_) {
      case IDLE:
        if (preempting_flag_.get() && !pending_messages_.empty())
          TransitionToWaiting();
        break;
      case WAITING:
        // A timer will transition us to CHECKING.
        DCHECK(timer_.IsRunning());
        break;
      case CHECKING:
        if (!pending_messages_.empty()) {
          base::TimeDelta time_elapsed =
              base::TimeTicks::Now() - pending_messages_.front().time_received;
          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
            // Schedule another check for when the IPC may go long.
            timer_.Start(
                FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                    time_elapsed,
                this, &GpuChannelMessageFilter::UpdatePreemptionState);
          } else {
            if (a_stub_is_descheduled_)
              TransitionToWouldPreemptDescheduled();
            else
              TransitionToPreempting();
          }
        }
        break;
      case PREEMPTING:
        // A TransitionToIdle() timer should always be running in this state.
        DCHECK(timer_.IsRunning());
        if (a_stub_is_descheduled_)
          TransitionToWouldPreemptDescheduled();
        else
          TransitionToIdleIfCaughtUp();
        break;
      case WOULD_PREEMPT_DESCHEDULED:
        // A TransitionToIdle() timer should never be running in this state.
        DCHECK(!timer_.IsRunning());
        if (!a_stub_is_descheduled_)
          TransitionToPreempting();
        else
          TransitionToIdleIfCaughtUp();
        break;
      default:
        NOTREACHED();
    }
  }

  void TransitionToIdleIfCaughtUp() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    if (pending_messages_.empty()) {
      TransitionToIdle();
    } else {
      base::TimeDelta time_elapsed =
          base::TimeTicks::Now() - pending_messages_.front().time_received;
      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
        TransitionToIdle();
    }
  }

  void TransitionToIdle() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
    timer_.Stop();

    preemption_state_ = IDLE;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  void TransitionToWaiting() {
    DCHECK_EQ(preemption_state_, IDLE);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = WAITING;
    timer_.Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
        this, &GpuChannelMessageFilter::TransitionToChecking);
  }

  void TransitionToChecking() {
    DCHECK_EQ(preemption_state_, WAITING);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = CHECKING;
    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
    UpdatePreemptionState();
  }

  void TransitionToPreempting() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    DCHECK(!a_stub_is_descheduled_);

    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    if (preemption_state_ == CHECKING)
      timer_.Stop();

    preemption_state_ = PREEMPTING;
    preempting_flag_->Set();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

    // Bound the preemption: fall back to IDLE after at most
    // |max_preemption_time_| even if the channel never catches up.
    timer_.Start(
       FROM_HERE,
       max_preemption_time_,
       this, &GpuChannelMessageFilter::TransitionToIdle);

    UpdatePreemptionState();
  }

  void TransitionToWouldPreemptDescheduled() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == PREEMPTING);
    DCHECK(a_stub_is_descheduled_);

    if (preemption_state_ == CHECKING) {
      // Stop any pending state update checks that we may have queued
      // while CHECKING.
      timer_.Stop();
    } else {
      // Stop any TransitionToIdle() timers that we may have queued
      // while PREEMPTING.
      timer_.Stop();
      // Bank the unused portion of the preemption budget so a later
      // TransitionToPreempting() resumes with only the remaining time.
      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
      if (max_preemption_time_ < base::TimeDelta()) {
        TransitionToIdle();
        return;
      }
    }

    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  static void InsertSyncPointOnMainThread(
      base::WeakPtr<GpuChannel>* gpu_channel,
      scoped_refptr<SyncPointManager> manager,
      int32 routing_id,
      uint32 sync_point) {
    // This function must ensure that the sync point will be retired. Normally
    // we'll find the stub based on the routing ID, and associate the sync point
    // with it, but if that fails for any reason (channel or stub already
    // deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (gpu_channel->get()) {
      GpuCommandBufferStub* stub = gpu_channel->get()->LookupCommandBuffer(
          routing_id);
      if (stub) {
        stub->AddSyncPoint(sync_point);
        GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
        gpu_channel->get()->OnMessageReceived(message);
        return;
      } else {
        // The filter already counted this InsertSyncPoint as forwarded;
        // account for it so the pending-message bookkeeping stays balanced.
        gpu_channel->get()->MessageProcessed();
      }
    }
    manager->RetireSyncPoint(sync_point);
  }

  static void DeleteWeakPtrOnMainThread(
      base::WeakPtr<GpuChannel>* gpu_channel) {
    delete gpu_channel;
  }

  // NOTE: this is a pointer to a weak pointer. It is never dereferenced on the
  // IO thread, it's only passed through - therefore the WeakPtr assumptions are
  // respected.
  base::WeakPtr<GpuChannel>* gpu_channel_;
  IPC::Channel* channel_;
  scoped_refptr<SyncPointManager> sync_point_manager_;
  scoped_refptr<base::MessageLoopProxy> message_loop_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::queue<PendingMessage> pending_messages_;

  // Count of the number of IPCs forwarded to the GpuChannel.
  uint64 messages_forwarded_to_channel_;

  // Single timer shared by all preemption states; each state transition
  // stops/restarts it for its own purpose.
  base::OneShotTimer<GpuChannelMessageFilter> timer_;

  bool a_stub_is_descheduled_;

  // Signs generated mailbox names so they can be authenticated later.
  crypto::HMAC hmac_;
};
432
// Constructs the channel state for one renderer client. |share_group| and
// |mailbox| may be NULL, in which case fresh per-channel instances are
// created. Does not create the IPC channel itself; see Init().
GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       int client_id,
                       bool software)
    : gpu_channel_manager_(gpu_channel_manager),
      messages_processed_(0),
      client_id_(client_id),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager),
      image_manager_(new gpu::gles2::ImageManager),
      watchdog_(watchdog),
      software_(software),
      handle_messages_scheduled_(false),
      processed_get_state_fast_(false),
      currently_processing_message_(NULL),
      weak_factory_(this),
      num_stubs_descheduled_(0) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
  const CommandLine* command_line = CommandLine::ForCurrentProcess();
  log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
  disallowed_features_.multisampling =
      command_line->HasSwitch(switches::kDisableGLMultisampling);
#if defined(OS_ANDROID)
  stream_texture_manager_.reset(new StreamTextureManagerAndroid(this));
#endif
}
464
465
466 bool GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
467                       base::WaitableEvent* shutdown_event) {
468   DCHECK(!channel_.get());
469
470   // Map renderer ID to a (single) channel to that process.
471   channel_.reset(new IPC::SyncChannel(
472       channel_id_,
473       IPC::Channel::MODE_SERVER,
474       this,
475       io_message_loop,
476       false,
477       shutdown_event));
478
479   base::WeakPtr<GpuChannel>* weak_ptr(new base::WeakPtr<GpuChannel>(
480       weak_factory_.GetWeakPtr()));
481
482   filter_ = new GpuChannelMessageFilter(
483       mailbox_manager_->private_key(),
484       weak_ptr,
485       gpu_channel_manager_->sync_point_manager(),
486       base::MessageLoopProxy::current());
487   io_message_loop_ = io_message_loop;
488   channel_->AddFilter(filter_.get());
489
490   return true;
491 }
492
// Returns the verified channel ID generated in the constructor; this is the
// name the renderer uses to connect.
std::string GpuChannel::GetChannelName() {
  return channel_id_;
}
496
#if defined(OS_POSIX)
// Transfers ownership of the client-side channel fd to the caller, or -1 if
// the channel was never initialized (which is a programming error).
int GpuChannel::TakeRendererFileDescriptor() {
  if (channel_)
    return channel_->TakeClientFileDescriptor();
  NOTREACHED();
  return -1;
}
#endif  // defined(OS_POSIX)
506
// Receives messages forwarded from the IO-thread filter. Messages are never
// handled inline; they are copied onto |deferred_messages_| and drained by
// HandleMessage() so that stub scheduling/preemption can pause the queue.
bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  if (log_messages_) {
    DVLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
  }

  if (message.type() == GpuCommandBufferMsg_GetStateFast::ID) {
    if (processed_get_state_fast_) {
      // Require a non-GetStateFast message in between two GetStateFast
      // messages, to ensure progress is made.
      std::deque<IPC::Message*>::iterator point = deferred_messages_.begin();

      // Skip over any GetStateFast messages already at the head of the
      // queue...
      while (point != deferred_messages_.end() &&
             (*point)->type() == GpuCommandBufferMsg_GetStateFast::ID) {
        ++point;
      }

      // ...and insert after the first non-GetStateFast message (if any),
      // guaranteeing at least one other message runs first.
      if (point != deferred_messages_.end()) {
        ++point;
      }

      deferred_messages_.insert(point, new IPC::Message(message));
    } else {
      // Move GetStateFast commands to the head of the queue, so the renderer
      // doesn't have to wait any longer than necessary.
      deferred_messages_.push_front(new IPC::Message(message));
    }
  } else {
    deferred_messages_.push_back(new IPC::Message(message));
  }

  OnScheduled();

  return true;
}
542
// The renderer went away (or the pipe broke); tear this channel down.
// Note: RemoveChannel ultimately destroys |this|.
void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}
546
547 bool GpuChannel::Send(IPC::Message* message) {
548   // The GPU process must never send a synchronous IPC message to the renderer
549   // process. This could result in deadlock.
550   DCHECK(!message->is_sync());
551   if (log_messages_) {
552     DVLOG(1) << "sending message @" << message << " on channel @" << this
553              << " with type " << message->type();
554   }
555
556   if (!channel_) {
557     delete message;
558     return false;
559   }
560
561   return channel_->Send(message);
562 }
563
// Puts a copy of the message currently being dispatched back at the head of
// the deferred queue so it is retried later, and rolls back the processed
// counter so the IO-thread filter's preemption bookkeeping stays in sync.
void GpuChannel::RequeueMessage() {
  DCHECK(currently_processing_message_);
  deferred_messages_.push_front(
      new IPC::Message(*currently_processing_message_));
  messages_processed_--;
  currently_processing_message_ = NULL;
}
571
572 void GpuChannel::OnScheduled() {
573   if (handle_messages_scheduled_)
574     return;
575   // Post a task to handle any deferred messages. The deferred message queue is
576   // not emptied here, which ensures that OnMessageReceived will continue to
577   // defer newly received messages until the ones in the queue have all been
578   // handled by HandleMessage. HandleMessage is invoked as a
579   // task to prevent reentrancy.
580   base::MessageLoop::current()->PostTask(
581       FROM_HERE,
582       base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
583   handle_messages_scheduled_ = true;
584 }
585
586 void GpuChannel::StubSchedulingChanged(bool scheduled) {
587   bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
588   if (scheduled) {
589     num_stubs_descheduled_--;
590     OnScheduled();
591   } else {
592     num_stubs_descheduled_++;
593   }
594   DCHECK_LE(num_stubs_descheduled_, stubs_.size());
595   bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;
596
597   if (a_stub_is_descheduled != a_stub_was_descheduled) {
598     if (preempting_flag_.get()) {
599       io_message_loop_->PostTask(
600           FROM_HERE,
601           base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
602                      filter_,
603                      a_stub_is_descheduled));
604     }
605   }
606 }
607
608 void GpuChannel::CreateViewCommandBuffer(
609     const gfx::GLSurfaceHandle& window,
610     int32 surface_id,
611     const GPUCreateCommandBufferConfig& init_params,
612     int32* route_id) {
613   TRACE_EVENT1("gpu",
614                "GpuChannel::CreateViewCommandBuffer",
615                "surface_id",
616                surface_id);
617
618   *route_id = MSG_ROUTING_NONE;
619
620   GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);
621
622   // Virtualize compositor contexts on OS X to prevent performance regressions
623   // when enabling FCM.
624   // http://crbug.com/180463
625   bool use_virtualized_gl_context = false;
626 #if defined(OS_MACOSX)
627   use_virtualized_gl_context = true;
628 #endif
629
630   *route_id = GenerateRouteID();
631   scoped_ptr<GpuCommandBufferStub> stub(
632       new GpuCommandBufferStub(this,
633                                share_group,
634                                window,
635                                mailbox_manager_.get(),
636                                image_manager_.get(),
637                                gfx::Size(),
638                                disallowed_features_,
639                                init_params.attribs,
640                                init_params.gpu_preference,
641                                use_virtualized_gl_context,
642                                *route_id,
643                                surface_id,
644                                watchdog_,
645                                software_,
646                                init_params.active_url));
647   if (preempted_flag_.get())
648     stub->SetPreemptByFlag(preempted_flag_);
649   router_.AddRoute(*route_id, stub.get());
650   stubs_.AddWithID(stub.release(), *route_id);
651 }
652
// Returns the stub registered under |route_id|, or NULL if none exists.
GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.Lookup(route_id);
}
656
657 void GpuChannel::CreateImage(
658     gfx::PluginWindowHandle window,
659     int32 image_id,
660     gfx::Size* size) {
661   TRACE_EVENT1("gpu",
662                "GpuChannel::CreateImage",
663                "image_id",
664                image_id);
665
666   *size = gfx::Size();
667
668   if (image_manager_->LookupImage(image_id)) {
669     LOG(ERROR) << "CreateImage failed, image_id already in use.";
670     return;
671   }
672
673   scoped_refptr<gfx::GLImage> image = gfx::GLImage::CreateGLImage(window);
674   if (!image.get())
675     return;
676
677   image_manager_->AddImage(image.get(), image_id);
678   *size = image->GetSize();
679 }
680
// Unregisters the image created by CreateImage for |image_id|.
void GpuChannel::DeleteImage(int32 image_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::DeleteImage",
               "image_id",
               image_id);

  image_manager_->RemoveImage(image_id);
}
689
// Asks the manager to drop every context in the process, not just this
// channel's.
void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}
693
// Flags every stub on this channel as having a lost context.
void GpuChannel::MarkAllContextsLost() {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->MarkContextLost();
  }
}
700
// Schedules self-destruction via the channel manager on a fresh task.
// NOTE(review): binds raw |this| rather than a WeakPtr — presumably safe
// because destruction only happens through this path; confirm against the
// header's lifetime comments.
void GpuChannel::DestroySoon() {
  base::MessageLoop::current()->PostTask(
      FROM_HERE, base::Bind(&GpuChannel::OnDestroy, this));
}
705
706 int GpuChannel::GenerateRouteID() {
707   static int last_id = 0;
708   return ++last_id;
709 }
710
// Registers |listener| to receive messages routed to |route_id|.
void GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  router_.AddRoute(route_id, listener);
}
714
// Removes the listener registered under |route_id|, if any.
void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}
718
// Lazily creates the flag other channels poll to know when they should yield
// to this one, and hands it (plus the current scheduling state) to the
// IO-thread filter, which owns the preemption state machine.
gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_message_loop_->PostTask(
        FROM_HERE, base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}
729
// Stores the flag this channel must honor (set by a higher-priority channel)
// and propagates it to all existing stubs; newly created stubs pick it up in
// the Create*CommandBuffer paths.
void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
  }
}
739
GpuChannel::~GpuChannel() {
  // Make sure other channels stop yielding to this one once it is gone.
  if (preempting_flag_.get())
    preempting_flag_->Reset();
}
744
// Task target for DestroySoon(); RemoveChannel destroys |this|.
void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}
749
// Dispatches channel-level (MSG_ROUTING_CONTROL) messages to their handlers.
// Stub-level messages never reach here; they go through |router_|.
bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateVideoEncoder, OnCreateVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyVideoEncoder,
                        OnDestroyVideoEncoder)
#if defined(OS_ANDROID)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_RegisterStreamTextureProxy,
                        OnRegisterStreamTextureProxy)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_EstablishStreamTexture,
                        OnEstablishStreamTexture)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_SetStreamTextureSize,
                        OnSetStreamTextureSize)
#endif
    IPC_MESSAGE_HANDLER(
        GpuChannelMsg_CollectRenderingStatsForSurface,
        OnCollectRenderingStatsForSurface)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  // Every control message this channel can receive must have a handler.
  DCHECK(handled) << msg.type();
  return handled;
}
776
// Drains the deferred-message queue. Normally processes one message per
// task invocation, but keeps going ("fast track") while the next message is
// an Echo for a scheduled stub, so SwapBuffers ACKs are not delayed behind
// other channels' tasks. Returns early (without rescheduling) if the target
// stub is unscheduled — StubSchedulingChanged restarts the pump later.
void GpuChannel::HandleMessage() {
  handle_messages_scheduled_ = false;
  if (deferred_messages_.empty())
    return;

  bool should_fast_track_ack = false;
  IPC::Message* m = deferred_messages_.front();
  GpuCommandBufferStub* stub = stubs_.Lookup(m->routing_id());

  do {
    if (stub) {
      if (!stub->IsScheduled())
        return;
      if (stub->IsPreempted()) {
        // Yield to the preempting channel, but come back for this message.
        OnScheduled();
        return;
      }
    }

    scoped_ptr<IPC::Message> message(m);
    deferred_messages_.pop_front();
    bool message_processed = true;

    processed_get_state_fast_ =
        (message->type() == GpuCommandBufferMsg_GetStateFast::ID);

    // Tracked so RequeueMessage() can put a copy back if dispatch decides
    // the message must be retried.
    currently_processing_message_ = message.get();
    bool result;
    if (message->routing_id() == MSG_ROUTING_CONTROL)
      result = OnControlMessageReceived(*message);
    else
      result = router_.RouteMessage(*message);
    currently_processing_message_ = NULL;

    if (!result) {
      // Respond to sync messages even if router failed to route.
      if (message->is_sync()) {
        IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
        reply->set_reply_error();
        Send(reply);
      }
    } else {
      // If the command buffer becomes unscheduled as a result of handling the
      // message but still has more commands to process, synthesize an IPC
      // message to flush that command buffer.
      if (stub) {
        if (stub->HasUnprocessedCommands()) {
          deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
              stub->route_id()));
          message_processed = false;
        }
      }
    }
    // Tell the IO-thread filter we caught up by one message so it can update
    // its preemption bookkeeping.
    if (message_processed)
      MessageProcessed();

    // We want the EchoACK following the SwapBuffers to be sent as close as
    // possible, avoiding scheduling other channels in the meantime.
    should_fast_track_ack = false;
    if (!deferred_messages_.empty()) {
      m = deferred_messages_.front();
      stub = stubs_.Lookup(m->routing_id());
      should_fast_track_ack =
          (m->type() == GpuCommandBufferMsg_Echo::ID) &&
          stub && stub->IsScheduled();
    }
  } while (should_fast_track_ack);

  if (!deferred_messages_.empty()) {
    OnScheduled();
  }
}
849
850 void GpuChannel::OnCreateOffscreenCommandBuffer(
851     const gfx::Size& size,
852     const GPUCreateCommandBufferConfig& init_params,
853     int32* route_id) {
854   TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
855   GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);
856
857   *route_id = GenerateRouteID();
858
859   scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
860       this,
861       share_group,
862       gfx::GLSurfaceHandle(),
863       mailbox_manager_.get(),
864       image_manager_.get(),
865       size,
866       disallowed_features_,
867       init_params.attribs,
868       init_params.gpu_preference,
869       false,
870       *route_id,
871       0,
872       watchdog_,
873       software_,
874       init_params.active_url));
875   if (preempted_flag_.get())
876     stub->SetPreemptByFlag(preempted_flag_);
877   router_.AddRoute(*route_id, stub.get());
878   stubs_.AddWithID(stub.release(), *route_id);
879   TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
880                "route_id", route_id);
881 }
882
883 void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
884   TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
885                "route_id", route_id);
886
887   GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
888   if (!stub)
889     return;
890   bool need_reschedule = (stub && !stub->IsScheduled());
891   router_.RemoveRoute(route_id);
892   stubs_.Remove(route_id);
893   // In case the renderer is currently blocked waiting for a sync reply from the
894   // stub, we need to make sure to reschedule the GpuChannel here.
895   if (need_reschedule) {
896     // This stub won't get a chance to reschedule, so update the count now.
897     StubSchedulingChanged(true);
898   }
899 }
900
901 void GpuChannel::OnCreateVideoEncoder(int32* route_id) {
902   TRACE_EVENT0("gpu", "GpuChannel::OnCreateVideoEncoder");
903
904   *route_id = GenerateRouteID();
905   GpuVideoEncodeAccelerator* encoder =
906       new GpuVideoEncodeAccelerator(this, *route_id);
907   router_.AddRoute(*route_id, encoder);
908   video_encoders_.AddWithID(encoder, *route_id);
909 }
910
911 void GpuChannel::OnDestroyVideoEncoder(int32 route_id) {
912   TRACE_EVENT1(
913       "gpu", "GpuChannel::OnDestroyVideoEncoder", "route_id", route_id);
914   GpuVideoEncodeAccelerator* encoder = video_encoders_.Lookup(route_id);
915   if (!encoder)
916     return;
917   router_.RemoveRoute(route_id);
918   video_encoders_.Remove(route_id);
919 }
920
921 #if defined(OS_ANDROID)
922 void GpuChannel::OnRegisterStreamTextureProxy(
923     int32 stream_id, int32* route_id) {
924   // Note that route_id is only used for notifications sent out from here.
925   // StreamTextureManager owns all texture objects and for incoming messages
926   // it finds the correct object based on stream_id.
927   *route_id = GenerateRouteID();
928   stream_texture_manager_->RegisterStreamTextureProxy(stream_id, *route_id);
929 }
930
931 void GpuChannel::OnEstablishStreamTexture(
932     int32 stream_id, int32 primary_id, int32 secondary_id) {
933   stream_texture_manager_->EstablishStreamTexture(
934       stream_id, primary_id, secondary_id);
935 }
936
937 void GpuChannel::OnSetStreamTextureSize(
938     int32 stream_id, const gfx::Size& size) {
939   stream_texture_manager_->SetStreamTextureSize(stream_id, size);
940 }
941 #endif
942
943 void GpuChannel::OnCollectRenderingStatsForSurface(
944     int32 surface_id, GpuRenderingStats* stats) {
945   for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
946        !it.IsAtEnd(); it.Advance()) {
947     int texture_upload_count =
948         it.GetCurrentValue()->decoder()->GetTextureUploadCount();
949     base::TimeDelta total_texture_upload_time =
950         it.GetCurrentValue()->decoder()->GetTotalTextureUploadTime();
951     base::TimeDelta total_processing_commands_time =
952         it.GetCurrentValue()->decoder()->GetTotalProcessingCommandsTime();
953
954     stats->global_texture_upload_count += texture_upload_count;
955     stats->global_total_texture_upload_time += total_texture_upload_time;
956     stats->global_total_processing_commands_time +=
957         total_processing_commands_time;
958     if (it.GetCurrentValue()->surface_id() == surface_id) {
959       stats->texture_upload_count += texture_upload_count;
960       stats->total_texture_upload_time += total_texture_upload_time;
961       stats->total_processing_commands_time += total_processing_commands_time;
962     }
963   }
964
965   GPUVideoMemoryUsageStats usage_stats;
966   gpu_channel_manager_->gpu_memory_manager()->GetVideoMemoryUsageStats(
967       &usage_stats);
968   stats->global_video_memory_bytes_allocated = usage_stats.bytes_allocated;
969 }
970
971 void GpuChannel::MessageProcessed() {
972   messages_processed_++;
973   if (preempting_flag_.get()) {
974     io_message_loop_->PostTask(
975         FROM_HERE,
976         base::Bind(&GpuChannelMessageFilter::MessageProcessed,
977                    filter_,
978                    messages_processed_));
979   }
980 }
981
982 void GpuChannel::CacheShader(const std::string& key,
983                              const std::string& shader) {
984   gpu_channel_manager_->Send(
985       new GpuHostMsg_CacheShader(client_id_, key, shader));
986 }
987
// Forwards |filter| to the underlying IPC channel proxy so it is installed
// on the channel.
void GpuChannel::AddFilter(IPC::ChannelProxy::MessageFilter* filter) {
  channel_->AddFilter(filter);
}
991
// Removes a previously added |filter| from the underlying IPC channel proxy.
void GpuChannel::RemoveFilter(IPC::ChannelProxy::MessageFilter* filter) {
  channel_->RemoveFilter(filter);
}
995
996 }  // namespace content