1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
9 #include "content/common/gpu/gpu_channel.h"
14 #include "base/bind.h"
15 #include "base/command_line.h"
16 #include "base/debug/trace_event.h"
17 #include "base/message_loop/message_loop_proxy.h"
18 #include "base/strings/string_util.h"
19 #include "base/timer/timer.h"
20 #include "content/common/gpu/devtools_gpu_agent.h"
21 #include "content/common/gpu/gpu_channel_manager.h"
22 #include "content/common/gpu/gpu_messages.h"
23 #include "content/common/gpu/media/gpu_video_encode_accelerator.h"
24 #include "content/common/gpu/sync_point_manager.h"
25 #include "content/public/common/content_switches.h"
26 #include "gpu/command_buffer/common/mailbox.h"
27 #include "gpu/command_buffer/service/gpu_scheduler.h"
28 #include "gpu/command_buffer/service/image_manager.h"
29 #include "gpu/command_buffer/service/mailbox_manager.h"
30 #include "ipc/ipc_channel.h"
31 #include "ipc/ipc_channel_proxy.h"
32 #include "ui/gl/gl_context.h"
33 #include "ui/gl/gl_image.h"
34 #include "ui/gl/gl_surface.h"
37 #include "ipc/ipc_channel_posix.h"
// Preemption timing constants, all in milliseconds and expressed as
// multiples of the assumed vsync interval below.
43 // Number of milliseconds between successive vsync. Many GL commands block
44 // on vsync, so thresholds for preemption should be multiples of this.
45 const int64 kVsyncIntervalMs = 17;
47 // Amount of time that we will wait for an IPC to be processed before
48 // preempting. After a preemption, we must wait this long before triggering
49 // another preemption.
50 const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;
52 // Once we trigger a preemption, the maximum duration that we will wait
53 // before clearing the preemption.
54 const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;
56 // Stop the preemption once the time for the longest pending IPC drops
57 // below this threshold.
58 const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;
60 } // anonymous namespace
62 // This filter does three things:
63 // - it counts and timestamps each message forwarded to the channel
64 // so that we can preempt other channels if a message takes too long to
65 // process. To guarantee fairness, we must wait a minimum amount of time
66 // before preempting and we limit the amount of time that we can preempt in
67 // one shot (see constants above).
68 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
69 // thread, generating the sync point ID and responding immediately, and then
70 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
71 // into the channel's queue.
72 // - it generates mailbox names for clients of the GPU process on the IO thread.
// IO-thread filter installed on the GPU channel (see the comment block
// above): it counts and timestamps forwarded messages so that other
// channels can be preempted, answers GpuCommandBufferMsg_InsertSyncPoint
// directly on the IO thread, and posts everything that needs the GpuChannel
// back to the main-thread message loop.
// NOTE(review): gaps in the embedded original line numbers show that parts
// of this class were elided from this listing; several bodies below are
// incomplete as shown.
73 class GpuChannelMessageFilter : public IPC::ChannelProxy::MessageFilter {
75 // Takes ownership of gpu_channel (see below).
76 GpuChannelMessageFilter(base::WeakPtr<GpuChannel>* gpu_channel,
77 scoped_refptr<SyncPointManager> sync_point_manager,
78 scoped_refptr<base::MessageLoopProxy> message_loop)
79 : preemption_state_(IDLE),
80 gpu_channel_(gpu_channel),
82 sync_point_manager_(sync_point_manager),
83 message_loop_(message_loop),
84 messages_forwarded_to_channel_(0),
85 a_stub_is_descheduled_(false) {
88 virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
93 virtual void OnFilterRemoved() OVERRIDE {
// Runs on the IO thread for every incoming message before the channel
// sees it. Rejects client-sent RetireSyncPoint, bumps the forwarded-message
// counter for preemption bookkeeping, and short-circuits InsertSyncPoint.
98 virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE {
101 bool handled = false;
102 if (message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) {
103 // This message should not be sent explicitly by the renderer.
104 DLOG(ERROR) << "Client should not send "
105 "GpuCommandBufferMsg_RetireSyncPoint message";
109 // All other messages get processed by the GpuChannel.
111 messages_forwarded_to_channel_++;
// Only track pending messages while some other channel has asked us to
// preempt; otherwise the queue is not needed.
112 if (preempting_flag_.get())
113 pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
114 UpdatePreemptionState();
// InsertSyncPoint: generate the ID and reply immediately on the IO thread,
// then post the retire work to the main thread (see
// InsertSyncPointOnMainThread below).
117 if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
118 uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
119 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
120 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
122 message_loop_->PostTask(FROM_HERE, base::Bind(
123 &GpuChannelMessageFilter::InsertSyncPointOnMainThread,
126 message.routing_id(),
// Called (via PostTask from GpuChannel::MessageProcessed) each time the
// main thread finishes a message; drops all pending entries that have now
// been processed and re-evaluates preemption.
133 void MessageProcessed(uint64 messages_processed) {
134 while (!pending_messages_.empty() &&
135 pending_messages_.front().message_number <= messages_processed)
136 pending_messages_.pop();
137 UpdatePreemptionState();
140 void SetPreemptingFlagAndSchedulingState(
141 gpu::PreemptionFlag* preempting_flag,
142 bool a_stub_is_descheduled) {
143 preempting_flag_ = preempting_flag;
144 a_stub_is_descheduled_ = a_stub_is_descheduled;
147 void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
148 a_stub_is_descheduled_ = a_stub_is_descheduled;
149 UpdatePreemptionState();
152 bool Send(IPC::Message* message) {
153 return channel_->Send(message);
// On destruction, the heap-allocated WeakPtr handed to the constructor is
// deleted on the main thread, where it is safe to touch (see the note on
// gpu_channel_ below).
157 virtual ~GpuChannelMessageFilter() {
158 message_loop_->PostTask(FROM_HERE, base::Bind(
159 &GpuChannelMessageFilter::DeleteWeakPtrOnMainThread, gpu_channel_));
// States of the preemption state machine driven by UpdatePreemptionState()
// and the TransitionTo*() helpers below.
163 enum PreemptionState {
164 // Either there's no other channel to preempt, there are no messages
165 // pending processing, or we just finished preempting and have to wait
166 // before preempting again.
168 // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
170 // We can preempt whenever any IPC processing takes more than
171 // kPreemptWaitTimeMs.
173 // We are currently preempting (i.e. no stub is descheduled).
175 // We would like to preempt, but some stub is descheduled.
176 WOULD_PREEMPT_DESCHEDULED,
179 PreemptionState preemption_state_;
181 // Maximum amount of time that we can spend in PREEMPTING.
182 // It is reset when we transition to IDLE.
183 base::TimeDelta max_preemption_time_;
// One entry per message forwarded to the channel while preemption is
// enabled; time_received drives the wait/stop thresholds.
185 struct PendingMessage {
186 uint64 message_number;
187 base::TimeTicks time_received;
189 explicit PendingMessage(uint64 message_number)
190 : message_number(message_number),
191 time_received(base::TimeTicks::Now()) {
// Central dispatcher: inspects the current state, the pending-message queue
// and a_stub_is_descheduled_, and performs the appropriate transition.
195 void UpdatePreemptionState() {
196 switch (preemption_state_) {
198 if (preempting_flag_.get() && !pending_messages_.empty())
199 TransitionToWaiting();
202 // A timer will transition us to CHECKING.
203 DCHECK(timer_.IsRunning());
206 if (!pending_messages_.empty()) {
207 base::TimeDelta time_elapsed =
208 base::TimeTicks::Now() - pending_messages_.front().time_received;
209 if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
210 // Schedule another check for when the IPC may go long.
213 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
215 this, &GpuChannelMessageFilter::UpdatePreemptionState);
217 if (a_stub_is_descheduled_)
218 TransitionToWouldPreemptDescheduled();
220 TransitionToPreempting();
225 // A TransitionToIdle() timer should always be running in this state.
226 DCHECK(timer_.IsRunning());
227 if (a_stub_is_descheduled_)
228 TransitionToWouldPreemptDescheduled();
230 TransitionToIdleIfCaughtUp();
232 case WOULD_PREEMPT_DESCHEDULED:
233 // A TransitionToIdle() timer should never be running in this state.
234 DCHECK(!timer_.IsRunning());
235 if (!a_stub_is_descheduled_)
236 TransitionToPreempting();
238 TransitionToIdleIfCaughtUp();
// Returns to IDLE when the queue is empty or the oldest pending IPC is
// younger than kStopPreemptThresholdMs.
245 void TransitionToIdleIfCaughtUp() {
246 DCHECK(preemption_state_ == PREEMPTING ||
247 preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
248 if (pending_messages_.empty()) {
251 base::TimeDelta time_elapsed =
252 base::TimeTicks::Now() - pending_messages_.front().time_received;
253 if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
258 void TransitionToIdle() {
259 DCHECK(preemption_state_ == PREEMPTING ||
260 preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
261 // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
264 preemption_state_ = IDLE;
265 preempting_flag_->Reset();
266 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
268 UpdatePreemptionState();
271 void TransitionToWaiting() {
272 DCHECK_EQ(preemption_state_, IDLE);
273 DCHECK(!timer_.IsRunning());
275 preemption_state_ = WAITING;
278 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
279 this, &GpuChannelMessageFilter::TransitionToChecking);
282 void TransitionToChecking() {
283 DCHECK_EQ(preemption_state_, WAITING);
284 DCHECK(!timer_.IsRunning());
286 preemption_state_ = CHECKING;
// Entering CHECKING grants a fresh preemption budget of kMaxPreemptTimeMs.
287 max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
288 UpdatePreemptionState();
// Raises the preemption flag and arms a timer that forces us back to IDLE
// after at most max_preemption_time_.
291 void TransitionToPreempting() {
292 DCHECK(preemption_state_ == CHECKING ||
293 preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
294 DCHECK(!a_stub_is_descheduled_);
296 // Stop any pending state update checks that we may have queued
298 if (preemption_state_ == CHECKING)
301 preemption_state_ = PREEMPTING;
302 preempting_flag_->Set();
303 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);
307 max_preemption_time_,
308 this, &GpuChannelMessageFilter::TransitionToIdle);
310 UpdatePreemptionState();
// A stub got descheduled while we were (about to be) preempting: lower the
// flag but remember how much preemption budget is left.
313 void TransitionToWouldPreemptDescheduled() {
314 DCHECK(preemption_state_ == CHECKING ||
315 preemption_state_ == PREEMPTING);
316 DCHECK(a_stub_is_descheduled_);
318 if (preemption_state_ == CHECKING) {
319 // Stop any pending state update checks that we may have queued
323 // Stop any TransitionToIdle() timers that we may have queued
// Preserve the unused portion of the preemption budget for when the stub
// is rescheduled.
326 max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
327 if (max_preemption_time_ < base::TimeDelta()) {
333 preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
334 preempting_flag_->Reset();
335 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
337 UpdatePreemptionState();
// Main-thread completion of an IO-thread InsertSyncPoint: associate the
// sync point with the stub and queue the retire message, or retire it
// directly if the channel/stub is gone.
340 static void InsertSyncPointOnMainThread(
341 base::WeakPtr<GpuChannel>* gpu_channel,
342 scoped_refptr<SyncPointManager> manager,
345 // This function must ensure that the sync point will be retired. Normally
346 // we'll find the stub based on the routing ID, and associate the sync point
347 // with it, but if that fails for any reason (channel or stub already
348 // deleted, invalid routing id), we need to retire the sync point
350 if (gpu_channel->get()) {
351 GpuCommandBufferStub* stub = gpu_channel->get()->LookupCommandBuffer(
354 stub->AddSyncPoint(sync_point);
355 GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
356 gpu_channel->get()->OnMessageReceived(message);
359 gpu_channel->get()->MessageProcessed();
// Fallback: channel or stub already gone — retire immediately so waiters
// are not stranded.
362 manager->RetireSyncPoint(sync_point);
365 static void DeleteWeakPtrOnMainThread(
366 base::WeakPtr<GpuChannel>* gpu_channel) {
370 // NOTE: this is a pointer to a weak pointer. It is never dereferenced on the
371 // IO thread, it's only passed through - therefore the WeakPtr assumptions are
373 base::WeakPtr<GpuChannel>* gpu_channel_;
374 IPC::Channel* channel_;
375 scoped_refptr<SyncPointManager> sync_point_manager_;
376 scoped_refptr<base::MessageLoopProxy> message_loop_;
377 scoped_refptr<gpu::PreemptionFlag> preempting_flag_;
379 std::queue<PendingMessage> pending_messages_;
381 // Count of the number of IPCs forwarded to the GpuChannel.
382 uint64 messages_forwarded_to_channel_;
// One timer serves all states: WAITING->CHECKING, CHECKING re-checks, and
// the PREEMPTING->IDLE deadline.
384 base::OneShotTimer<GpuChannelMessageFilter> timer_;
386 bool a_stub_is_descheduled_;
// Main-thread GPU channel for one renderer client. Falls back to fresh
// share-group/mailbox-manager instances when none are supplied, generates a
// verified channel ID, and caches the --log-plugin-messages switch.
// NOTE(review): some initializer-list entries are elided in this listing
// (gaps in the embedded line numbers).
389 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
390 GpuWatchdog* watchdog,
391 gfx::GLShareGroup* share_group,
392 gpu::gles2::MailboxManager* mailbox,
395 : gpu_channel_manager_(gpu_channel_manager),
396 messages_processed_(0),
397 client_id_(client_id),
398 share_group_(share_group ? share_group : new gfx::GLShareGroup),
399 mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager),
400 image_manager_(new gpu::gles2::ImageManager),
403 handle_messages_scheduled_(false),
404 processed_get_state_fast_(false),
405 currently_processing_message_(NULL),
407 num_stubs_descheduled_(0) {
408 DCHECK(gpu_channel_manager);
411 channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
412 const CommandLine* command_line = CommandLine::ForCurrentProcess();
413 log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
// Creates the server-side sync channel, hands a heap-allocated WeakPtr to
// the IO-thread GpuChannelMessageFilter (which takes ownership of it), and
// sets up the DevTools GPU agent.
417 bool GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
418 base::WaitableEvent* shutdown_event) {
419 DCHECK(!channel_.get());
421 // Map renderer ID to a (single) channel to that process.
422 channel_.reset(new IPC::SyncChannel(
424 IPC::Channel::MODE_SERVER,
// The raw WeakPtr pointer is deliberate: the filter only passes it through
// on the IO thread and deletes it back on this thread (see the filter).
430 base::WeakPtr<GpuChannel>* weak_ptr(new base::WeakPtr<GpuChannel>(
431 weak_factory_.GetWeakPtr()));
433 filter_ = new GpuChannelMessageFilter(
435 gpu_channel_manager_->sync_point_manager(),
436 base::MessageLoopProxy::current());
437 io_message_loop_ = io_message_loop;
438 channel_->AddFilter(filter_.get());
440 devtools_gpu_agent_.reset(new DevToolsGpuAgent(this));
// Returns the channel's name (body elided in this listing; presumably
// channel_id_ — TODO confirm against the full source).
445 std::string GpuChannel::GetChannelName() {
// POSIX only: releases the client end of the channel's socketpair so it can
// be passed to the renderer process.
449 #if defined(OS_POSIX)
450 int GpuChannel::TakeRendererFileDescriptor() {
455 return channel_->TakeClientFileDescriptor();
457 #endif // defined(OS_POSIX)
// Main-thread entry for channel messages: everything is copied onto
// deferred_messages_ for later handling by HandleMessage(). GetStateFast
// messages jump the queue, but never ahead of another GetStateFast once one
// has just been processed, so other messages still make progress.
459 bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
461 DVLOG(1) << "received message @" << &message << " on channel @" << this
462 << " with type " << message.type();
465 if (message.type() == GpuCommandBufferMsg_GetStateFast::ID) {
466 if (processed_get_state_fast_) {
467 // Require a non-GetStateFast message in between two GetStateFast
468 // messages, to ensure progress is made.
469 std::deque<IPC::Message*>::iterator point = deferred_messages_.begin();
471 while (point != deferred_messages_.end() &&
472 (*point)->type() == GpuCommandBufferMsg_GetStateFast::ID) {
476 if (point != deferred_messages_.end()) {
480 deferred_messages_.insert(point, new IPC::Message(message));
482 // Move GetStateFast commands to the head of the queue, so the renderer
483 // doesn't have to wait any longer than necessary.
484 deferred_messages_.push_front(new IPC::Message(message));
487 deferred_messages_.push_back(new IPC::Message(message));
// Channel error => tear down this channel; RemoveChannel deletes us.
495 void GpuChannel::OnChannelError() {
496 gpu_channel_manager_->RemoveChannel(client_id_);
// Forwards an outgoing message to the underlying channel. Asserts the
// message is asynchronous (sync GPU->renderer IPC could deadlock).
499 bool GpuChannel::Send(IPC::Message* message) {
500 // The GPU process must never send a synchronous IPC message to the renderer
501 // process. This could result in deadlock.
502 DCHECK(!message->is_sync());
504 DVLOG(1) << "sending message @" << message << " on channel @" << this
505 << " with type " << message->type();
513 return channel_->Send(message);
// Puts a copy of the message currently being handled back at the head of
// the deferred queue so it is retried, and rolls back the processed count.
516 void GpuChannel::RequeueMessage() {
517 DCHECK(currently_processing_message_);
518 deferred_messages_.push_front(
519 new IPC::Message(*currently_processing_message_));
520 messages_processed_--;
521 currently_processing_message_ = NULL;
// Schedules a single HandleMessage task (at most one outstanding, guarded
// by handle_messages_scheduled_) to drain the deferred-message queue.
524 void GpuChannel::OnScheduled() {
525 if (handle_messages_scheduled_)
527 // Post a task to handle any deferred messages. The deferred message queue is
528 // not emptied here, which ensures that OnMessageReceived will continue to
529 // defer newly received messages until the ones in the queue have all been
530 // handled by HandleMessage. HandleMessage is invoked as a
531 // task to prevent reentrancy.
532 base::MessageLoop::current()->PostTask(
534 base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
535 handle_messages_scheduled_ = true;
// Tracks how many stubs are descheduled and, when the any-descheduled
// boolean flips, notifies the IO-thread filter so it can adjust preemption.
538 void GpuChannel::StubSchedulingChanged(bool scheduled) {
539 bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
541 num_stubs_descheduled_--;
544 num_stubs_descheduled_++;
546 DCHECK_LE(num_stubs_descheduled_, stubs_.size());
547 bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;
549 if (a_stub_is_descheduled != a_stub_was_descheduled) {
550 if (preempting_flag_.get()) {
551 io_message_loop_->PostTask(
553 base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
555 a_stub_is_descheduled));
// Creates an onscreen command buffer stub for the given window, registers
// it with the router and stub map, and returns its route ID via *route_id
// (MSG_ROUTING_NONE on the early-out paths elided from this listing).
560 void GpuChannel::CreateViewCommandBuffer(
561 const gfx::GLSurfaceHandle& window,
563 const GPUCreateCommandBufferConfig& init_params,
566 "GpuChannel::CreateViewCommandBuffer",
570 *route_id = MSG_ROUTING_NONE;
572 GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);
574 // Virtualize compositor contexts on OS X to prevent performance regressions
575 // when enabling FCM.
576 // http://crbug.com/180463
577 bool use_virtualized_gl_context = false;
578 #if defined(OS_MACOSX)
579 use_virtualized_gl_context = true;
582 *route_id = GenerateRouteID();
583 scoped_ptr<GpuCommandBufferStub> stub(
584 new GpuCommandBufferStub(this,
587 mailbox_manager_.get(),
588 image_manager_.get(),
590 disallowed_features_,
592 init_params.gpu_preference,
593 use_virtualized_gl_context,
598 init_params.active_url));
// New stubs inherit any preemption flag already imposed on this channel.
599 if (preempted_flag_.get())
600 stub->SetPreemptByFlag(preempted_flag_);
601 router_.AddRoute(*route_id, stub.get());
602 stubs_.AddWithID(stub.release(), *route_id);
// Returns the stub registered under route_id, or NULL if none.
605 GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
606 return stubs_.Lookup(route_id);
// Wraps a plugin window in a GLImage and registers it under image_id,
// returning its size via *size. Rejects duplicate image IDs.
609 void GpuChannel::CreateImage(
610 gfx::PluginWindowHandle window,
614 "GpuChannel::CreateImage",
620 if (image_manager_->LookupImage(image_id)) {
621 LOG(ERROR) << "CreateImage failed, image_id already in use.";
625 scoped_refptr<gfx::GLImage> image = gfx::GLImage::CreateGLImage(window);
629 image_manager_->AddImage(image.get(), image_id);
630 *size = image->GetSize();
// Unregisters the GLImage created by CreateImage for image_id.
633 void GpuChannel::DeleteImage(int32 image_id) {
635 "GpuChannel::DeleteImage",
639 image_manager_->RemoveImage(image_id);
// Delegates a whole-process context loss to the channel manager.
642 void GpuChannel::LoseAllContexts() {
643 gpu_channel_manager_->LoseAllContexts();
// Marks every stub's context lost without destroying the stubs themselves.
646 void GpuChannel::MarkAllContextsLost() {
647 for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
648 !it.IsAtEnd(); it.Advance()) {
649 it.GetCurrentValue()->MarkContextLost();
// Defers destruction to a fresh task so the channel is not torn down while
// still on the current call stack.
653 void GpuChannel::DestroySoon() {
654 base::MessageLoop::current()->PostTask(
655 FROM_HERE, base::Bind(&GpuChannel::OnDestroy, this));
// Hands out process-wide route IDs from a function-local static counter
// (return statement elided in this listing).
658 int32 GpuChannel::GenerateRouteID() {
659 static int32 last_id = 0;
// Registers a listener for route_id on this channel's router.
663 void GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
664 router_.AddRoute(route_id, listener);
// Unregisters the listener for route_id from this channel's router.
667 void GpuChannel::RemoveRoute(int32 route_id) {
668 router_.RemoveRoute(route_id);
// Lazily creates this channel's preempting flag and pushes it (plus the
// current scheduling state) to the IO-thread filter, which raises/lowers it.
671 gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
672 if (!preempting_flag_.get()) {
673 preempting_flag_ = new gpu::PreemptionFlag;
674 io_message_loop_->PostTask(
675 FROM_HERE, base::Bind(
676 &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
677 filter_, preempting_flag_, num_stubs_descheduled_ > 0));
679 return preempting_flag_.get();
// Stores the flag by which *this* channel can be preempted and propagates
// it to every existing stub (new stubs pick it up on creation).
682 void GpuChannel::SetPreemptByFlag(
683 scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
684 preempted_flag_ = preempted_flag;
686 for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
687 !it.IsAtEnd(); it.Advance()) {
688 it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
// Lower our preempting flag on destruction so other channels are not left
// permanently preempted by a dead channel.
692 GpuChannel::~GpuChannel() {
693 if (preempting_flag_.get())
694 preempting_flag_->Reset();
// Posted by DestroySoon(); asks the manager to remove (and delete) us.
697 void GpuChannel::OnDestroy() {
698 TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
699 gpu_channel_manager_->RemoveChannel(client_id_);
// Dispatch table for channel-level (MSG_ROUTING_CONTROL) messages. Every
// control message is expected to be handled (DCHECK below).
702 bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
704 IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
705 IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
706 OnCreateOffscreenCommandBuffer)
707 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
708 OnDestroyCommandBuffer)
709 IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateVideoEncoder, OnCreateVideoEncoder)
710 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyVideoEncoder,
711 OnDestroyVideoEncoder)
712 IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording,
713 OnDevToolsStartEventsRecording)
714 IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording,
715 OnDevToolsStopEventsRecording)
716 IPC_MESSAGE_UNHANDLED(handled = false)
717 IPC_END_MESSAGE_MAP()
718 DCHECK(handled) << msg.type();
// Drains deferred_messages_: pops the front message, routes it (control
// messages to OnControlMessageReceived, the rest through the router),
// synthesizes a Rescheduled flush if the stub deschedules mid-message, and
// fast-tracks an immediately-following Echo ack for a scheduled stub.
// NOTE(review): several lines (including the loop header and some early
// outs) are elided in this listing, so the exact control flow here is
// incomplete as shown.
722 void GpuChannel::HandleMessage() {
723 handle_messages_scheduled_ = false;
724 if (deferred_messages_.empty())
727 bool should_fast_track_ack = false;
728 IPC::Message* m = deferred_messages_.front();
729 GpuCommandBufferStub* stub = stubs_.Lookup(m->routing_id());
733 if (!stub->IsScheduled())
735 if (stub->IsPreempted()) {
741 scoped_ptr<IPC::Message> message(m);
742 deferred_messages_.pop_front();
743 bool message_processed = true;
745 processed_get_state_fast_ =
746 (message->type() == GpuCommandBufferMsg_GetStateFast::ID);
// Exposed so RequeueMessage() can copy the in-flight message back onto the
// queue; cleared again as soon as routing returns.
748 currently_processing_message_ = message.get();
750 if (message->routing_id() == MSG_ROUTING_CONTROL)
751 result = OnControlMessageReceived(*message);
753 result = router_.RouteMessage(*message);
754 currently_processing_message_ = NULL;
757 // Respond to sync messages even if router failed to route.
758 if (message->is_sync()) {
759 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
760 reply->set_reply_error();
764 // If the command buffer becomes unscheduled as a result of handling the
765 // message but still has more commands to process, synthesize an IPC
766 // message to flush that command buffer.
768 if (stub->HasUnprocessedCommands()) {
769 deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
771 message_processed = false;
775 if (message_processed)
778 // We want the EchoACK following the SwapBuffers to be sent as close as
779 // possible, avoiding scheduling other channels in the meantime.
780 should_fast_track_ack = false;
781 if (!deferred_messages_.empty()) {
782 m = deferred_messages_.front();
783 stub = stubs_.Lookup(m->routing_id());
784 should_fast_track_ack =
785 (m->type() == GpuCommandBufferMsg_Echo::ID) &&
786 stub && stub->IsScheduled();
788 } while (should_fast_track_ack);
790 if (!deferred_messages_.empty()) {
// Creates an offscreen command buffer stub (empty GLSurfaceHandle),
// registers it, and returns the new route ID via *route_id.
795 void GpuChannel::OnCreateOffscreenCommandBuffer(
796 const gfx::Size& size,
797 const GPUCreateCommandBufferConfig& init_params,
799 TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
800 GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);
802 *route_id = GenerateRouteID();
804 scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
807 gfx::GLSurfaceHandle(),
808 mailbox_manager_.get(),
809 image_manager_.get(),
811 disallowed_features_,
813 init_params.gpu_preference,
819 init_params.active_url));
// New stubs inherit any preemption flag already imposed on this channel.
820 if (preempted_flag_.get())
821 stub->SetPreemptByFlag(preempted_flag_);
822 router_.AddRoute(*route_id, stub.get());
823 stubs_.AddWithID(stub.release(), *route_id);
824 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
825 "route_id", route_id);
// Destroys the stub for route_id. If the stub was descheduled, its deletion
// would otherwise leave num_stubs_descheduled_ stuck, so reschedule here.
828 void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
829 TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
830 "route_id", route_id);
832 GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
835 bool need_reschedule = (stub && !stub->IsScheduled());
836 router_.RemoveRoute(route_id);
837 stubs_.Remove(route_id);
838 // In case the renderer is currently blocked waiting for a sync reply from the
839 // stub, we need to make sure to reschedule the GpuChannel here.
840 if (need_reschedule) {
841 // This stub won't get a chance to reschedule, so update the count now.
842 StubSchedulingChanged(true);
// Creates a GpuVideoEncodeAccelerator, routes it, and returns its route ID.
846 void GpuChannel::OnCreateVideoEncoder(int32* route_id) {
847 TRACE_EVENT0("gpu", "GpuChannel::OnCreateVideoEncoder");
849 *route_id = GenerateRouteID();
850 GpuVideoEncodeAccelerator* encoder =
851 new GpuVideoEncodeAccelerator(this, *route_id);
852 router_.AddRoute(*route_id, encoder);
853 video_encoders_.AddWithID(encoder, *route_id);
// Tears down the video encoder registered under route_id.
856 void GpuChannel::OnDestroyVideoEncoder(int32 route_id) {
858 "gpu", "GpuChannel::OnDestroyVideoEncoder", "route_id", route_id);
859 GpuVideoEncodeAccelerator* encoder = video_encoders_.Lookup(route_id);
862 router_.RemoveRoute(route_id);
863 video_encoders_.Remove(route_id);
// Delegates DevTools GPU event recording start to the agent.
866 void GpuChannel::OnDevToolsStartEventsRecording(int32* route_id) {
867 devtools_gpu_agent_->StartEventsRecording(route_id);
// Delegates DevTools GPU event recording stop to the agent.
870 void GpuChannel::OnDevToolsStopEventsRecording() {
871 devtools_gpu_agent_->StopEventsRecording();
// Bumps the processed-message counter and, when preemption is active,
// mirrors the new count to the IO-thread filter so it can drop caught-up
// entries from its pending queue.
874 void GpuChannel::MessageProcessed() {
875 messages_processed_++;
876 if (preempting_flag_.get()) {
877 io_message_loop_->PostTask(
879 base::Bind(&GpuChannelMessageFilter::MessageProcessed,
881 messages_processed_));
// Forwards a compiled shader to the browser process for disk caching.
885 void GpuChannel::CacheShader(const std::string& key,
886 const std::string& shader) {
887 gpu_channel_manager_->Send(
888 new GpuHostMsg_CacheShader(client_id_, key, shader));
// Installs an additional message filter on the underlying channel.
891 void GpuChannel::AddFilter(IPC::ChannelProxy::MessageFilter* filter) {
892 channel_->AddFilter(filter);
// Removes a previously installed message filter from the channel.
895 void GpuChannel::RemoveFilter(IPC::ChannelProxy::MessageFilter* filter) {
896 channel_->RemoveFilter(filter);
899 } // namespace content