1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
9 #include "content/common/gpu/gpu_channel.h"
14 #include "base/bind.h"
15 #include "base/command_line.h"
16 #include "base/debug/trace_event.h"
17 #include "base/message_loop/message_loop_proxy.h"
18 #include "base/stl_util.h"
19 #include "base/strings/string_util.h"
20 #include "base/timer/timer.h"
21 #include "content/common/gpu/devtools_gpu_agent.h"
22 #include "content/common/gpu/gpu_channel_manager.h"
23 #include "content/common/gpu/gpu_messages.h"
24 #include "content/common/gpu/sync_point_manager.h"
25 #include "content/public/common/content_switches.h"
26 #include "gpu/command_buffer/common/mailbox.h"
27 #include "gpu/command_buffer/service/gpu_scheduler.h"
28 #include "gpu/command_buffer/service/image_manager.h"
29 #include "gpu/command_buffer/service/mailbox_manager.h"
30 #include "ipc/ipc_channel.h"
31 #include "ipc/message_filter.h"
32 #include "ui/gl/gl_context.h"
33 #include "ui/gl/gl_image.h"
34 #include "ui/gl/gl_surface.h"
37 #include "ipc/ipc_channel_posix.h"
43 // Number of milliseconds between successive vsync. Many GL commands block
44 // on vsync, so thresholds for preemption should be multiples of this.
// 17ms ~= one frame at a 60Hz refresh rate. All of the thresholds below are
// expressed as multiples of this interval so preemption decisions line up
// with frame boundaries.
45 const int64 kVsyncIntervalMs = 17;
47 // Amount of time that we will wait for an IPC to be processed before
48 // preempting. After a preemption, we must wait this long before triggering
49 // another preemption.
50 const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;
52 // Once we trigger a preemption, the maximum duration that we will wait
53 // before clearing the preemption.
54 const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;
56 // Stop the preemption once the time for the longest pending IPC drops
57 // below this threshold.
58 const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;
60 } // anonymous namespace
62 // This filter does three things:
63 // - it counts and timestamps each message forwarded to the channel
64 // so that we can preempt other channels if a message takes too long to
65 // process. To guarantee fairness, we must wait a minimum amount of time
66 // before preempting and we limit the amount of time that we can preempt in
67 // one shot (see constants above).
68 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
69 // thread, generating the sync point ID and responding immediately, and then
70 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
71 // into the channel's queue.
72 // - it generates mailbox names for clients of the GPU process on the IO thread.
73 class GpuChannelMessageFilter : public IPC::MessageFilter {
// Constructed on the main thread. |gpu_channel| is only dereferenced on the
// main thread (see the NOTE next to |gpu_channel_| below); |message_loop| is
// the main-thread loop used to post InsertSyncPointOnMainThread tasks.
75 GpuChannelMessageFilter(base::WeakPtr<GpuChannel> gpu_channel,
76 scoped_refptr<SyncPointManager> sync_point_manager,
77 scoped_refptr<base::MessageLoopProxy> message_loop)
78 : preemption_state_(IDLE),
79 gpu_channel_(gpu_channel),
81 sync_point_manager_(sync_point_manager),
82 message_loop_(message_loop),
83 messages_forwarded_to_channel_(0),
84 a_stub_is_descheduled_(false) {}
// IPC::MessageFilter overrides; these run on the IO thread.
86 virtual void OnFilterAdded(IPC::Sender* sender) OVERRIDE {
91 virtual void OnFilterRemoved() OVERRIDE {
// Timestamps/counts each forwarded message so UpdatePreemptionState() can
// detect long-running IPCs, and handles InsertSyncPoint directly on the IO
// thread (replying immediately with the new sync point ID).
96 virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE {
100 if (message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) {
101 // This message should not be sent explicitly by the renderer.
102 DLOG(ERROR) << "Client should not send "
103 "GpuCommandBufferMsg_RetireSyncPoint message";
107 // All other messages get processed by the GpuChannel.
109 messages_forwarded_to_channel_++;
// Only track per-message timestamps when some other channel could actually
// be preempted via |preempting_flag_|.
110 if (preempting_flag_.get())
111 pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
112 UpdatePreemptionState();
115 if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
116 uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
// Reply to the renderer right away; actual retirement is queued on the
// main thread below so it stays ordered with the channel's messages.
117 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
118 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
120 message_loop_->PostTask(FROM_HERE, base::Bind(
121 &GpuChannelMessageFilter::InsertSyncPointOnMainThread,
124 message.routing_id(),
// Called (on the IO thread) when the main thread has finished processing
// messages up to |messages_processed|; drops their timestamps and
// re-evaluates whether preemption should continue.
131 void MessageProcessed(uint64 messages_processed) {
132 while (!pending_messages_.empty() &&
133 pending_messages_.front().message_number <= messages_processed)
134 pending_messages_.pop();
135 UpdatePreemptionState();
// Installs the flag that this channel sets to preempt others, along with the
// current descheduled state of the channel's stubs.
138 void SetPreemptingFlagAndSchedulingState(
139 gpu::PreemptionFlag* preempting_flag,
140 bool a_stub_is_descheduled) {
141 preempting_flag_ = preempting_flag;
142 a_stub_is_descheduled_ = a_stub_is_descheduled;
145 void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
146 a_stub_is_descheduled_ = a_stub_is_descheduled;
147 UpdatePreemptionState();
// Forwards through |sender_|; presumably only valid while the filter is
// attached to a channel (sender_ set in OnFilterAdded) — TODO confirm.
150 bool Send(IPC::Message* message) {
151 return sender_->Send(message);
155 virtual ~GpuChannelMessageFilter() {}
// State machine for deciding when to raise/lower |preempting_flag_|.
158 enum PreemptionState {
159 // Either there's no other channel to preempt, there are no messages
160 // pending processing, or we just finished preempting and have to wait
161 // before preempting again.
163 // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
165 // We can preempt whenever any IPC processing takes more than
166 // kPreemptWaitTimeMs.
168 // We are currently preempting (i.e. no stub is descheduled).
170 // We would like to preempt, but some stub is descheduled.
171 WOULD_PREEMPT_DESCHEDULED,
174 PreemptionState preemption_state_;
176 // Maximum amount of time that we can spend in PREEMPTING.
177 // It is reset when we transition to IDLE.
178 base::TimeDelta max_preemption_time_;
// Sequence number plus receipt time for one message forwarded to the
// channel; the receipt time drives the "IPC is taking too long" checks.
180 struct PendingMessage {
181 uint64 message_number;
182 base::TimeTicks time_received;
184 explicit PendingMessage(uint64 message_number)
185 : message_number(message_number),
186 time_received(base::TimeTicks::Now()) {
// Central dispatcher: re-evaluates the current state against the pending
// message queue and |a_stub_is_descheduled_|, performing at most one
// transition per call (transitions themselves re-enter this function).
190 void UpdatePreemptionState() {
191 switch (preemption_state_) {
193 if (preempting_flag_.get() && !pending_messages_.empty())
194 TransitionToWaiting();
197 // A timer will transition us to CHECKING.
198 DCHECK(timer_.IsRunning());
201 if (!pending_messages_.empty()) {
202 base::TimeDelta time_elapsed =
203 base::TimeTicks::Now() - pending_messages_.front().time_received;
204 if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
205 // Schedule another check for when the IPC may go long.
208 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
210 this, &GpuChannelMessageFilter::UpdatePreemptionState);
212 if (a_stub_is_descheduled_)
213 TransitionToWouldPreemptDescheduled();
215 TransitionToPreempting();
220 // A TransitionToIdle() timer should always be running in this state.
221 DCHECK(timer_.IsRunning());
222 if (a_stub_is_descheduled_)
223 TransitionToWouldPreemptDescheduled();
225 TransitionToIdleIfCaughtUp();
227 case WOULD_PREEMPT_DESCHEDULED:
228 // A TransitionToIdle() timer should never be running in this state.
229 DCHECK(!timer_.IsRunning());
230 if (!a_stub_is_descheduled_)
231 TransitionToPreempting();
233 TransitionToIdleIfCaughtUp();
// Leaves PREEMPTING/WOULD_PREEMPT_DESCHEDULED once the oldest pending IPC is
// recent enough (below kStopPreemptThresholdMs) or nothing is pending.
240 void TransitionToIdleIfCaughtUp() {
241 DCHECK(preemption_state_ == PREEMPTING ||
242 preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
243 if (pending_messages_.empty()) {
246 base::TimeDelta time_elapsed =
247 base::TimeTicks::Now() - pending_messages_.front().time_received;
248 if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
253 void TransitionToIdle() {
254 DCHECK(preemption_state_ == PREEMPTING ||
255 preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
256 // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
259 preemption_state_ = IDLE;
// Lower the flag so the other channels' schedulers stop being preempted.
260 preempting_flag_->Reset();
261 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
263 UpdatePreemptionState();
266 void TransitionToWaiting() {
267 DCHECK_EQ(preemption_state_, IDLE);
268 DCHECK(!timer_.IsRunning());
270 preemption_state_ = WAITING;
// Fairness delay: wait kPreemptWaitTimeMs before even checking whether an
// IPC has gone long enough to justify preemption.
273 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
274 this, &GpuChannelMessageFilter::TransitionToChecking);
277 void TransitionToChecking() {
278 DCHECK_EQ(preemption_state_, WAITING);
279 DCHECK(!timer_.IsRunning());
281 preemption_state_ = CHECKING;
// A fresh preemption budget for this round; consumed while PREEMPTING.
282 max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
283 UpdatePreemptionState();
286 void TransitionToPreempting() {
287 DCHECK(preemption_state_ == CHECKING ||
288 preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
289 DCHECK(!a_stub_is_descheduled_);
291 // Stop any pending state update checks that we may have queued
293 if (preemption_state_ == CHECKING)
296 preemption_state_ = PREEMPTING;
// Raise the flag; a timer bounded by |max_preemption_time_| forces us back
// to IDLE so we can never preempt indefinitely.
297 preempting_flag_->Set();
298 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);
302 max_preemption_time_,
303 this, &GpuChannelMessageFilter::TransitionToIdle);
305 UpdatePreemptionState();
308 void TransitionToWouldPreemptDescheduled() {
309 DCHECK(preemption_state_ == CHECKING ||
310 preemption_state_ == PREEMPTING);
311 DCHECK(a_stub_is_descheduled_);
313 if (preemption_state_ == CHECKING) {
314 // Stop any pending state update checks that we may have queued
318 // Stop any TransitionToIdle() timers that we may have queued
// Bank the unused preemption budget so a later TransitionToPreempting()
// resumes with only the remaining time, not a full fresh window.
321 max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
322 if (max_preemption_time_ < base::TimeDelta()) {
328 preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
// While a stub is descheduled we must not preempt, so lower the flag.
329 preempting_flag_->Reset();
330 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
332 UpdatePreemptionState();
// Runs on the main thread (posted from OnMessageReceived).
335 static void InsertSyncPointOnMainThread(
336 base::WeakPtr<GpuChannel> gpu_channel,
337 scoped_refptr<SyncPointManager> manager,
340 // This function must ensure that the sync point will be retired. Normally
341 // we'll find the stub based on the routing ID, and associate the sync point
342 // with it, but if that fails for any reason (channel or stub already
343 // deleted, invalid routing id), we need to retire the sync point
346 GpuCommandBufferStub* stub = gpu_channel->LookupCommandBuffer(routing_id);
348 stub->AddSyncPoint(sync_point);
// Inject the retire message through the normal channel path so it is
// ordered after already-queued messages for this stub.
349 GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
350 gpu_channel->OnMessageReceived(message);
353 gpu_channel->MessageProcessed();
// Fallback: retire directly so waiters on this sync point never hang.
356 manager->RetireSyncPoint(sync_point);
359 // NOTE: this weak pointer is never dereferenced on the IO thread, it's only
360 // passed through - therefore the WeakPtr assumptions are respected.
361 base::WeakPtr<GpuChannel> gpu_channel_;
362 IPC::Sender* sender_;
363 scoped_refptr<SyncPointManager> sync_point_manager_;
364 scoped_refptr<base::MessageLoopProxy> message_loop_;
// Flag this channel raises to preempt other channels; null until
// SetPreemptingFlagAndSchedulingState() is called.
365 scoped_refptr<gpu::PreemptionFlag> preempting_flag_;
// Timestamped queue of messages forwarded but not yet processed by the
// main thread (only populated while |preempting_flag_| is set).
367 std::queue<PendingMessage> pending_messages_;
369 // Count of the number of IPCs forwarded to the GpuChannel.
370 uint64 messages_forwarded_to_channel_;
// Drives the WAITING->CHECKING, CHECKING re-check, and PREEMPTING->IDLE
// transitions; at most one of these is outstanding at a time.
372 base::OneShotTimer<GpuChannelMessageFilter> timer_;
374 bool a_stub_is_descheduled_;
// Constructs a channel for one renderer client. |share_group| and |mailbox|
// may be NULL, in which case the channel creates private instances; the
// actual IPC channel is not created until Init() is called.
377 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
378 GpuWatchdog* watchdog,
379 gfx::GLShareGroup* share_group,
380 gpu::gles2::MailboxManager* mailbox,
383 : gpu_channel_manager_(gpu_channel_manager),
384 messages_processed_(0),
385 client_id_(client_id),
// Fall back to channel-private share group / mailbox manager when the
// caller did not supply shared ones.
386 share_group_(share_group ? share_group : new gfx::GLShareGroup),
387 mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager),
388 image_manager_(new gpu::gles2::ImageManager),
391 handle_messages_scheduled_(false),
392 currently_processing_message_(NULL),
394 num_stubs_descheduled_(0) {
395 DCHECK(gpu_channel_manager);
// The channel ID doubles as the (verified) IPC endpoint name.
398 channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
399 const CommandLine* command_line = CommandLine::ForCurrentProcess();
400 log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
403 GpuChannel::~GpuChannel() {
// Deferred messages are heap-allocated copies owned by this channel.
404 STLDeleteElements(&deferred_messages_);
// Make sure no other channel stays preempted by a flag we raised.
405 if (preempting_flag_.get())
406 preempting_flag_->Reset();
// Creates the IPC channel (server side) and installs the IO-thread message
// filter plus the DevTools agent. Must be called exactly once, before any
// messages can flow.
409 void GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
410 base::WaitableEvent* shutdown_event) {
411 DCHECK(!channel_.get());
413 // Map renderer ID to a (single) channel to that process.
414 channel_ = IPC::SyncChannel::Create(channel_id_,
415 IPC::Channel::MODE_SERVER,
// The filter holds a WeakPtr so it can outlive the channel on the IO
// thread without dereferencing a destroyed GpuChannel.
422 new GpuChannelMessageFilter(weak_factory_.GetWeakPtr(),
423 gpu_channel_manager_->sync_point_manager(),
424 base::MessageLoopProxy::current());
425 io_message_loop_ = io_message_loop;
426 channel_->AddFilter(filter_.get());
428 devtools_gpu_agent_.reset(new DevToolsGpuAgent(this));
// Returns the name/ID of the underlying IPC channel (see channel_id_).
431 std::string GpuChannel::GetChannelName() {
435 #if defined(OS_POSIX)
// POSIX only: transfers ownership of the client end of the channel's socket
// pair to the caller, which forwards it to the renderer process.
436 int GpuChannel::TakeRendererFileDescriptor() {
441 return channel_->TakeClientFileDescriptor();
443 #endif // defined(OS_POSIX)
// All incoming messages are deferred (copied onto |deferred_messages_|) and
// handled later by HandleMessage(), so that unscheduled/preempted stubs can
// pause processing without blocking the IO thread.
445 bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
447 DVLOG(1) << "received message @" << &message << " on channel @" << this
448 << " with type " << message.type();
451 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
452 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
453 // Move Wait commands to the head of the queue, so the renderer
454 // doesn't have to wait any longer than necessary.
455 deferred_messages_.push_front(new IPC::Message(message));
457 deferred_messages_.push_back(new IPC::Message(message));
// A channel error (e.g. renderer died) tears down this channel; RemoveChannel
// deletes |this|, so nothing may run after this call.
465 void GpuChannel::OnChannelError() {
466 gpu_channel_manager_->RemoveChannel(client_id_);
// Sends |message| to the renderer, taking ownership of it.
469 bool GpuChannel::Send(IPC::Message* message) {
470 // The GPU process must never send a synchronous IPC message to the renderer
471 // process. This could result in deadlock.
472 DCHECK(!message->is_sync());
474 DVLOG(1) << "sending message @" << message << " on channel @" << this
475 << " with type " << message->type();
483 return channel_->Send(message);
// Puts the message currently being handled back at the head of the deferred
// queue so it will be re-delivered; the processed counter is rolled back so
// the IO-thread filter's bookkeeping stays consistent.
486 void GpuChannel::RequeueMessage() {
487 DCHECK(currently_processing_message_);
488 deferred_messages_.push_front(
489 new IPC::Message(*currently_processing_message_));
490 messages_processed_--;
491 currently_processing_message_ = NULL;
// Schedules (at most one) HandleMessage task to drain the deferred queue.
494 void GpuChannel::OnScheduled() {
// Already have a HandleMessage task in flight; don't double-post.
495 if (handle_messages_scheduled_)
497 // Post a task to handle any deferred messages. The deferred message queue is
498 // not emptied here, which ensures that OnMessageReceived will continue to
499 // defer newly received messages until the ones in the queue have all been
500 // handled by HandleMessage. HandleMessage is invoked as a
501 // task to prevent reentrancy.
502 base::MessageLoop::current()->PostTask(
504 base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
505 handle_messages_scheduled_ = true;
// Tracks how many stubs are descheduled and, when the any-stub-descheduled
// bit flips, forwards the new state to the IO-thread filter so it can adjust
// its preemption behavior.
508 void GpuChannel::StubSchedulingChanged(bool scheduled) {
509 bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
511 num_stubs_descheduled_--;
514 num_stubs_descheduled_++;
516 DCHECK_LE(num_stubs_descheduled_, stubs_.size());
517 bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;
519 if (a_stub_is_descheduled != a_stub_was_descheduled) {
520 if (preempting_flag_.get()) {
// The filter lives on the IO thread, so state updates must be posted.
521 io_message_loop_->PostTask(
523 base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
525 a_stub_is_descheduled));
// Creates an onscreen (view-backed) command buffer stub, registers it as a
// route on this channel, and stores it in |stubs_| under |route_id|.
// Returns a failure code that also signals channel loss if routing fails.
530 CreateCommandBufferResult GpuChannel::CreateViewCommandBuffer(
531 const gfx::GLSurfaceHandle& window,
533 const GPUCreateCommandBufferConfig& init_params,
536 "GpuChannel::CreateViewCommandBuffer",
// May be NULL when the client did not request context sharing.
540 GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);
542 // Virtualize compositor contexts on OS X to prevent performance regressions
543 // when enabling FCM.
544 // http://crbug.com/180463
545 bool use_virtualized_gl_context = false;
546 #if defined(OS_MACOSX)
547 use_virtualized_gl_context = true;
550 scoped_ptr<GpuCommandBufferStub> stub(
551 new GpuCommandBufferStub(this,
554 mailbox_manager_.get(),
555 image_manager_.get(),
557 disallowed_features_,
559 init_params.gpu_preference,
560 use_virtualized_gl_context,
565 init_params.active_url));
// Propagate an already-installed preemption flag to the new stub.
566 if (preempted_flag_.get())
567 stub->SetPreemptByFlag(preempted_flag_);
568 if (!router_.AddRoute(route_id, stub.get())) {
569 DLOG(ERROR) << "GpuChannel::CreateViewCommandBuffer(): "
570 "failed to add route";
571 return CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST;
// |stubs_| takes ownership of the stub.
573 stubs_.AddWithID(stub.release(), route_id);
574 return CREATE_COMMAND_BUFFER_SUCCEEDED;
// Returns the stub registered under |route_id|, or NULL if none.
577 GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
578 return stubs_.Lookup(route_id);
// Wraps the platform |window| in a GLImage and registers it with the image
// manager under |image_id|; |size| is an out-parameter reporting the image
// dimensions. Duplicate IDs are rejected.
581 void GpuChannel::CreateImage(
582 gfx::PluginWindowHandle window,
586 "GpuChannel::CreateImage",
592 if (image_manager_->LookupImage(image_id)) {
593 LOG(ERROR) << "CreateImage failed, image_id already in use.";
597 scoped_refptr<gfx::GLImage> image = gfx::GLImage::CreateGLImage(window);
601 image_manager_->AddImage(image.get(), image_id);
602 *size = image->GetSize();
// Unregisters the image previously added under |image_id|.
605 void GpuChannel::DeleteImage(int32 image_id) {
607 "GpuChannel::DeleteImage",
611 image_manager_->RemoveImage(image_id);
// Delegates to the channel manager, which loses contexts process-wide.
614 void GpuChannel::LoseAllContexts() {
615 gpu_channel_manager_->LoseAllContexts();
// Marks every stub's context on this channel as lost (no teardown here;
// each stub handles its own lost-context consequences).
618 void GpuChannel::MarkAllContextsLost() {
619 for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
620 !it.IsAtEnd(); it.Advance()) {
621 it.GetCurrentValue()->MarkContextLost();
// Registers |listener| for messages addressed to |route_id|.
625 bool GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
626 return router_.AddRoute(route_id, listener);
// Unregisters the listener for |route_id|.
629 void GpuChannel::RemoveRoute(int32 route_id) {
630 router_.RemoveRoute(route_id);
// Lazily creates the flag this channel uses to preempt other channels and
// hands it to the IO-thread filter along with the current scheduling state.
// Subsequent calls return the same flag.
633 gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
634 if (!preempting_flag_.get()) {
635 preempting_flag_ = new gpu::PreemptionFlag;
// The filter owns the preemption state machine on the IO thread.
636 io_message_loop_->PostTask(
637 FROM_HERE, base::Bind(
638 &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
639 filter_, preempting_flag_, num_stubs_descheduled_ > 0));
641 return preempting_flag_.get();
// Installs the flag by which *this* channel can be preempted, and fans it
// out to all existing stubs (new stubs pick it up at creation time).
644 void GpuChannel::SetPreemptByFlag(
645 scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
646 preempted_flag_ = preempted_flag;
648 for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
649 !it.IsAtEnd(); it.Advance()) {
650 it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
// RemoveChannel deletes |this|; nothing may run after it.
654 void GpuChannel::OnDestroy() {
655 TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
656 gpu_channel_manager_->RemoveChannel(client_id_);
// Dispatches channel-level (MSG_ROUTING_CONTROL) messages to their handlers.
// Every control message is expected to be handled; an unhandled type trips
// the DCHECK below.
659 bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
661 IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
662 IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
663 OnCreateOffscreenCommandBuffer)
664 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
665 OnDestroyCommandBuffer)
666 IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording,
667 OnDevToolsStartEventsRecording)
668 IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording,
669 OnDevToolsStopEventsRecording)
670 IPC_MESSAGE_UNHANDLED(handled = false)
671 IPC_END_MESSAGE_MAP()
672 DCHECK(handled) << msg.type();
// Drains (part of) the deferred message queue on the main thread. Routes one
// message per outer iteration, stopping if the target stub is unscheduled or
// preempted, and "fast-tracks" a directly-following Echo message so the ACK
// after a SwapBuffers is not delayed behind other channels.
676 void GpuChannel::HandleMessage() {
677 handle_messages_scheduled_ = false;
678 if (deferred_messages_.empty())
681 bool should_fast_track_ack = false;
// Peek rather than pop: if the stub can't run we leave the message queued.
682 IPC::Message* m = deferred_messages_.front();
683 GpuCommandBufferStub* stub = stubs_.Lookup(m->routing_id());
687 if (!stub->IsScheduled())
689 if (stub->IsPreempted()) {
// Now commit to processing this message; |message| owns it from here.
695 scoped_ptr<IPC::Message> message(m);
696 deferred_messages_.pop_front();
697 bool message_processed = true;
// Remembered so RequeueMessage() can push the in-flight message back.
699 currently_processing_message_ = message.get();
701 if (message->routing_id() == MSG_ROUTING_CONTROL)
702 result = OnControlMessageReceived(*message);
704 result = router_.RouteMessage(*message);
705 currently_processing_message_ = NULL;
708 // Respond to sync messages even if router failed to route.
709 if (message->is_sync()) {
710 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
711 reply->set_reply_error();
715 // If the command buffer becomes unscheduled as a result of handling the
716 // message but still has more commands to process, synthesize an IPC
717 // message to flush that command buffer.
719 if (stub->HasUnprocessedCommands()) {
720 deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
// The synthesized Rescheduled message stands in for this one, so don't
// count it as processed yet.
722 message_processed = false;
726 if (message_processed)
729 // We want the EchoACK following the SwapBuffers to be sent as close as
730 // possible, avoiding scheduling other channels in the meantime.
731 should_fast_track_ack = false;
732 if (!deferred_messages_.empty()) {
733 m = deferred_messages_.front();
734 stub = stubs_.Lookup(m->routing_id());
735 should_fast_track_ack =
736 (m->type() == GpuCommandBufferMsg_Echo::ID) &&
737 stub && stub->IsScheduled();
739 } while (should_fast_track_ack);
// Anything still queued is handled by a freshly-posted task.
741 if (!deferred_messages_.empty()) {
// Creates an offscreen command buffer stub (empty GLSurfaceHandle) and
// registers it under |route_id|; mirrors CreateViewCommandBuffer for the
// windowless case.
746 void GpuChannel::OnCreateOffscreenCommandBuffer(
747 const gfx::Size& size,
748 const GPUCreateCommandBufferConfig& init_params,
751 TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
// May be NULL when no sharing was requested.
752 GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);
754 scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
757 gfx::GLSurfaceHandle(),
758 mailbox_manager_.get(),
759 image_manager_.get(),
761 disallowed_features_,
763 init_params.gpu_preference,
769 init_params.active_url));
770 if (preempted_flag_.get())
771 stub->SetPreemptByFlag(preempted_flag_);
772 if (!router_.AddRoute(route_id, stub.get())) {
773 DLOG(ERROR) << "GpuChannel::OnCreateOffscreenCommandBuffer(): "
774 "failed to add route";
// |stubs_| takes ownership of the stub.
778 stubs_.AddWithID(stub.release(), route_id);
779 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
780 "route_id", route_id);
// Destroys the stub registered under |route_id|, removing its route and its
// entry in |stubs_| (which owns and deletes it).
784 void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
785 TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
786 "route_id", route_id);
788 GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
// Capture scheduling state before the stub is destroyed below.
791 bool need_reschedule = (stub && !stub->IsScheduled());
792 router_.RemoveRoute(route_id);
793 stubs_.Remove(route_id);
794 // In case the renderer is currently blocked waiting for a sync reply from the
795 // stub, we need to make sure to reschedule the GpuChannel here.
796 if (need_reschedule) {
797 // This stub won't get a chance to reschedule, so update the count now.
798 StubSchedulingChanged(true);
// Starts DevTools GPU event recording for |route_id|; reports success via
// the |succeeded| out-parameter.
802 void GpuChannel::OnDevToolsStartEventsRecording(int32 route_id,
804 *succeeded = devtools_gpu_agent_->StartEventsRecording(route_id);
// Stops DevTools GPU event recording.
807 void GpuChannel::OnDevToolsStopEventsRecording() {
808 devtools_gpu_agent_->StopEventsRecording();
// Called after a deferred message finishes processing; bumps the counter and
// notifies the IO-thread filter so it can drop the matching timestamp from
// its preemption bookkeeping.
811 void GpuChannel::MessageProcessed() {
812 messages_processed_++;
813 if (preempting_flag_.get()) {
814 io_message_loop_->PostTask(
816 base::Bind(&GpuChannelMessageFilter::MessageProcessed,
818 messages_processed_));
// Forwards a compiled shader to the browser process for on-disk caching,
// keyed by |key| and tagged with this channel's client ID.
822 void GpuChannel::CacheShader(const std::string& key,
823 const std::string& shader) {
824 gpu_channel_manager_->Send(
825 new GpuHostMsg_CacheShader(client_id_, key, shader));
// Attaches an additional IO-thread message filter to the channel.
828 void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
829 channel_->AddFilter(filter);
// Detaches a previously added IO-thread message filter.
832 void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
833 channel_->RemoveFilter(filter);
// Sums the memory usage reported by every stub on this channel.
836 uint64 GpuChannel::GetMemoryUsage() {
838 for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
839 !it.IsAtEnd(); it.Advance()) {
840 size += it.GetCurrentValue()->GetMemoryUsage();
845 } // namespace content