1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
9 #include "content/common/gpu/gpu_channel.h"
14 #include "base/bind.h"
15 #include "base/command_line.h"
16 #include "base/debug/trace_event.h"
17 #include "base/message_loop/message_loop_proxy.h"
18 #include "base/rand_util.h"
19 #include "base/strings/string_util.h"
20 #include "base/timer/timer.h"
21 #include "content/common/gpu/devtools_gpu_agent.h"
22 #include "content/common/gpu/gpu_channel_manager.h"
23 #include "content/common/gpu/gpu_messages.h"
24 #include "content/common/gpu/media/gpu_video_encode_accelerator.h"
25 #include "content/common/gpu/sync_point_manager.h"
26 #include "content/public/common/content_switches.h"
27 #include "crypto/hmac.h"
28 #include "gpu/command_buffer/common/mailbox.h"
29 #include "gpu/command_buffer/service/gpu_scheduler.h"
30 #include "gpu/command_buffer/service/image_manager.h"
31 #include "gpu/command_buffer/service/mailbox_manager.h"
32 #include "ipc/ipc_channel.h"
33 #include "ipc/ipc_channel_proxy.h"
34 #include "ui/gl/gl_context.h"
35 #include "ui/gl/gl_image.h"
36 #include "ui/gl/gl_surface.h"
39 #include "ipc/ipc_channel_posix.h"
45 // Number of milliseconds between successive vsync. Many GL commands block
46 // on vsync, so thresholds for preemption should be multiples of this.
47 const int64 kVsyncIntervalMs = 17;
49 // Amount of time that we will wait for an IPC to be processed before
50 // preempting. After a preemption, we must wait this long before triggering
51 // another preemption.
52 const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;
54 // Once we trigger a preemption, the maximum duration that we will wait
55 // before clearing the preemption.
56 const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;
58 // Stop the preemption once the time for the longest pending IPC drops
59 // below this threshold.
60 const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;
62 } // anonymous namespace
64 // This filter does three things:
65 // - it counts and timestamps each message forwarded to the channel
66 // so that we can preempt other channels if a message takes too long to
67 // process. To guarantee fairness, we must wait a minimum amount of time
68 // before preempting and we limit the amount of time that we can preempt in
69 // one shot (see constants above).
70 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
71 // thread, generating the sync point ID and responding immediately, and then
72 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
73 // into the channel's queue.
74 // - it generates mailbox names for clients of the GPU process on the IO thread.
75 class GpuChannelMessageFilter : public IPC::ChannelProxy::MessageFilter {
77 // Takes ownership of gpu_channel (see below).
// Constructor (runs on the main thread, before the filter is handed to the
// IO thread). Takes ownership of |gpu_channel|, a heap-allocated WeakPtr that
// is deleted back on the main thread in the destructor; |private_key| seeds
// the HMAC used to sign generated mailbox names.
78 GpuChannelMessageFilter(const std::string& private_key,
79 base::WeakPtr<GpuChannel>* gpu_channel,
80 scoped_refptr<SyncPointManager> sync_point_manager,
81 scoped_refptr<base::MessageLoopProxy> message_loop)
82 : preemption_state_(IDLE),
83 gpu_channel_(gpu_channel),
85 sync_point_manager_(sync_point_manager),
86 message_loop_(message_loop),
87 messages_forwarded_to_channel_(0),
88 a_stub_is_descheduled_(false),
89 hmac_(crypto::HMAC::SHA256) {
// NOTE(review): the check consuming |success| is elided in this excerpt;
// presumably a DCHECK(success) follows — confirm against the full file.
90 bool success = hmac_.Init(base::StringPiece(private_key));
94 virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
99 virtual void OnFilterRemoved() OVERRIDE {
// IO-thread message hook. Handles the two mailbox-name messages directly on
// the IO thread, counts/timestamps every message forwarded to the GpuChannel
// (for the preemption heuristic), and answers InsertSyncPoint immediately,
// deferring the actual retire work to the main thread.
104 virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE {
108 IPC_BEGIN_MESSAGE_MAP(GpuChannelMessageFilter, message)
109 IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNames,
110 OnGenerateMailboxNames)
111 IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNamesAsync,
112 OnGenerateMailboxNamesAsync)
113 IPC_MESSAGE_UNHANDLED(handled = false)
114 IPC_END_MESSAGE_MAP()
// RetireSyncPoint is only ever synthesized by this filter (see
// InsertSyncPointOnMainThread); a renderer sending it directly is misbehaving.
116 if (message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) {
117 // This message should not be sent explicitly by the renderer.
122 // All other messages get processed by the GpuChannel.
// Record the message for preemption bookkeeping: each forwarded message gets
// a monotonically increasing number and a receipt timestamp.
124 messages_forwarded_to_channel_++;
125 if (preempting_flag_.get())
126 pending_messages_.push(PendingMessage(messages_forwarded_to_channel_))
127 UpdatePreemptionState();
// Reply to the sync InsertSyncPoint IPC right away with a freshly generated
// sync point, then post the retire to the main thread.
130 if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
131 uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
132 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
133 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
135 message_loop_->PostTask(FROM_HERE, base::Bind(
136 &GpuChannelMessageFilter::InsertSyncPointOnMainThread,
139 message.routing_id(),
// Invoked (via a task posted from GpuChannel::MessageProcessed) with the
// channel's processed-message count; pops every pending entry that the
// channel has caught up with and re-evaluates the preemption state.
146 void MessageProcessed(uint64 messages_processed) {
147 while (!pending_messages_.empty() &&
148 pending_messages_.front().message_number <= messages_processed)
149 pending_messages_.pop();
150 UpdatePreemptionState();
// Installs the flag this filter raises to preempt other channels. Posted to
// the IO thread from GpuChannel::GetPreemptionFlag on the main thread.
153 void SetPreemptingFlagAndSchedulingState(
154 gpu::PreemptionFlag* preempting_flag,
155 bool a_stub_is_descheduled) {
156 preempting_flag_ = preempting_flag;
157 a_stub_is_descheduled_ = a_stub_is_descheduled;
// Posted from GpuChannel::StubSchedulingChanged whenever the channel flips
// between "some stub descheduled" and "all stubs scheduled".
160 void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
161 a_stub_is_descheduled_ = a_stub_is_descheduled;
162 UpdatePreemptionState();
// Sends directly over the underlying IPC channel (captured when the filter
// was added), bypassing the GpuChannel.
165 bool Send(IPC::Message* message) {
166 return channel_->Send(message);
// Destructor: the owned WeakPtr must only be destroyed on the main thread,
// so hand it back there (see DeleteWeakPtrOnMainThread).
170 virtual ~GpuChannelMessageFilter() {
171 message_loop_->PostTask(FROM_HERE, base::Bind(
172 &GpuChannelMessageFilter::DeleteWeakPtrOnMainThread, gpu_channel_));
// Fills |result| with |num| signed mailbox names: the first half of each name
// is random bytes, the second half is the HMAC signature of the first half,
// so the GPU process can later verify names it handed out.
177 void OnGenerateMailboxNames(unsigned num, std::vector<gpu::Mailbox>* result) {
178 TRACE_EVENT1("gpu", "OnGenerateMailboxNames", "num", num);
182 for (unsigned i = 0; i < num; ++i) {
183 char name[GL_MAILBOX_SIZE_CHROMIUM];
184 base::RandBytes(name, sizeof(name) / 2);
186 bool success = hmac_.Sign(
187 base::StringPiece(name, sizeof(name) / 2),
188 reinterpret_cast<unsigned char*>(name) + sizeof(name) / 2,
192 (*result)[i].SetName(reinterpret_cast<int8*>(name));
// Async variant: generate the names and push them back in a reply message
// rather than via a sync IPC return value.
196 void OnGenerateMailboxNamesAsync(unsigned num) {
197 std::vector<gpu::Mailbox> names;
198 OnGenerateMailboxNames(num, &names);
199 Send(new GpuChannelMsg_GenerateMailboxNamesReply(names));
// State machine for deciding when to raise |preempting_flag_|. Elided
// enumerator lines (IDLE, WAITING, CHECKING, PREEMPTING) sit between the
// comments below in the full file.
202 enum PreemptionState {
203 // Either there's no other channel to preempt, there are no messages
204 // pending processing, or we just finished preempting and have to wait
205 // before preempting again.
207 // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
209 // We can preempt whenever any IPC processing takes more than
210 // kPreemptWaitTimeMs.
212 // We are currently preempting (i.e. no stub is descheduled).
214 // We would like to preempt, but some stub is descheduled.
215 WOULD_PREEMPT_DESCHEDULED,
218 PreemptionState preemption_state_;
220 // Maximum amount of time that we can spend in PREEMPTING.
221 // It is reset when we transition to IDLE.
222 base::TimeDelta max_preemption_time_;
// One entry per message forwarded to the channel while a preempting flag is
// installed; used to measure how long the oldest unprocessed IPC has waited.
224 struct PendingMessage {
225 uint64 message_number;
226 base::TimeTicks time_received;
228 explicit PendingMessage(uint64 message_number)
229 : message_number(message_number),
230 time_received(base::TimeTicks::Now()) {
// Central dispatcher: advances the state machine based on the current state,
// the pending-message queue, and whether any stub is descheduled. Several
// case labels and break/return lines are elided in this excerpt.
234 void UpdatePreemptionState() {
235 switch (preemption_state_) {
237 if (preempting_flag_.get() && !pending_messages_.empty())
238 TransitionToWaiting();
241 // A timer will transition us to CHECKING.
242 DCHECK(timer_.IsRunning());
245 if (!pending_messages_.empty()) {
246 base::TimeDelta time_elapsed =
247 base::TimeTicks::Now() - pending_messages_.front().time_received;
248 if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
249 // Schedule another check for when the IPC may go long.
252 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
254 this, &GpuChannelMessageFilter::UpdatePreemptionState);
256 if (a_stub_is_descheduled_)
257 TransitionToWouldPreemptDescheduled();
259 TransitionToPreempting();
264 // A TransitionToIdle() timer should always be running in this state.
265 DCHECK(timer_.IsRunning());
266 if (a_stub_is_descheduled_)
267 TransitionToWouldPreemptDescheduled();
269 TransitionToIdleIfCaughtUp();
271 case WOULD_PREEMPT_DESCHEDULED:
272 // A TransitionToIdle() timer should never be running in this state.
273 DCHECK(!timer_.IsRunning());
274 if (!a_stub_is_descheduled_)
275 TransitionToPreempting();
277 TransitionToIdleIfCaughtUp();
// Leave PREEMPTING/WOULD_PREEMPT_DESCHEDULED once the queue is empty or the
// oldest pending IPC is younger than kStopPreemptThresholdMs.
284 void TransitionToIdleIfCaughtUp() {
285 DCHECK(preemption_state_ == PREEMPTING ||
286 preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
287 if (pending_messages_.empty()) {
290 base::TimeDelta time_elapsed =
291 base::TimeTicks::Now() - pending_messages_.front().time_received;
292 if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
// Clear the preemption flag and go back to IDLE, then immediately re-evaluate
// (the queue may already justify a new WAITING cycle).
297 void TransitionToIdle() {
298 DCHECK(preemption_state_ == PREEMPTING ||
299 preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
300 // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
303 preemption_state_ = IDLE;
304 preempting_flag_->Reset();
305 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
307 UpdatePreemptionState();
// Arm the fairness delay: wait kPreemptWaitTimeMs before even checking
// whether preemption is warranted.
310 void TransitionToWaiting() {
311 DCHECK_EQ(preemption_state_, IDLE);
312 DCHECK(!timer_.IsRunning());
314 preemption_state_ = WAITING;
317 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
318 this, &GpuChannelMessageFilter::TransitionToChecking);
// Timer callback from WAITING: grant a fresh preemption budget and check.
321 void TransitionToChecking() {
322 DCHECK_EQ(preemption_state_, WAITING);
323 DCHECK(!timer_.IsRunning());
325 preemption_state_ = CHECKING;
326 max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
327 UpdatePreemptionState();
// Raise the flag to preempt other channels, bounded by |max_preemption_time_|
// (a timer elided here drives us back to IDLE via TransitionToIdle()).
330 void TransitionToPreempting() {
331 DCHECK(preemption_state_ == CHECKING ||
332 preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
333 DCHECK(!a_stub_is_descheduled_);
335 // Stop any pending state update checks that we may have queued
337 if (preemption_state_ == CHECKING)
340 preemption_state_ = PREEMPTING;
341 preempting_flag_->Set();
342 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);
346 max_preemption_time_,
347 this, &GpuChannelMessageFilter::TransitionToIdle);
349 UpdatePreemptionState();
// A stub got descheduled while we wanted to preempt: drop the flag but
// remember how much preemption budget remains so we can resume later.
352 void TransitionToWouldPreemptDescheduled() {
353 DCHECK(preemption_state_ == CHECKING ||
354 preemption_state_ == PREEMPTING);
355 DCHECK(a_stub_is_descheduled_);
357 if (preemption_state_ == CHECKING) {
358 // Stop any pending state update checks that we may have queued
362 // Stop any TransitionToIdle() timers that we may have queued
365 max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
366 if (max_preemption_time_ < base::TimeDelta()) {
372 preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
373 preempting_flag_->Reset();
374 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
376 UpdatePreemptionState();
// Main-thread half of InsertSyncPoint handling. Static so it can outlive the
// filter; receives the WeakPtr-to-GpuChannel by raw pointer (never
// dereferenced on the IO thread, only passed through).
379 static void InsertSyncPointOnMainThread(
380 base::WeakPtr<GpuChannel>* gpu_channel,
381 scoped_refptr<SyncPointManager> manager,
384 // This function must ensure that the sync point will be retired. Normally
385 // we'll find the stub based on the routing ID, and associate the sync point
386 // with it, but if that fails for any reason (channel or stub already
387 // deleted, invalid routing id), we need to retire the sync point
389 if (gpu_channel->get()) {
390 GpuCommandBufferStub* stub = gpu_channel->get()->LookupCommandBuffer(
// Queue a RetireSyncPoint message through the channel so it is processed in
// order with the stub's other messages.
393 stub->AddSyncPoint(sync_point);
394 GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
395 gpu_channel->get()->OnMessageReceived(message);
398 gpu_channel->get()->MessageProcessed();
// Fallback (channel/stub gone): retire immediately so waiters never hang.
401 manager->RetireSyncPoint(sync_point);
// Destroys the owned WeakPtr on the main thread (the only thread where
// WeakPtr destruction is valid for this object). Body elided in this excerpt.
404 static void DeleteWeakPtrOnMainThread(
405 base::WeakPtr<GpuChannel>* gpu_channel) {
409 // NOTE: this is a pointer to a weak pointer. It is never dereferenced on the
410 // IO thread, it's only passed through - therefore the WeakPtr assumptions are
412 base::WeakPtr<GpuChannel>* gpu_channel_;
// Underlying IPC channel; presumably captured in OnFilterAdded (body elided).
413 IPC::Channel* channel_;
414 scoped_refptr<SyncPointManager> sync_point_manager_;
415 scoped_refptr<base::MessageLoopProxy> message_loop_;
// Non-null once GpuChannel::GetPreemptionFlag has installed it.
416 scoped_refptr<gpu::PreemptionFlag> preempting_flag_;
418 std::queue<PendingMessage> pending_messages_;
420 // Count of the number of IPCs forwarded to the GpuChannel.
421 uint64 messages_forwarded_to_channel_;
// Drives WAITING->CHECKING and PREEMPTING->IDLE transitions.
423 base::OneShotTimer<GpuChannelMessageFilter> timer_;
425 bool a_stub_is_descheduled_;
// Constructor: one GpuChannel per renderer client. Falls back to fresh
// share-group/mailbox-manager instances when the caller passes none, and
// reads logging/multisampling switches from the command line.
430 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
431 GpuWatchdog* watchdog,
432 gfx::GLShareGroup* share_group,
433 gpu::gles2::MailboxManager* mailbox,
436 : gpu_channel_manager_(gpu_channel_manager),
437 messages_processed_(0),
438 client_id_(client_id),
439 share_group_(share_group ? share_group : new gfx::GLShareGroup),
440 mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager),
441 image_manager_(new gpu::gles2::ImageManager),
444 handle_messages_scheduled_(false),
445 processed_get_state_fast_(false),
446 currently_processing_message_(NULL),
448 num_stubs_descheduled_(0) {
449 DCHECK(gpu_channel_manager);
452 channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
453 const CommandLine* command_line = CommandLine::ForCurrentProcess();
454 log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
455 disallowed_features_.multisampling =
456 command_line->HasSwitch(switches::kDisableGLMultisampling);
// Creates the server-side IPC channel and installs the IO-thread message
// filter. The filter takes ownership of a heap-allocated WeakPtr to this
// channel so it can safely post back to the main thread.
460 bool GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
461 base::WaitableEvent* shutdown_event) {
462 DCHECK(!channel_.get());
464 // Map renderer ID to a (single) channel to that process.
465 channel_.reset(new IPC::SyncChannel(
467 IPC::Channel::MODE_SERVER,
473 base::WeakPtr<GpuChannel>* weak_ptr(new base::WeakPtr<GpuChannel>(
474 weak_factory_.GetWeakPtr()));
476 filter_ = new GpuChannelMessageFilter(
477 mailbox_manager_->private_key(),
479 gpu_channel_manager_->sync_point_manager(),
480 base::MessageLoopProxy::current());
481 io_message_loop_ = io_message_loop;
482 channel_->AddFilter(filter_.get());
484 devtools_gpu_agent_.reset(new DevToolsGpuAgent(this));
// Returns the channel's name/id (body elided in this excerpt).
489 std::string GpuChannel::GetChannelName() {
493 #if defined(OS_POSIX)
// Hands the renderer its end of the channel socket; ownership transfers to
// the caller.
494 int GpuChannel::TakeRendererFileDescriptor() {
499 return channel_->TakeClientFileDescriptor();
501 #endif // defined(OS_POSIX)
// Main-thread receive path: every incoming message is copied onto the
// deferred queue (GetStateFast may be promoted toward the front) and handled
// later by HandleMessage, which preserves ordering and scheduling rules.
503 bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
505 DVLOG(1) << "received message @" << &message << " on channel @" << this
506 << " with type " << message.type();
509 if (message.type() == GpuCommandBufferMsg_GetStateFast::ID) {
510 if (processed_get_state_fast_) {
511 // Require a non-GetStateFast message in between two GetStateFast
512 // messages, to ensure progress is made.
513 std::deque<IPC::Message*>::iterator point = deferred_messages_.begin();
515 while (point != deferred_messages_.end() &&
516 (*point)->type() == GpuCommandBufferMsg_GetStateFast::ID) {
520 if (point != deferred_messages_.end()) {
524 deferred_messages_.insert(point, new IPC::Message(message));
526 // Move GetStateFast commands to the head of the queue, so the renderer
527 // doesn't have to wait any longer than necessary.
528 deferred_messages_.push_front(new IPC::Message(message));
531 deferred_messages_.push_back(new IPC::Message(message));
// Channel error => tear this channel down via the manager.
539 void GpuChannel::OnChannelError() {
540 gpu_channel_manager_->RemoveChannel(client_id_);
543 bool GpuChannel::Send(IPC::Message* message) {
544 // The GPU process must never send a synchronous IPC message to the renderer
545 // process. This could result in deadlock.
546 DCHECK(!message->is_sync());
548 DVLOG(1) << "sending message @" << message << " on channel @" << this
549 << " with type " << message->type();
557 return channel_->Send(message);
// Puts the message currently being handled back at the head of the queue and
// rolls back the processed counter, so it will be re-delivered.
560 void GpuChannel::RequeueMessage() {
561 DCHECK(currently_processing_message_);
562 deferred_messages_.push_front(
563 new IPC::Message(*currently_processing_message_));
564 messages_processed_--;
565 currently_processing_message_ = NULL;
// Schedules one HandleMessage task, coalescing repeated calls via
// |handle_messages_scheduled_|.
568 void GpuChannel::OnScheduled() {
569 if (handle_messages_scheduled_)
571 // Post a task to handle any deferred messages. The deferred message queue is
572 // not emptied here, which ensures that OnMessageReceived will continue to
573 // defer newly received messages until the ones in the queue have all been
574 // handled by HandleMessage. HandleMessage is invoked as a
575 // task to prevent reentrancy.
576 base::MessageLoop::current()->PostTask(
578 base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
579 handle_messages_scheduled_ = true;
// Tracks how many stubs are descheduled and, when the aggregate
// "any stub descheduled" bit flips, forwards the new state to the IO-thread
// filter so its preemption state machine can react.
582 void GpuChannel::StubSchedulingChanged(bool scheduled) {
583 bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
585 num_stubs_descheduled_--;
588 num_stubs_descheduled_++;
590 DCHECK_LE(num_stubs_descheduled_, stubs_.size());
591 bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;
593 if (a_stub_is_descheduled != a_stub_was_descheduled) {
594 if (preempting_flag_.get()) {
595 io_message_loop_->PostTask(
597 base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
599 a_stub_is_descheduled));
// Creates an onscreen command buffer stub for |window|, registers it with the
// router under a newly generated route id (out-param), and applies any
// already-installed preempted flag. *route_id stays MSG_ROUTING_NONE on the
// (elided) failure paths.
604 void GpuChannel::CreateViewCommandBuffer(
605 const gfx::GLSurfaceHandle& window,
607 const GPUCreateCommandBufferConfig& init_params,
610 "GpuChannel::CreateViewCommandBuffer",
614 *route_id = MSG_ROUTING_NONE;
616 GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);
618 // Virtualize compositor contexts on OS X to prevent performance regressions
619 // when enabling FCM.
620 // http://crbug.com/180463
621 bool use_virtualized_gl_context = false;
622 #if defined(OS_MACOSX)
623 use_virtualized_gl_context = true;
626 *route_id = GenerateRouteID();
627 scoped_ptr<GpuCommandBufferStub> stub(
628 new GpuCommandBufferStub(this,
631 mailbox_manager_.get(),
632 image_manager_.get(),
634 disallowed_features_,
636 init_params.gpu_preference,
637 use_virtualized_gl_context,
642 init_params.active_url));
643 if (preempted_flag_.get())
644 stub->SetPreemptByFlag(preempted_flag_);
645 router_.AddRoute(*route_id, stub.get());
// The stubs_ map takes ownership of the released stub.
646 stubs_.AddWithID(stub.release(), *route_id);
// Returns the stub registered under |route_id|, or NULL if none.
649 GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
650 return stubs_.Lookup(route_id);
// Wraps |window| in a GLImage and registers it under |image_id|; rejects
// duplicate ids. Error-return lines are elided in this excerpt.
653 void GpuChannel::CreateImage(
654 gfx::PluginWindowHandle window,
658 "GpuChannel::CreateImage",
664 if (image_manager_->LookupImage(image_id)) {
665 LOG(ERROR) << "CreateImage failed, image_id already in use.";
669 scoped_refptr<gfx::GLImage> image = gfx::GLImage::CreateGLImage(window);
673 image_manager_->AddImage(image.get(), image_id);
674 *size = image->GetSize();
677 void GpuChannel::DeleteImage(int32 image_id) {
679 "GpuChannel::DeleteImage",
683 image_manager_->RemoveImage(image_id);
// Delegates to the manager, which drops contexts process-wide.
686 void GpuChannel::LoseAllContexts() {
687 gpu_channel_manager_->LoseAllContexts();
// Flags every stub's context as lost without destroying the stubs.
690 void GpuChannel::MarkAllContextsLost() {
691 for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
692 !it.IsAtEnd(); it.Advance()) {
693 it.GetCurrentValue()->MarkContextLost();
// Defers destruction to a task so we are not deleted mid-callstack.
697 void GpuChannel::DestroySoon() {
698 base::MessageLoop::current()->PostTask(
699 FROM_HERE, base::Bind(&GpuChannel::OnDestroy, this));
// Hands out process-unique route ids (increment/return lines elided).
702 int32 GpuChannel::GenerateRouteID() {
703 static int32 last_id = 0;
707 void GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
708 router_.AddRoute(route_id, listener);
711 void GpuChannel::RemoveRoute(int32 route_id) {
712 router_.RemoveRoute(route_id);
// Lazily creates the flag this channel raises to preempt others, and installs
// it (with the current scheduling state) on the IO-thread filter.
715 gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
716 if (!preempting_flag_.get()) {
717 preempting_flag_ = new gpu::PreemptionFlag;
718 io_message_loop_->PostTask(
719 FROM_HERE, base::Bind(
720 &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
721 filter_, preempting_flag_, num_stubs_descheduled_ > 0));
723 return preempting_flag_.get();
// Installs the flag by which ANOTHER channel preempts this one, propagating
// it to every existing stub (new stubs get it at creation time).
726 void GpuChannel::SetPreemptByFlag(
727 scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
728 preempted_flag_ = preempted_flag;
730 for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
731 !it.IsAtEnd(); it.Advance()) {
732 it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
// Ensure we stop preempting other channels before going away.
736 GpuChannel::~GpuChannel() {
737 if (preempting_flag_.get())
738 preempting_flag_->Reset();
// Task posted by DestroySoon(); the manager deletes us.
741 void GpuChannel::OnDestroy() {
742 TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
743 gpu_channel_manager_->RemoveChannel(client_id_);
// Dispatches control messages (routing id MSG_ROUTING_CONTROL) to the
// channel-level handlers below. All control messages are expected to be
// handled, hence the DCHECK.
746 bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
748 IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
749 IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
750 OnCreateOffscreenCommandBuffer)
751 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
752 OnDestroyCommandBuffer)
753 IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateVideoEncoder, OnCreateVideoEncoder)
754 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyVideoEncoder,
755 OnDestroyVideoEncoder)
756 IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording,
757 OnDevToolsStartEventsRecording)
758 IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording,
759 OnDevToolsStopEventsRecording)
761 GpuChannelMsg_CollectRenderingStatsForSurface,
762 OnCollectRenderingStatsForSurface)
763 IPC_MESSAGE_UNHANDLED(handled = false)
764 IPC_END_MESSAGE_MAP()
765 DCHECK(handled) << msg.type();
// Drains the deferred-message queue on the main thread. Stops early if the
// target stub is unscheduled or preempted; routes each message to the control
// handler or the router; fast-tracks consecutive Echo messages so a
// SwapBuffers ACK is not delayed behind other channels. Several early-return
// and loop-control lines are elided in this excerpt.
769 void GpuChannel::HandleMessage() {
770 handle_messages_scheduled_ = false;
771 if (deferred_messages_.empty())
774 bool should_fast_track_ack = false;
775 IPC::Message* m = deferred_messages_.front();
776 GpuCommandBufferStub* stub = stubs_.Lookup(m->routing_id());
780 if (!stub->IsScheduled())
782 if (stub->IsPreempted()) {
// From here the front message is owned and consumed.
788 scoped_ptr<IPC::Message> message(m);
789 deferred_messages_.pop_front();
790 bool message_processed = true;
// Remember whether this was GetStateFast so OnMessageReceived can force a
// non-GetStateFast message between two of them.
792 processed_get_state_fast_ =
793 (message->type() == GpuCommandBufferMsg_GetStateFast::ID);
795 currently_processing_message_ = message.get();
797 if (message->routing_id() == MSG_ROUTING_CONTROL)
798 result = OnControlMessageReceived(*message);
800 result = router_.RouteMessage(*message);
801 currently_processing_message_ = NULL;
804 // Respond to sync messages even if router failed to route.
805 if (message->is_sync()) {
806 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
807 reply->set_reply_error();
811 // If the command buffer becomes unscheduled as a result of handling the
812 // message but still has more commands to process, synthesize an IPC
813 // message to flush that command buffer.
815 if (stub->HasUnprocessedCommands()) {
816 deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
818 message_processed = false;
822 if (message_processed)
825 // We want the EchoACK following the SwapBuffers to be sent as close as
826 // possible, avoiding scheduling other channels in the meantime.
827 should_fast_track_ack = false;
828 if (!deferred_messages_.empty()) {
829 m = deferred_messages_.front();
830 stub = stubs_.Lookup(m->routing_id());
831 should_fast_track_ack =
832 (m->type() == GpuCommandBufferMsg_Echo::ID) &&
833 stub && stub->IsScheduled();
835 } while (should_fast_track_ack);
// If work remains, reschedule another HandleMessage pass (elided).
837 if (!deferred_messages_.empty()) {
// Creates an offscreen command buffer stub (empty GLSurfaceHandle) and
// registers it under a newly generated route id, mirroring
// CreateViewCommandBuffer for the windowless case.
842 void GpuChannel::OnCreateOffscreenCommandBuffer(
843 const gfx::Size& size,
844 const GPUCreateCommandBufferConfig& init_params,
846 TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
847 GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);
849 *route_id = GenerateRouteID();
851 scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
854 gfx::GLSurfaceHandle(),
855 mailbox_manager_.get(),
856 image_manager_.get(),
858 disallowed_features_,
860 init_params.gpu_preference,
866 init_params.active_url));
867 if (preempted_flag_.get())
868 stub->SetPreemptByFlag(preempted_flag_);
869 router_.AddRoute(*route_id, stub.get());
870 stubs_.AddWithID(stub.release(), *route_id);
871 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
872 "route_id", route_id);
// Tears down a stub; if it was blocking the channel (unscheduled), restore
// the scheduling count so the channel is not wedged.
875 void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
876 TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
877 "route_id", route_id);
879 GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
882 bool need_reschedule = (stub && !stub->IsScheduled());
883 router_.RemoveRoute(route_id);
884 stubs_.Remove(route_id);
885 // In case the renderer is currently blocked waiting for a sync reply from the
886 // stub, we need to make sure to reschedule the GpuChannel here.
887 if (need_reschedule) {
888 // This stub won't get a chance to reschedule, so update the count now.
889 StubSchedulingChanged(true);
// Creates a video-encode accelerator and registers it under a new route id;
// the video_encoders_ map takes ownership.
893 void GpuChannel::OnCreateVideoEncoder(int32* route_id) {
894 TRACE_EVENT0("gpu", "GpuChannel::OnCreateVideoEncoder");
896 *route_id = GenerateRouteID();
897 GpuVideoEncodeAccelerator* encoder =
898 new GpuVideoEncodeAccelerator(this, *route_id);
899 router_.AddRoute(*route_id, encoder);
900 video_encoders_.AddWithID(encoder, *route_id);
// Unregisters and destroys the encoder for |route_id| (an early-out for an
// unknown id is elided in this excerpt).
903 void GpuChannel::OnDestroyVideoEncoder(int32 route_id) {
905 "gpu", "GpuChannel::OnDestroyVideoEncoder", "route_id", route_id);
906 GpuVideoEncodeAccelerator* encoder = video_encoders_.Lookup(route_id);
909 router_.RemoveRoute(route_id);
910 video_encoders_.Remove(route_id);
913 void GpuChannel::OnDevToolsStartEventsRecording(int32* route_id) {
914 devtools_gpu_agent_->StartEventsRecording(route_id);
917 void GpuChannel::OnDevToolsStopEventsRecording() {
918 devtools_gpu_agent_->StopEventsRecording();
// Aggregates per-decoder texture-upload / command-processing stats across all
// stubs into the global fields, and additionally into the per-surface fields
// for the stub matching |surface_id|; then appends process-wide video memory
// usage from the memory manager.
921 void GpuChannel::OnCollectRenderingStatsForSurface(
922 int32 surface_id, GpuRenderingStats* stats) {
923 for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
924 !it.IsAtEnd(); it.Advance()) {
925 int texture_upload_count =
926 it.GetCurrentValue()->decoder()->GetTextureUploadCount();
927 base::TimeDelta total_texture_upload_time =
928 it.GetCurrentValue()->decoder()->GetTotalTextureUploadTime();
929 base::TimeDelta total_processing_commands_time =
930 it.GetCurrentValue()->decoder()->GetTotalProcessingCommandsTime();
932 stats->global_texture_upload_count += texture_upload_count;
933 stats->global_total_texture_upload_time += total_texture_upload_time;
934 stats->global_total_processing_commands_time +=
935 total_processing_commands_time;
936 if (it.GetCurrentValue()->surface_id() == surface_id) {
937 stats->texture_upload_count += texture_upload_count;
938 stats->total_texture_upload_time += total_texture_upload_time;
939 stats->total_processing_commands_time += total_processing_commands_time;
943 GPUVideoMemoryUsageStats usage_stats;
944 gpu_channel_manager_->gpu_memory_manager()->GetVideoMemoryUsageStats(
946 stats->global_video_memory_bytes_allocated = usage_stats.bytes_allocated;
// Bumps the processed counter and mirrors it to the IO-thread filter so its
// preemption bookkeeping can drop caught-up pending messages.
949 void GpuChannel::MessageProcessed() {
950 messages_processed_++;
951 if (preempting_flag_.get()) {
952 io_message_loop_->PostTask(
954 base::Bind(&GpuChannelMessageFilter::MessageProcessed,
956 messages_processed_));
// Forwards a compiled-shader cache entry to the browser process, tagged with
// this channel's client id.
960 void GpuChannel::CacheShader(const std::string& key,
961 const std::string& shader) {
962 gpu_channel_manager_->Send(
963 new GpuHostMsg_CacheShader(client_id_, key, shader));
// Thin pass-throughs to the underlying IPC channel's filter list.
966 void GpuChannel::AddFilter(IPC::ChannelProxy::MessageFilter* filter) {
967 channel_->AddFilter(filter);
970 void GpuChannel::RemoveFilter(IPC::ChannelProxy::MessageFilter* filter) {
971 channel_->RemoveFilter(filter);
974 } // namespace content