// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/hash.h"
#include "base/memory/shared_memory.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "content/common/gpu/devtools_gpu_instrumentation.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/gpu_watchdog.h"
#include "content/common/gpu/image_transport_surface.h"
#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_client.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
#include "gpu/command_buffer/service/gpu_control_service.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_switches.h"

#if defined(OS_WIN)
#include "content/public/common/sandbox_init.h"
#endif

#if defined(OS_ANDROID)
#include "content/common/gpu/stream_texture_android.h"
#endif

namespace content {
namespace {

// The GpuCommandBufferMemoryTracker class provides a bridge between the
// ContextGroup's memory type managers and the GpuMemoryManager class.
class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
 public:
  explicit GpuCommandBufferMemoryTracker(GpuChannel* channel) :
      tracking_group_(channel->gpu_channel_manager()->gpu_memory_manager()->
          CreateTrackingGroup(channel->renderer_pid(), this)) {
  }

  virtual void TrackMemoryAllocatedChange(
      size_t old_size,
      size_t new_size,
      gpu::gles2::MemoryTracker::Pool pool) OVERRIDE {
    tracking_group_->TrackMemoryAllocatedChange(
        old_size, new_size, pool);
  }

  virtual bool EnsureGPUMemoryAvailable(size_t size_needed) OVERRIDE {
    return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
  }

 private:
  virtual ~GpuCommandBufferMemoryTracker() {
  }

  scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
};

// FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
// url_hash matches.
void FastSetActiveURL(const GURL& url, size_t url_hash) {
  // Leave the previously set URL in the empty case -- empty URLs are given by
  // WebKitPlatformSupportImpl::createOffscreenGraphicsContext3D. Hopefully the
  // onscreen context URL was set previously and will show up even when a crash
  // occurs during offscreen command processing.
  if (url.is_empty())
    return;
  static size_t g_last_url_hash = 0;
  if (url_hash != g_last_url_hash) {
    g_last_url_hash = url_hash;
    GetContentClient()->SetActiveURL(url);
  }
}

// The first time polling a fence, delay some extra time to allow other
// stubs to process some work, or else the timing of the fences could
// allow a pattern of alternating fast and slow frames to occur.
const int64 kHandleMoreWorkPeriodMs = 2;
const int64 kHandleMoreWorkPeriodBusyMs = 1;

// Prevents idle work from being starved.
const int64 kMaxTimeSinceIdleMs = 10;

}  // namespace

GpuCommandBufferStub::GpuCommandBufferStub(
    GpuChannel* channel,
    GpuCommandBufferStub* share_group,
    const gfx::GLSurfaceHandle& handle,
    gpu::gles2::MailboxManager* mailbox_manager,
    gpu::gles2::ImageManager* image_manager,
    const gfx::Size& size,
    const gpu::gles2::DisallowedFeatures& disallowed_features,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    bool use_virtualized_gl_context,
    int32 route_id,
    int32 surface_id,
    GpuWatchdog* watchdog,
    bool software,
    const GURL& active_url)
    : channel_(channel),
      handle_(handle),
      initial_size_(size),
      disallowed_features_(disallowed_features),
      requested_attribs_(attribs),
      gpu_preference_(gpu_preference),
      use_virtualized_gl_context_(use_virtualized_gl_context),
      route_id_(route_id),
      surface_id_(surface_id),
      software_(software),
      last_flush_count_(0),
      last_memory_allocation_valid_(false),
      watchdog_(watchdog),
      sync_point_wait_count_(0),
      delayed_work_scheduled_(false),
      previous_messages_processed_(0),
      active_url_(active_url),
      total_gpu_memory_(0) {
  active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
  FastSetActiveURL(active_url_, active_url_hash_);
  if (share_group) {
    context_group_ = share_group->context_group_;
  } else {
    context_group_ = new gpu::gles2::ContextGroup(
        mailbox_manager,
        image_manager,
        new GpuCommandBufferMemoryTracker(channel),
        NULL,
        true);
  }
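
  // A driver workaround in the feature info can force virtualized GL
  // contexts even when the client did not request them.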
  use_virtualized_gl_context_ |=
      context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;
}

GpuCommandBufferStub::~GpuCommandBufferStub() {
  Destroy();

  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(surface_id()));
}

GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
  return channel()->gpu_channel_manager()->gpu_memory_manager();
}

bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
  devtools_gpu_instrumentation::ScopedGpuTask task(this);
  FastSetActiveURL(active_url_, active_url_hash_);

  // Ensure the appropriate GL context is current before handling any IPC
  // messages directed at the command buffer. This ensures that the message
  // handler can assume that the context is current (not necessary for
  // Echo, RetireSyncPoint, or WaitSyncPoint).
  if (decoder_.get() &&
      message.type() != GpuCommandBufferMsg_Echo::ID &&
      message.type() != GpuCommandBufferMsg_GetStateFast::ID &&
      message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
      message.type() != GpuCommandBufferMsg_SetLatencyInfo::ID) {
    if (!MakeCurrent())
      return false;
  }

  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
  // here. This is so the reply can be delayed if the scheduler is unscheduled.
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                    OnInitialize);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                    OnSetGetBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
                        OnProduceFrontBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Echo, OnEcho);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetState, OnGetState);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetStateFast,
                                    OnGetStateFast);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetLatencyInfo, OnSetLatencyInfo);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                        OnRegisterTransferBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                        OnDestroyTransferBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetTransferBuffer,
                                    OnGetTransferBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
                                    OnCreateVideoDecoder)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
                        OnSetSurfaceVisible)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
                        OnRetireSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
                        OnSignalSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
                        OnSignalQuery)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SendClientManagedMemoryStats,
                        OnReceivedClientManagedMemoryStats)
    IPC_MESSAGE_HANDLER(
        GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
        OnSetClientHasMemoryAllocationChangedCallback)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterGpuMemoryBuffer,
                        OnRegisterGpuMemoryBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyGpuMemoryBuffer,
                        OnDestroyGpuMemoryBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
                        OnCreateStreamTexture)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  // Ensure that any delayed work that was created will be handled.
  ScheduleDelayedWork(kHandleMoreWorkPeriodMs);

  DCHECK(handled);
  return handled;
}

bool GpuCommandBufferStub::Send(IPC::Message* message) {
  return channel_->Send(message);
}

bool GpuCommandBufferStub::IsScheduled() {
  return (!scheduler_.get() || scheduler_->IsScheduled());
}

bool GpuCommandBufferStub::HasMoreWork() {
  return scheduler_.get() && scheduler_->HasMoreWork();
}

void GpuCommandBufferStub::PollWork() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
  delayed_work_scheduled_ = false;
  FastSetActiveURL(active_url_, active_url_hash_);
  if (decoder_.get() && !MakeCurrent())
    return;

  if (scheduler_) {
    bool fences_complete = scheduler_->PollUnscheduleFences();
    // Perform idle work if all fences are complete.
    if (fences_complete) {
      uint64 current_messages_processed =
          channel()->gpu_channel_manager()->MessagesProcessed();
      // We're idle when no messages were processed or scheduled.
      bool is_idle =
          (previous_messages_processed_ == current_messages_processed) &&
          !channel()->gpu_channel_manager()->HandleMessagesScheduled();
      if (!is_idle && !last_idle_time_.is_null()) {
        base::TimeDelta time_since_idle = base::TimeTicks::Now() -
            last_idle_time_;
        base::TimeDelta max_time_since_idle =
            base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

        // Force idle when it's been too long since last time we were idle.
        if (time_since_idle > max_time_since_idle)
          is_idle = true;
      }

      if (is_idle) {
        last_idle_time_ = base::TimeTicks::Now();
        scheduler_->PerformIdleWork();
      }
    }
  }
  ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
}
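
// Returns true when the client has written commands past the last parsed
// position (put_offset ahead of get_offset) and the buffer is not in an
// error state.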
bool GpuCommandBufferStub::HasUnprocessedCommands() {
  if (command_buffer_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    return state.put_offset != state.get_offset &&
        !gpu::error::IsError(state.error);
  }
  return false;
}

void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
  if (!HasMoreWork()) {
    last_idle_time_ = base::TimeTicks();
    return;
  }

  if (delayed_work_scheduled_)
    return;
  delayed_work_scheduled_ = true;

  // Idle when no messages are processed between now and when
  // PollWork is called.
  previous_messages_processed_ =
      channel()->gpu_channel_manager()->MessagesProcessed();
  if (last_idle_time_.is_null())
    last_idle_time_ = base::TimeTicks::Now();

  // IsScheduled() returns true after passing all unschedule fences
  // and this is when we can start performing idle work. Idle work
  // is done synchronously so we can set delay to 0 and instead poll
  // for more work at the rate idle work is performed. This also ensures
  // that idle work is done as efficiently as possible without any
  // unnecessary delays.
  if (scheduler_.get() &&
      scheduler_->IsScheduled() &&
      scheduler_->HasMoreIdleWork()) {
    delay = 0;
  }

  base::MessageLoop::current()->PostDelayedTask(
      FROM_HERE,
      base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
      base::TimeDelta::FromMilliseconds(delay));
}

void GpuCommandBufferStub::OnEcho(const IPC::Message& message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnEcho");
  Send(new IPC::Message(message));
}

bool GpuCommandBufferStub::MakeCurrent() {
  if (decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  CheckContextLost();
  return false;
}

void GpuCommandBufferStub::Destroy() {
  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
        active_url_));
  }

  memory_manager_client_state_.reset();

  while (!sync_points_.empty())
    OnRetireSyncPoint(sync_points_.front());

  if (decoder_)
    decoder_->set_engine(NULL);

  // The scheduler has raw references to the decoder and the command buffer so
  // destroy it before those.
  scheduler_.reset();

  bool have_context = false;
  if (decoder_ && command_buffer_ &&
      command_buffer_->GetState().error != gpu::error::kLostContext)
    have_context = decoder_->MakeCurrent();
  FOR_EACH_OBSERVER(DestructionObserver,
                    destruction_observers_,
                    OnWillDestroyStub());

  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }

  command_buffer_.reset();

  // Remove this after crbug.com/248395 is sorted out.
  surface_ = NULL;
}

void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
  Destroy();
  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, false, gpu::Capabilities());
  Send(reply_message);
}

void GpuCommandBufferStub::OnInitialize(
    base::SharedMemoryHandle shared_state_handle,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
  DCHECK(!command_buffer_.get());

  scoped_ptr<base::SharedMemory> shared_state_shm(
      new base::SharedMemory(shared_state_handle, false));

  command_buffer_.reset(new gpu::CommandBufferService(
      context_group_->transfer_buffer_manager()));

  if (!command_buffer_->Initialize()) {
    DLOG(ERROR) << "CommandBufferService failed to initialize.";
    OnInitializeFailed(reply_message);
    return;
  }

  decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));

  scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
                                         decoder_.get(),
                                         decoder_.get()));
  if (preemption_flag_.get())
    scheduler_->SetPreemptByFlag(preemption_flag_);

  decoder_->set_engine(scheduler_.get());

  if (!handle_.is_null()) {
#if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
    if (software_) {
      DLOG(ERROR) << "No software support.";
      OnInitializeFailed(reply_message);
      return;
    }
#endif

    surface_ = ImageTransportSurface::CreateSurface(
        channel_->gpu_channel_manager(),
        this,
        handle_);
  } else {
    GpuChannelManager* manager = channel_->gpu_channel_manager();
    surface_ = manager->GetDefaultOffscreenSurface();
  }

  if (!surface_.get()) {
    DLOG(ERROR) << "Failed to create surface.";
    OnInitializeFailed(reply_message);
    return;
  }

  scoped_refptr<gfx::GLContext> context;
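  // With virtualized GL contexts, all stubs in the share group run on one
  // real GL context; each stub wraps it in a GLContextVirtual that restores
  // that stub's GL state whenever it is made current.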
  if (use_virtualized_gl_context_ && channel_->share_group()) {
    context = channel_->share_group()->GetSharedContext();
    if (!context.get()) {
      context = gfx::GLContext::CreateGLContext(
          channel_->share_group(),
          channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
          gpu_preference_);
      channel_->share_group()->SetSharedContext(context.get());
    }

    // This should be a non-virtual GL context.
    DCHECK(context->GetHandle());
    context = new gpu::GLContextVirtual(
        channel_->share_group(), context.get(), decoder_->AsWeakPtr());
    if (!context->Initialize(surface_.get(), gpu_preference_)) {
      // TODO(sievers): The real context created above for the default
      // offscreen surface might not be compatible with this surface.
      // Need to adjust at least GLX to be able to create the initial context
      // with a config that is compatible with onscreen and offscreen surfaces.
      context = NULL;

      DLOG(ERROR) << "Failed to initialize virtual GL context.";
      OnInitializeFailed(reply_message);
      return;
    }
  }

  if (!context.get()) {
    context = gfx::GLContext::CreateGLContext(
        channel_->share_group(), surface_.get(), gpu_preference_);
  }
  if (!context.get()) {
    DLOG(ERROR) << "Failed to create context.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Failed to make context current.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->GetGLStateRestorer()) {
    context->SetGLStateRestorer(
        new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
  }

  if (!context->GetTotalGpuMemory(&total_gpu_memory_))
    total_gpu_memory_ = 0;

  if (!context_group_->has_program_cache()) {
    context_group_->set_program_cache(
        channel_->gpu_channel_manager()->program_cache());
  }

  // Initialize the decoder with either the view or pbuffer GLContext.
  if (!decoder_->Initialize(surface_,
                            context,
                            !surface_id(),
                            initial_size_,
                            disallowed_features_,
                            requested_attribs_)) {
    DLOG(ERROR) << "Failed to initialize decoder.";
    OnInitializeFailed(reply_message);
    return;
  }

  gpu_control_.reset(
      new gpu::GpuControlService(context_group_->image_manager(),
                                 NULL,
                                 context_group_->mailbox_manager(),
                                 decoder_->GetQueryManager(),
                                 decoder_->GetCapabilities()));

  if (CommandLine::ForCurrentProcess()->HasSwitch(
      switches::kEnableGPUServiceLogging)) {
    decoder_->set_log_commands(true);
  }

  decoder_->GetLogger()->SetMsgCallback(
      base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
                 base::Unretained(this)));
  decoder_->SetShaderCacheCallback(
      base::Bind(&GpuCommandBufferStub::SendCachedShader,
                 base::Unretained(this)));
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
                 base::Unretained(this)));

  command_buffer_->SetPutOffsetChangeCallback(
      base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
  command_buffer_->SetGetBufferChangeCallback(
      base::Bind(&gpu::GpuScheduler::SetGetBuffer,
                 base::Unretained(scheduler_.get())));
  command_buffer_->SetParseErrorCallback(
      base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
  scheduler_->SetSchedulingChangedCallback(
      base::Bind(&GpuChannel::StubSchedulingChanged,
                 base::Unretained(channel_)));

  if (watchdog_) {
    scheduler_->SetCommandProcessedCallback(
        base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
                   base::Unretained(this)));
  }

  if (!command_buffer_->SetSharedStateBuffer(shared_state_shm.Pass())) {
    DLOG(ERROR) << "Failed to map shared state buffer.";
    OnInitializeFailed(reply_message);
    return;
  }

  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, true, gpu_control_->GetCapabilities());
  Send(reply_message);

  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
        active_url_));
  }
}

void GpuCommandBufferStub::OnSetLatencyInfo(
    const std::vector<ui::LatencyInfo>& latency_info) {
  if (!ui::LatencyInfo::Verify(latency_info,
                               "GpuCommandBufferStub::OnSetLatencyInfo"))
    return;
  if (!latency_info_callback_.is_null())
    latency_info_callback_.Run(latency_info);
}

void GpuCommandBufferStub::OnCreateStreamTexture(uint32 texture_id,
                                                 int32* stream_id) {
#if defined(OS_ANDROID)
  *stream_id = StreamTexture::Create(this, texture_id);
#else
  *stream_id = 0;
#endif
}

void GpuCommandBufferStub::SetLatencyInfoCallback(
    const LatencyInfoCallback& callback) {
  latency_info_callback_ = callback;
}

int32 GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
  // The requested attributes are pairs of (enum, value); search for the
  // requested attribute and return its value.
  for (std::vector<int32>::const_iterator it = requested_attribs_.begin();
       it != requested_attribs_.end(); ++it) {
    if (*it++ == attr) {
      return *it;
    }
  }
  return -1;
}

void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
                                          IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
  if (command_buffer_)
    command_buffer_->SetGetBuffer(shm_id);
  Send(reply_message);
}

void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
  if (!decoder_) {
    LOG(ERROR) << "Can't produce front buffer before initialization.";
    return;
  }

  decoder_->ProduceFrontBuffer(mailbox);
}

void GpuCommandBufferStub::OnGetState(IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetState");
  if (command_buffer_) {
    gpu::CommandBuffer::State state = command_buffer_->GetState();
    CheckContextLost();
    GpuCommandBufferMsg_GetState::WriteReplyParams(reply_message, state);
  } else {
    DLOG(ERROR) << "no command_buffer.";
    reply_message->set_reply_error();
  }
  Send(reply_message);
}

void GpuCommandBufferStub::OnParseError() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
  DCHECK(command_buffer_.get());
  gpu::CommandBuffer::State state = command_buffer_->GetState();
  IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
      route_id_, state.context_lost_reason);
  msg->set_unblock(true);
  Send(msg);

  // Tell the browser about this context loss as well, so it can
  // determine whether client APIs like WebGL need to be immediately
  // blocked from automatically running.
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
      handle_.is_null(), state.context_lost_reason, active_url_));

  CheckContextLost();
}

void GpuCommandBufferStub::OnGetStateFast(IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetStateFast");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  gpu::CommandBuffer::State state = command_buffer_->GetState();
  GpuCommandBufferMsg_GetStateFast::WriteReplyParams(reply_message, state);
  Send(reply_message);
}

void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset,
                                        uint32 flush_count) {
  TRACE_EVENT1("gpu", "GpuCommandBufferStub::OnAsyncFlush",
               "put_offset", put_offset);
  DCHECK(command_buffer_.get());
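  // flush_count is an unsigned counter that may wrap around. Comparing the
  // unsigned difference accepts any count within 2^27 ahead of the last one
  // as in-order and rejects anything else as stale.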
  if (flush_count - last_flush_count_ < 0x8000000U) {
    last_flush_count_ = flush_count;
    command_buffer_->Flush(put_offset);
  } else {
    // We received this message out-of-order. This should not happen but is
    // here to catch regressions. Ignore the message.
    NOTREACHED() << "Received a Flush message out-of-order";
  }

  ReportState();
}
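
// Resumes parsing from the current put offset once the stub is rescheduled;
// state is reported back only if the parser actually made progress.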
void GpuCommandBufferStub::OnRescheduled() {
  gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
  command_buffer_->Flush(pre_state.put_offset);
  gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();

  if (pre_state.get_offset != post_state.get_offset)
    ReportState();
}

void GpuCommandBufferStub::OnRegisterTransferBuffer(
    int32 id,
    base::SharedMemoryHandle transfer_buffer,
    uint32 size) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");
  base::SharedMemory shared_memory(transfer_buffer, false);

  if (command_buffer_)
    command_buffer_->RegisterTransferBuffer(id, &shared_memory, size);
}

void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");

  if (command_buffer_)
    command_buffer_->DestroyTransferBuffer(id);
}

void GpuCommandBufferStub::OnGetTransferBuffer(
    int32 id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetTransferBuffer");
  if (command_buffer_) {
    base::SharedMemoryHandle transfer_buffer = base::SharedMemoryHandle();
    uint32 size = 0;

    gpu::Buffer buffer = command_buffer_->GetTransferBuffer(id);
    if (buffer.shared_memory) {
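      // On Windows the sandboxed GPU process cannot duplicate the shared
      // memory handle into the renderer itself, so the duplication is
      // brokered through the browser process.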
#if defined(OS_WIN)
      transfer_buffer = NULL;
      BrokerDuplicateHandle(buffer.shared_memory->handle(),
          channel_->renderer_pid(), &transfer_buffer, FILE_MAP_READ |
          FILE_MAP_WRITE, 0);
      DCHECK(transfer_buffer != NULL);
#else
      buffer.shared_memory->ShareToProcess(channel_->renderer_pid(),
                                           &transfer_buffer);
#endif
      size = buffer.size;
    }

    GpuCommandBufferMsg_GetTransferBuffer::WriteReplyParams(reply_message,
                                                            transfer_buffer,
                                                            size);
  } else {
    reply_message->set_reply_error();
  }
  Send(reply_message);
}

void GpuCommandBufferStub::OnCommandProcessed() {
  if (watchdog_)
    watchdog_->CheckArmed();
}

void GpuCommandBufferStub::ReportState() {
  if (!CheckContextLost())
    command_buffer_->UpdateState();
}

void GpuCommandBufferStub::PutChanged() {
  FastSetActiveURL(active_url_, active_url_hash_);
  scheduler_->PutChanged();
}

void GpuCommandBufferStub::OnCreateVideoDecoder(
    media::VideoCodecProfile profile,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
  int decoder_route_id = channel_->GenerateRouteID();
  GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
      decoder_route_id, this, channel_->io_message_loop());
  decoder->Initialize(profile, reply_message);
  // decoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetVisible(visible);
}

void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
  sync_points_.push_back(sync_point);
}

void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
  DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
  sync_points_.pop_front();
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->RetireSyncPoint(sync_point);
}
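
// If the sync point has already been retired the wait is a no-op and the
// stub stays scheduled; otherwise the stub is descheduled until
// OnSyncPointRetired reschedules it.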
bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  if (manager->sync_point_manager()->IsSyncPointRetired(sync_point))
    return true;

  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
                             "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(false);
  ++sync_point_wait_count_;
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSyncPointRetired,
                 this->AsWeakPtr()));
  return scheduler_->IsScheduled();
}

void GpuCommandBufferStub::OnSyncPointRetired() {
  --sync_point_wait_count_;
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
                           "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(true);
}

void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                 this->AsWeakPtr(),
                 id));
}

void GpuCommandBufferStub::OnSignalSyncPointAck(uint32 id) {
  Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_, id));
}

void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
  if (decoder_) {
    gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
    if (query_manager) {
      gpu::gles2::QueryManager::Query* query =
          query_manager->GetQuery(query_id);
      if (query) {
        query->AddCallback(
            base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                       this->AsWeakPtr(),
                       id));
        return;
      }
    }
  }
  // Something went wrong, run callback immediately.
  OnSignalSyncPointAck(id);
}

void GpuCommandBufferStub::OnReceivedClientManagedMemoryStats(
    const gpu::ManagedMemoryStats& stats) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnReceivedClientManagedMemoryStats");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetManagedMemoryStats(stats);
}

void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
    bool has_callback) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
  if (has_callback) {
    if (!memory_manager_client_state_) {
      memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
          this, surface_id_ != 0, true));
    }
  } else {
    memory_manager_client_state_.reset();
  }
}

void GpuCommandBufferStub::OnRegisterGpuMemoryBuffer(
    int32 id,
    gfx::GpuMemoryBufferHandle gpu_memory_buffer,
    uint32 width,
    uint32 height,
    uint32 internalformat) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterGpuMemoryBuffer");
  if (gpu_control_) {
    gpu_control_->RegisterGpuMemoryBuffer(id,
                                          gpu_memory_buffer,
                                          width,
                                          height,
                                          internalformat);
  }
}

void GpuCommandBufferStub::OnDestroyGpuMemoryBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyGpuMemoryBuffer");
  if (gpu_control_)
    gpu_control_->DestroyGpuMemoryBuffer(id);
}

void GpuCommandBufferStub::SendConsoleMessage(
    int32 id,
    const std::string& message) {
  GPUCommandBufferConsoleMessage console_message;
  console_message.id = id;
  console_message.message = message;
  IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
      route_id_, console_message);
  msg->set_unblock(true);
  Send(msg);
}

void GpuCommandBufferStub::SendCachedShader(
    const std::string& key, const std::string& shader) {
  channel_->CacheShader(key, shader);
}

void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}

void GpuCommandBufferStub::RemoveDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.RemoveObserver(observer);
}

void GpuCommandBufferStub::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> flag) {
  preemption_flag_ = flag;
  if (scheduler_)
    scheduler_->SetPreemptByFlag(preemption_flag_);
}
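
// Reports the total GPU memory queried from the GL context during
// initialization; returns false when that query was unsupported and the
// total is unknown (total_gpu_memory_ stayed 0).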
bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
  *bytes = total_gpu_memory_;
  return !!total_gpu_memory_;
}

gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
  if (!surface_.get())
    return gfx::Size();
  return surface_->GetSize();
}

gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
  return context_group_->memory_tracker();
}
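
// Notifies the client only when the allocation actually changes; the last
// value sent is cached so redundant IPCs are skipped.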
void GpuCommandBufferStub::SetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!last_memory_allocation_valid_ ||
      !allocation.Equals(last_memory_allocation_)) {
    Send(new GpuCommandBufferMsg_SetMemoryAllocation(
        route_id_, allocation));
  }

  last_memory_allocation_valid_ = true;
  last_memory_allocation_ = allocation;
}

void GpuCommandBufferStub::SuggestHaveFrontBuffer(
    bool suggest_have_frontbuffer) {
  // This can be called outside of OnMessageReceived, so the context needs
  // to be made current before calling methods on the surface.
  if (surface_.get() && MakeCurrent())
    surface_->SetFrontbufferAllocation(suggest_have_frontbuffer);
}

bool GpuCommandBufferStub::CheckContextLost() {
  DCHECK(command_buffer_);
  gpu::CommandBuffer::State state = command_buffer_->GetState();
  bool was_lost = state.error == gpu::error::kLostContext;
  // Lose all other contexts if the reset was triggered by the robustness
  // extension instead of being synthetic.
  if (was_lost && decoder_ && decoder_->WasContextLostByRobustnessExtension() &&
      (gfx::GLContext::LosesAllContextsOnContextLost() ||
       use_virtualized_gl_context_))
    channel_->LoseAllContexts();
  return was_lost;
}
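
// Forces this stub into the lost-context state and surfaces the loss to the
// client as a parse error.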
void GpuCommandBufferStub::MarkContextLost() {
  if (!command_buffer_ ||
      command_buffer_->GetState().error == gpu::error::kLostContext)
    return;

  command_buffer_->SetContextLostReason(gpu::error::kUnknown);
  if (decoder_)
    decoder_->LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB);
  command_buffer_->SetParseError(gpu::error::kLostContext);
}

uint64 GpuCommandBufferStub::GetMemoryUsage() const {
  return GetMemoryManager()->GetClientMemoryUsage(this);
}

}  // namespace content