1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
6 #include "base/bind_helpers.h"
7 #include "base/command_line.h"
8 #include "base/debug/trace_event.h"
10 #include "base/memory/shared_memory.h"
11 #include "base/time/time.h"
12 #include "build/build_config.h"
13 #include "content/common/gpu/devtools_gpu_instrumentation.h"
14 #include "content/common/gpu/gpu_channel.h"
15 #include "content/common/gpu/gpu_channel_manager.h"
16 #include "content/common/gpu/gpu_command_buffer_stub.h"
17 #include "content/common/gpu/gpu_memory_manager.h"
18 #include "content/common/gpu/gpu_memory_tracking.h"
19 #include "content/common/gpu/gpu_messages.h"
20 #include "content/common/gpu/gpu_watchdog.h"
21 #include "content/common/gpu/image_transport_surface.h"
22 #include "content/common/gpu/media/gpu_video_decode_accelerator.h"
23 #include "content/common/gpu/sync_point_manager.h"
24 #include "content/public/common/content_client.h"
25 #include "gpu/command_buffer/common/constants.h"
26 #include "gpu/command_buffer/common/gles2_cmd_utils.h"
27 #include "gpu/command_buffer/common/mailbox.h"
28 #include "gpu/command_buffer/service/gl_context_virtual.h"
29 #include "gpu/command_buffer/service/gl_state_restorer_impl.h"
30 #include "gpu/command_buffer/service/gpu_control_service.h"
31 #include "gpu/command_buffer/service/image_manager.h"
32 #include "gpu/command_buffer/service/logger.h"
33 #include "gpu/command_buffer/service/mailbox_manager.h"
34 #include "gpu/command_buffer/service/memory_tracking.h"
35 #include "gpu/command_buffer/service/query_manager.h"
36 #include "ui/gl/gl_bindings.h"
37 #include "ui/gl/gl_switches.h"
40 #include "content/public/common/sandbox_init.h"
43 #if defined(OS_ANDROID)
44 #include "content/common/gpu/stream_texture_android.h"
50 // The GpuCommandBufferMemoryTracker class provides a bridge between the
51 // ContextGroup's memory type managers and the GpuMemoryManager class.
// It forwards allocation-change notifications and availability queries to a
// GpuMemoryTrackingGroup created for the renderer owning this channel.
// NOTE(review): interior lines (access specifiers, closing braces) appear
// elided from this listing.
52 class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
// Creates a tracking group keyed by the renderer's pid so allocations can be
// attributed to this channel's client process.
54 explicit GpuCommandBufferMemoryTracker(GpuChannel* channel) :
55 tracking_group_(channel->gpu_channel_manager()->gpu_memory_manager()->
56 CreateTrackingGroup(channel->renderer_pid(), this)) {
// Relays an allocation size change for the given memory pool.
59 virtual void TrackMemoryAllocatedChange(
62 gpu::gles2::MemoryTracker::Pool pool) OVERRIDE {
63 tracking_group_->TrackMemoryAllocatedChange(
64 old_size, new_size, pool);
// Asks the memory manager whether |size_needed| bytes may be allocated.
67 virtual bool EnsureGPUMemoryAvailable(size_t size_needed) OVERRIDE {
68 return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
72 virtual ~GpuCommandBufferMemoryTracker() {
// Owned tracking group; released automatically on destruction.
74 scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;
76 DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
79 // FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
// hash of the URL matches the last one set (cached in a function-local
// static), making repeated calls with the same URL a cheap no-op.
81 void FastSetActiveURL(const GURL& url, size_t url_hash) {
82 // Leave the previously set URL in the empty case -- empty URLs are given by
83 // WebKitPlatformSupportImpl::createOffscreenGraphicsContext3D. Hopefully the
84 // onscreen context URL was set previously and will show up even when a crash
85 // occurs during offscreen command processing.
// Cache of the most recently set URL hash; 0 means "nothing set yet".
88 static size_t g_last_url_hash = 0;
89 if (url_hash != g_last_url_hash) {
90 g_last_url_hash = url_hash;
91 GetContentClient()->SetActiveURL(url);
95 // The first time polling a fence, delay some extra time to allow other
96 // stubs to process some work, or else the timing of the fences could
97 // allow a pattern of alternating fast and slow frames to occur.
98 const int64 kHandleMoreWorkPeriodMs = 2;
// Shorter poll period used while the stub still has scheduled work pending.
99 const int64 kHandleMoreWorkPeriodBusyMs = 1;
101 // Prevents idle work from being starved.
102 const int64 kMaxTimeSinceIdleMs = 10;
// Constructor. Copies the creation parameters into members, publishes the
// active URL for crash attribution, and either shares the ContextGroup of
// |share_group| or creates a fresh one backed by a new memory tracker.
// NOTE(review): several parameter and initializer lines appear elided from
// this listing.
106 GpuCommandBufferStub::GpuCommandBufferStub(
108 GpuCommandBufferStub* share_group,
109 const gfx::GLSurfaceHandle& handle,
110 gpu::gles2::MailboxManager* mailbox_manager,
111 gpu::gles2::ImageManager* image_manager,
112 const gfx::Size& size,
113 const gpu::gles2::DisallowedFeatures& disallowed_features,
114 const std::vector<int32>& attribs,
115 gfx::GpuPreference gpu_preference,
116 bool use_virtualized_gl_context,
119 GpuWatchdog* watchdog,
121 const GURL& active_url)
125 disallowed_features_(disallowed_features),
126 requested_attribs_(attribs),
127 gpu_preference_(gpu_preference),
128 use_virtualized_gl_context_(use_virtualized_gl_context),
130 surface_id_(surface_id),
132 last_flush_count_(0),
133 last_memory_allocation_valid_(false),
135 sync_point_wait_count_(0),
136 delayed_work_scheduled_(false),
137 previous_messages_processed_(0),
138 active_url_(active_url),
139 total_gpu_memory_(0) {
// Hash once up front so FastSetActiveURL can compare cheaply later.
140 active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
141 FastSetActiveURL(active_url_, active_url_hash_);
// Share the existing group when a share-group stub was supplied; otherwise
// build a new ContextGroup with a per-channel memory tracker.
143 context_group_ = share_group->context_group_;
145 context_group_ = new gpu::gles2::ContextGroup(
148 new GpuCommandBufferMemoryTracker(channel),
// Driver workarounds can force virtualized contexts regardless of request.
153 use_virtualized_gl_context_ |=
154 context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;
// Destructor: notifies the browser that this command buffer is gone.
157 GpuCommandBufferStub::~GpuCommandBufferStub() {
160 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
161 gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(surface_id()));
// Convenience accessor for the per-process GPU memory manager.
164 GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
165 return channel()->gpu_channel_manager()->gpu_memory_manager();
// Dispatches an incoming IPC message to the matching On* handler. Makes the
// GL context current first (except for the listed message types that do not
// need it), then schedules any delayed work the handler produced.
168 bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
169 devtools_gpu_instrumentation::ScopedGpuTask task(channel());
170 FastSetActiveURL(active_url_, active_url_hash_);
172 // Ensure the appropriate GL context is current before handling any IPC
173 // messages directed at the command buffer. This ensures that the message
174 // handler can assume that the context is current (not necessary for
175 // Echo, RetireSyncPoint, or WaitSyncPoint).
176 if (decoder_.get() &&
177 message.type() != GpuCommandBufferMsg_Echo::ID &&
178 message.type() != GpuCommandBufferMsg_GetStateFast::ID &&
179 message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
180 message.type() != GpuCommandBufferMsg_SetLatencyInfo::ID) {
185 // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
186 // here. This is so the reply can be delayed if the scheduler is unscheduled.
188 IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
189 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
191 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
193 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
194 OnProduceFrontBuffer);
195 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Echo, OnEcho);
196 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetState, OnGetState);
197 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetStateFast,
199 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
200 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetLatencyInfo, OnSetLatencyInfo);
201 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
202 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
203 OnRegisterTransferBuffer);
204 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
205 OnDestroyTransferBuffer);
206 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetTransferBuffer,
207 OnGetTransferBuffer);
208 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
209 OnCreateVideoDecoder)
210 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
212 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
214 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
216 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
218 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SendClientManagedMemoryStats,
219 OnReceivedClientManagedMemoryStats)
221 GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
222 OnSetClientHasMemoryAllocationChangedCallback)
223 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterGpuMemoryBuffer,
224 OnRegisterGpuMemoryBuffer);
225 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyGpuMemoryBuffer,
226 OnDestroyGpuMemoryBuffer);
227 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
228 OnCreateStreamTexture)
229 IPC_MESSAGE_UNHANDLED(handled = false)
230 IPC_END_MESSAGE_MAP()
232 // Ensure that any delayed work that was created will be handled.
233 ScheduleDelayedWork(kHandleMoreWorkPeriodMs);
// Forwards an outgoing IPC message through the owning channel.
239 bool GpuCommandBufferStub::Send(IPC::Message* message) {
240 return channel_->Send(message);
// A stub with no scheduler yet counts as scheduled.
243 bool GpuCommandBufferStub::IsScheduled() {
244 return (!scheduler_.get() || scheduler_->IsScheduled());
// True only when a scheduler exists and it reports pending work.
247 bool GpuCommandBufferStub::HasMoreWork() {
248 return scheduler_.get() && scheduler_->HasMoreWork();
// Delayed-work callback: polls unschedule fences and, when they are all
// complete, performs idle work (rate-limited by kMaxTimeSinceIdleMs), then
// reschedules itself at the busy period.
251 void GpuCommandBufferStub::PollWork() {
252 TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
253 delayed_work_scheduled_ = false;
254 FastSetActiveURL(active_url_, active_url_hash_);
255 if (decoder_.get() && !MakeCurrent())
259 bool fences_complete = scheduler_->PollUnscheduleFences();
260 // Perform idle work if all fences are complete.
261 if (fences_complete) {
262 uint64 current_messages_processed =
263 channel()->gpu_channel_manager()->MessagesProcessed();
264 // We're idle when no messages were processed or scheduled.
266 (previous_messages_processed_ == current_messages_processed) &&
267 !channel()->gpu_channel_manager()->HandleMessagesScheduled();
268 if (!is_idle && !last_idle_time_.is_null()) {
269 base::TimeDelta time_since_idle = base::TimeTicks::Now() -
271 base::TimeDelta max_time_since_idle =
272 base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);
274 // Force idle when it's been too long since last time we were idle.
275 if (time_since_idle > max_time_since_idle)
280 last_idle_time_ = base::TimeTicks::Now();
281 scheduler_->PerformIdleWork();
285 ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
// True while the put pointer is ahead of the get pointer and the buffer has
// not entered an error state.
288 bool GpuCommandBufferStub::HasUnprocessedCommands() {
289 if (command_buffer_) {
290 gpu::CommandBuffer::State state = command_buffer_->GetLastState();
291 return state.put_offset != state.get_offset &&
292 !gpu::error::IsError(state.error);
// Posts a single delayed PollWork task (at most one outstanding at a time).
// Resets the idle clock when there is no work, and snapshots the processed
// message count so PollWork can detect idleness.
297 void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
298 if (!HasMoreWork()) {
299 last_idle_time_ = base::TimeTicks();
303 if (delayed_work_scheduled_)
305 delayed_work_scheduled_ = true;
307 // Idle when no messages are processed between now and when
308 // PollWork is called.
309 previous_messages_processed_ =
310 channel()->gpu_channel_manager()->MessagesProcessed();
311 if (last_idle_time_.is_null())
312 last_idle_time_ = base::TimeTicks::Now();
314 // IsScheduled() returns true after passing all unschedule fences
315 // and this is when we can start performing idle work. Idle work
316 // is done synchronously so we can set delay to 0 and instead poll
317 // for more work at the rate idle work is performed. This also ensures
318 // that idle work is done as efficiently as possible without any
319 // unnecessary delays.
320 if (scheduler_.get() &&
321 scheduler_->IsScheduled() &&
322 scheduler_->HasMoreIdleWork()) {
// AsWeakPtr() keeps the posted task safe if this stub is destroyed first.
326 base::MessageLoop::current()->PostDelayedTask(
328 base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
329 base::TimeDelta::FromMilliseconds(delay));
// Echo handler: bounces a copy of the message back to the sender.
332 void GpuCommandBufferStub::OnEcho(const IPC::Message& message) {
333 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnEcho");
334 Send(new IPC::Message(message));
// Makes the decoder's GL context current. On failure the context is treated
// as lost: the loss reason and a parse error are recorded on the command
// buffer so the client finds out.
337 bool GpuCommandBufferStub::MakeCurrent() {
338 if (decoder_->MakeCurrent())
340 DLOG(ERROR) << "Context lost because MakeCurrent failed.";
341 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
342 command_buffer_->SetParseError(gpu::error::kLostContext);
// Tears down the stub: informs the browser for offscreen contexts, drains
// outstanding sync points, notifies destruction observers, then destroys the
// decoder (with the context current if it could be made so) and the command
// buffer.
347 void GpuCommandBufferStub::Destroy() {
348 if (handle_.is_null() && !active_url_.is_empty()) {
349 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
350 gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
354 memory_manager_client_state_.reset();
// Retire any sync points still queued so waiters are not stranded.
356 while (!sync_points_.empty())
357 OnRetireSyncPoint(sync_points_.front());
360 decoder_->set_engine(NULL);
362 // The scheduler has raw references to the decoder and the command buffer so
363 // destroy it before those.
// Only pass have_context=true to Destroy() if MakeCurrent succeeded and the
// context is not already lost; GL cleanup needs a current context.
366 bool have_context = false;
367 if (decoder_ && command_buffer_ &&
368 command_buffer_->GetState().error != gpu::error::kLostContext)
369 have_context = decoder_->MakeCurrent();
370 FOR_EACH_OBSERVER(DestructionObserver,
371 destruction_observers_,
372 OnWillDestroyStub());
375 decoder_->Destroy(have_context);
379 command_buffer_.reset();
381 // Remove this after crbug.com/248395 is sorted out.
// Sends a failure reply for the Initialize message (false + empty caps).
385 void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
387 GpuCommandBufferMsg_Initialize::WriteReplyParams(
388 reply_message, false, gpu::Capabilities());
// Handles GpuCommandBufferMsg_Initialize: builds the command buffer service,
// decoder and scheduler, creates the (onscreen or offscreen, possibly
// virtualized) GL context, wires up all callbacks, maps the shared state
// buffer, and replies with the resulting capabilities. Every failure path
// goes through OnInitializeFailed(reply_message).
392 void GpuCommandBufferStub::OnInitialize(
393 base::SharedMemoryHandle shared_state_handle,
394 IPC::Message* reply_message) {
395 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
// Initialize must only run once per stub.
396 DCHECK(!command_buffer_.get());
398 scoped_ptr<base::SharedMemory> shared_state_shm(
399 new base::SharedMemory(shared_state_handle, false));
401 command_buffer_.reset(new gpu::CommandBufferService(
402 context_group_->transfer_buffer_manager()));
404 bool result = command_buffer_->Initialize();
407 decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));
409 scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
412 if (preemption_flag_.get())
413 scheduler_->SetPreemptByFlag(preemption_flag_);
415 decoder_->set_engine(scheduler_.get());
// Onscreen stubs get an ImageTransportSurface; offscreen stubs reuse the
// channel manager's shared default offscreen surface.
417 if (!handle_.is_null()) {
418 #if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
420 LOG(ERROR) << "No software support.\n";
421 OnInitializeFailed(reply_message);
426 surface_ = ImageTransportSurface::CreateSurface(
427 channel_->gpu_channel_manager(),
431 GpuChannelManager* manager = channel_->gpu_channel_manager();
432 surface_ = manager->GetDefaultOffscreenSurface();
435 if (!surface_.get()) {
436 DLOG(ERROR) << "Failed to create surface.\n";
437 OnInitializeFailed(reply_message);
// Virtualized path: share one real context per share group and wrap it in a
// GLContextVirtual bound to this decoder.
441 scoped_refptr<gfx::GLContext> context;
442 if (use_virtualized_gl_context_ && channel_->share_group()) {
443 context = channel_->share_group()->GetSharedContext();
444 if (!context.get()) {
445 context = gfx::GLContext::CreateGLContext(
446 channel_->share_group(),
447 channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
449 channel_->share_group()->SetSharedContext(context.get());
451 // This should be a non-virtual GL context.
452 DCHECK(context->GetHandle());
453 context = new gpu::GLContextVirtual(
454 channel_->share_group(), context.get(), decoder_->AsWeakPtr());
455 if (!context->Initialize(surface_.get(), gpu_preference_)) {
456 // TODO(sievers): The real context created above for the default
457 // offscreen surface might not be compatible with this surface.
458 // Need to adjust at least GLX to be able to create the initial context
459 // with a config that is compatible with onscreen and offscreen surfaces.
462 DLOG(ERROR) << "Failed to initialize virtual GL context.";
463 OnInitializeFailed(reply_message);
// Non-virtualized fallback: create a real context directly on the surface.
467 if (!context.get()) {
468 context = gfx::GLContext::CreateGLContext(
469 channel_->share_group(), surface_.get(), gpu_preference_);
471 if (!context.get()) {
472 DLOG(ERROR) << "Failed to create context.\n";
473 OnInitializeFailed(reply_message);
477 if (!context->MakeCurrent(surface_.get())) {
478 LOG(ERROR) << "Failed to make context current.";
479 OnInitializeFailed(reply_message);
483 if (!context->GetGLStateRestorer()) {
484 context->SetGLStateRestorer(
485 new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
// Total GPU memory is optional; 0 means "unknown" (see GetTotalGpuMemory).
488 if (!context->GetTotalGpuMemory(&total_gpu_memory_))
489 total_gpu_memory_ = 0;
491 if (!context_group_->has_program_cache()) {
492 context_group_->set_program_cache(
493 channel_->gpu_channel_manager()->program_cache());
496 // Initialize the decoder with either the view or pbuffer GLContext.
497 if (!decoder_->Initialize(surface_,
501 disallowed_features_,
502 requested_attribs_)) {
503 DLOG(ERROR) << "Failed to initialize decoder.";
504 OnInitializeFailed(reply_message);
509 new gpu::GpuControlService(context_group_->image_manager(),
511 context_group_->mailbox_manager(),
513 decoder_->GetCapabilities()));
515 if (CommandLine::ForCurrentProcess()->HasSwitch(
516 switches::kEnableGPUServiceLogging)) {
517 decoder_->set_log_commands(true);
// base::Unretained(this) is used below; presumably safe because this stub
// owns the decoder/command buffer/scheduler that hold the callbacks — the
// callbacks cannot outlive the stub. TODO(review): confirm.
520 decoder_->GetLogger()->SetMsgCallback(
521 base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
522 base::Unretained(this)));
523 decoder_->SetShaderCacheCallback(
524 base::Bind(&GpuCommandBufferStub::SendCachedShader,
525 base::Unretained(this)));
526 decoder_->SetWaitSyncPointCallback(
527 base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
528 base::Unretained(this)));
530 command_buffer_->SetPutOffsetChangeCallback(
531 base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
532 command_buffer_->SetGetBufferChangeCallback(
533 base::Bind(&gpu::GpuScheduler::SetGetBuffer,
534 base::Unretained(scheduler_.get())));
535 command_buffer_->SetParseErrorCallback(
536 base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
537 scheduler_->SetSchedulingChangedCallback(
538 base::Bind(&GpuChannel::StubSchedulingChanged,
539 base::Unretained(channel_)));
542 scheduler_->SetCommandProcessedCallback(
543 base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
544 base::Unretained(this)));
547 if (!command_buffer_->SetSharedStateBuffer(shared_state_shm.Pass())) {
// TODO(review): typo in the log message below ("stae" -> "state"); it is a
// runtime string, so it is left untouched in this documentation-only pass.
548 DLOG(ERROR) << "Failed to map shared stae buffer.";
549 OnInitializeFailed(reply_message);
// Success: reply with true plus the decoder-derived capabilities.
553 GpuCommandBufferMsg_Initialize::WriteReplyParams(
554 reply_message, true, gpu_control_->GetCapabilities());
557 if (handle_.is_null() && !active_url_.is_empty()) {
558 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
559 gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
// Validates incoming latency info and forwards it to the registered callback
// (if any).
564 void GpuCommandBufferStub::OnSetLatencyInfo(
565 const std::vector<ui::LatencyInfo>& latency_info) {
566 if (!ui::LatencyInfo::Verify(latency_info,
567 "GpuCommandBufferStub::OnSetLatencyInfo"))
569 if (!latency_info_callback_.is_null())
570 latency_info_callback_.Run(latency_info);
// Creates a StreamTexture for |texture_id|; Android-only (compiled out on
// other platforms).
573 void GpuCommandBufferStub::OnCreateStreamTexture(uint32 texture_id,
575 #if defined(OS_ANDROID)
576 *stream_id = StreamTexture::Create(this, texture_id);
// Stores the callback invoked by OnSetLatencyInfo.
582 void GpuCommandBufferStub::SetLatencyInfoCallback(
583 const LatencyInfoCallback& callback) {
584 latency_info_callback_ = callback;
// Looks up |attr| in the flat (enum, value) pair list of requested context
// attributes.
587 int32 GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
588 // The command buffer is pairs of enum, value
589 // search for the requested attribute, return the value.
590 for (std::vector<int32>::const_iterator it = requested_attribs_.begin();
591 it != requested_attribs_.end(); ++it) {
// Points the command buffer's get/put ring at the shared-memory buffer
// identified by |shm_id| and replies to the (delayed-reply) message.
599 void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
600 IPC::Message* reply_message) {
601 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
603 command_buffer_->SetGetBuffer(shm_id);
// Asks the decoder to publish the front buffer under |mailbox| so another
// context can consume it.
607 void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
608 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
610 LOG(ERROR) << "Can't produce front buffer before initialization.";
614 decoder_->ProduceFrontBuffer(mailbox);
// Replies with the current command buffer state, or a reply error when the
// command buffer does not exist yet.
617 void GpuCommandBufferStub::OnGetState(IPC::Message* reply_message) {
618 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetState");
619 if (command_buffer_) {
620 gpu::CommandBuffer::State state = command_buffer_->GetState();
622 GpuCommandBufferMsg_GetState::WriteReplyParams(reply_message, state);
624 DLOG(ERROR) << "no command_buffer.";
625 reply_message->set_reply_error();
// Parse-error callback from the command buffer: tells the client the context
// was destroyed (unblocking message) and reports the loss to the browser.
630 void GpuCommandBufferStub::OnParseError() {
631 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
632 DCHECK(command_buffer_.get());
633 gpu::CommandBuffer::State state = command_buffer_->GetState();
634 IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
635 route_id_, state.context_lost_reason);
// Unblock so a renderer blocked on a sync call still receives this.
636 msg->set_unblock(true);
639 // Tell the browser about this context loss as well, so it can
640 // determine whether client APIs like WebGL need to be immediately
641 // blocked from automatically running.
642 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
643 gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
644 handle_.is_null(), state.context_lost_reason, active_url_));
// Fast-path state query: replies with the current state without making the
// GL context current (see OnMessageReceived's exemption list).
649 void GpuCommandBufferStub::OnGetStateFast(IPC::Message* reply_message) {
650 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetStateFast");
651 DCHECK(command_buffer_.get());
653 gpu::CommandBuffer::State state = command_buffer_->GetState();
654 GpuCommandBufferMsg_GetStateFast::WriteReplyParams(reply_message, state);
// Handles an async flush: advances the put offset if |flush_count| is newer
// than the last one seen (wraparound-tolerant unsigned comparison).
658 void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset,
659 uint32 flush_count) {
660 TRACE_EVENT1("gpu", "GpuCommandBufferStub::OnAsyncFlush",
661 "put_offset", put_offset);
662 DCHECK(command_buffer_.get());
// NOTE(review): the window is 0x8000000U (2^27), not 2^31 — confirm this
// narrower-than-half-range threshold is intentional.
663 if (flush_count - last_flush_count_ < 0x8000000U) {
664 last_flush_count_ = flush_count;
665 command_buffer_->Flush(put_offset);
667 // We received this message out-of-order. This should not happen but is here
668 // to catch regressions. Ignore the message.
669 NOTREACHED() << "Received a Flush message out-of-order";
// Re-runs the flush after the stub is rescheduled; compares get offsets
// before and after to detect progress.
675 void GpuCommandBufferStub::OnRescheduled() {
676 gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
677 command_buffer_->Flush(pre_state.put_offset);
678 gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();
680 if (pre_state.get_offset != post_state.get_offset)
// Maps renderer-provided shared memory into this process and registers it
// with the command buffer under |id|.
684 void GpuCommandBufferStub::OnRegisterTransferBuffer(
686 base::SharedMemoryHandle transfer_buffer,
688 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");
690 // Take ownership of the memory and map it into this process.
691 // This validates the size.
692 scoped_ptr<base::SharedMemory> shared_memory(
693 new base::SharedMemory(transfer_buffer, false));
694 if (!shared_memory->Map(size)) {
695 DVLOG(0) << "Failed to map shared memory.";
700 command_buffer_->RegisterTransferBuffer(id, shared_memory.Pass(), size);
// Unregisters (and thereby releases) the transfer buffer |id|.
703 void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
704 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");
707 command_buffer_->DestroyTransferBuffer(id);
// Replies with a handle to transfer buffer |id|, duplicated for the renderer
// process (Windows broker path vs. ShareToProcess elsewhere).
710 void GpuCommandBufferStub::OnGetTransferBuffer(
712 IPC::Message* reply_message) {
713 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetTransferBuffer");
714 if (command_buffer_) {
715 base::SharedMemoryHandle transfer_buffer = base::SharedMemoryHandle();
718 scoped_refptr<gpu::Buffer> buffer = command_buffer_->GetTransferBuffer(id);
719 if (buffer && buffer->shared_memory()) {
721 transfer_buffer = NULL;
722 BrokerDuplicateHandle(buffer->shared_memory()->handle(),
723 channel_->renderer_pid(),
725 FILE_MAP_READ | FILE_MAP_WRITE,
727 DCHECK(transfer_buffer != NULL);
729 buffer->shared_memory()->ShareToProcess(channel_->renderer_pid(),
732 size = buffer->size();
735 GpuCommandBufferMsg_GetTransferBuffer::WriteReplyParams(reply_message,
739 reply_message->set_reply_error();
// Pets the GPU watchdog each time a command completes so long-running but
// progressing work is not killed.
744 void GpuCommandBufferStub::OnCommandProcessed() {
746 watchdog_->CheckArmed();
// Pushes updated command buffer state unless the context has been lost.
749 void GpuCommandBufferStub::ReportState() {
750 if (!CheckContextLost())
751 command_buffer_->UpdateState();
// Put-offset-changed callback: refresh the crash URL, then let the scheduler
// process the newly available commands.
754 void GpuCommandBufferStub::PutChanged() {
755 FastSetActiveURL(active_url_, active_url_hash_);
756 scheduler_->PutChanged();
// Creates a hardware video decode accelerator on a fresh route; the
// accelerator replies to |reply_message| itself once initialized.
759 void GpuCommandBufferStub::OnCreateVideoDecoder(
760 media::VideoCodecProfile profile,
761 IPC::Message* reply_message) {
762 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
763 int decoder_route_id = channel_->GenerateRouteID();
764 GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
765 decoder_route_id, this, channel_->io_message_loop());
766 decoder->Initialize(profile, reply_message);
767 // decoder is registered as a DestructionObserver of this stub and will
768 // self-delete during destruction of this stub.
// Forwards surface visibility changes to the memory manager client state.
771 void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
772 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
773 if (memory_manager_client_state_)
774 memory_manager_client_state_->SetVisible(visible);
// Queues a sync point this stub is expected to retire (FIFO order; see
// OnRetireSyncPoint).
777 void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
778 sync_points_.push_back(sync_point);
// Retires the oldest queued sync point. Sync points must retire in insertion
// order (DCHECK). Pushes texture updates first when the mailbox manager uses
// sync-based sharing.
781 void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
782 DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
783 sync_points_.pop_front();
784 if (context_group_->mailbox_manager()->UsesSync() && MakeCurrent())
785 context_group_->mailbox_manager()->PushTextureUpdates();
786 GpuChannelManager* manager = channel_->gpu_channel_manager();
787 manager->sync_point_manager()->RetireSyncPoint(sync_point);
// Decoder callback for a wait: if the sync point is already retired the wait
// completes immediately; otherwise the scheduler is descheduled until
// OnSyncPointRetired fires. Returns whether the stub is still scheduled.
790 bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
793 GpuChannelManager* manager = channel_->gpu_channel_manager();
794 if (manager->sync_point_manager()->IsSyncPointRetired(sync_point))
797 if (sync_point_wait_count_ == 0) {
798 TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
799 "GpuCommandBufferStub", this);
801 scheduler_->SetScheduled(false);
802 ++sync_point_wait_count_;
803 manager->sync_point_manager()->AddSyncPointCallback(
805 base::Bind(&GpuCommandBufferStub::OnSyncPointRetired,
807 return scheduler_->IsScheduled();
// Counterpart to OnWaitSyncPoint: re-schedules once the last outstanding
// wait is satisfied.
810 void GpuCommandBufferStub::OnSyncPointRetired() {
811 --sync_point_wait_count_;
812 if (sync_point_wait_count_ == 0) {
813 TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
814 "GpuCommandBufferStub", this);
816 scheduler_->SetScheduled(true);
// Registers a callback that acks |id| back to the client once |sync_point|
// retires.
819 void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
820 GpuChannelManager* manager = channel_->gpu_channel_manager();
821 manager->sync_point_manager()->AddSyncPointCallback(
823 base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
// Sends the SignalSyncPointAck IPC for request |id|.
828 void GpuCommandBufferStub::OnSignalSyncPointAck(uint32 id) {
829 Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_, id));
// Arranges an ack for |id| when query |query_id| completes; if the query (or
// its manager) cannot be found, acks immediately so the client never hangs.
832 void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
834 gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
836 gpu::gles2::QueryManager::Query* query =
837 query_manager->GetQuery(query_id);
840 base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
847 // Something went wrong, run callback immediately.
848 OnSignalSyncPointAck(id);
// Forwards client-reported managed memory statistics to the memory manager
// client state, if one exists.
852 void GpuCommandBufferStub::OnReceivedClientManagedMemoryStats(
853 const gpu::ManagedMemoryStats& stats) {
856 "GpuCommandBufferStub::OnReceivedClientManagedMemoryStats");
857 if (memory_manager_client_state_)
858 memory_manager_client_state_->SetManagedMemoryStats(stats);
// Creates or destroys the memory manager client state depending on whether
// the client wants allocation-changed callbacks. surface_id_ != 0 marks this
// stub as onscreen for the memory manager.
861 void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
865 "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
867 if (!memory_manager_client_state_) {
868 memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
869 this, surface_id_ != 0, true));
872 memory_manager_client_state_.reset();
// Registers a GpuMemoryBuffer handle with the GPU control service under |id|.
876 void GpuCommandBufferStub::OnRegisterGpuMemoryBuffer(
878 gfx::GpuMemoryBufferHandle gpu_memory_buffer,
881 uint32 internalformat) {
882 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterGpuMemoryBuffer");
884 gpu_control_->RegisterGpuMemoryBuffer(id,
// Unregisters the GpuMemoryBuffer previously registered under |id|.
892 void GpuCommandBufferStub::OnDestroyGpuMemoryBuffer(int32 id) {
893 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyGpuMemoryBuffer");
895 gpu_control_->DestroyGpuMemoryBuffer(id);
// Sends a GL console message (e.g. from the decoder's logger) to the client
// as an unblocking IPC.
898 void GpuCommandBufferStub::SendConsoleMessage(
900 const std::string& message) {
901 GPUCommandBufferConsoleMessage console_message;
902 console_message.id = id;
903 console_message.message = message;
904 IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
905 route_id_, console_message);
906 msg->set_unblock(true);
// Forwards a compiled-shader cache entry to the channel for persistence.
910 void GpuCommandBufferStub::SendCachedShader(
911 const std::string& key, const std::string& shader) {
912 channel_->CacheShader(key, shader);
// Registers an observer notified in Destroy() via OnWillDestroyStub().
915 void GpuCommandBufferStub::AddDestructionObserver(
916 DestructionObserver* observer) {
917 destruction_observers_.AddObserver(observer);
// Unregisters a previously added destruction observer.
920 void GpuCommandBufferStub::RemoveDestructionObserver(
921 DestructionObserver* observer) {
922 destruction_observers_.RemoveObserver(observer);
// Stores the preemption flag and applies it to the scheduler (also applied
// during OnInitialize if set before the scheduler exists).
925 void GpuCommandBufferStub::SetPreemptByFlag(
926 scoped_refptr<gpu::PreemptionFlag> flag) {
927 preemption_flag_ = flag;
929 scheduler_->SetPreemptByFlag(preemption_flag_);
// Reports the total GPU memory captured at initialization; returns false
// when the amount is unknown (0).
932 bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
933 *bytes = total_gpu_memory_;
934 return !!total_gpu_memory_;
// Returns the current size of the stub's surface.
937 gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
940 return surface_->GetSize();
// Exposes the ContextGroup's memory tracker (the bridge created in the
// constructor for non-shared groups).
943 gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
944 return context_group_->memory_tracker();
// Sends the new memory allocation to the client, but only when it differs
// from the last one sent, then caches it for the next comparison.
947 void GpuCommandBufferStub::SetMemoryAllocation(
948 const gpu::MemoryAllocation& allocation) {
949 if (!last_memory_allocation_valid_ ||
950 !allocation.Equals(last_memory_allocation_)) {
951 Send(new GpuCommandBufferMsg_SetMemoryAllocation(
952 route_id_, allocation));
955 last_memory_allocation_valid_ = true;
956 last_memory_allocation_ = allocation;
// Tells the surface whether it should keep a front buffer allocated; makes
// the context current first since this may run outside IPC dispatch.
959 void GpuCommandBufferStub::SuggestHaveFrontBuffer(
960 bool suggest_have_frontbuffer) {
961 // This can be called outside of OnMessageReceived, so the context needs
962 // to be made current before calling methods on the surface.
963 if (surface_.get() && MakeCurrent())
964 surface_->SetFrontbufferAllocation(suggest_have_frontbuffer);
// Checks whether the command buffer is in the lost-context error state and,
// for robustness-triggered resets on platforms/configurations where one loss
// implies all, loses every context on the channel.
967 bool GpuCommandBufferStub::CheckContextLost() {
968 DCHECK(command_buffer_);
969 gpu::CommandBuffer::State state = command_buffer_->GetState();
970 bool was_lost = state.error == gpu::error::kLostContext;
971 // Lose all other contexts if the reset was triggered by the robustness
972 // extension instead of being synthetic.
973 if (was_lost && decoder_ && decoder_->WasContextLostByRobustnessExtension() &&
974 (gfx::GLContext::LosesAllContextsOnContextLost() ||
975 use_virtualized_gl_context_))
976 channel_->LoseAllContexts();
// Force-marks this context lost (synthetic loss, reason kUnknown); no-op if
// there is no command buffer or it is already lost.
980 void GpuCommandBufferStub::MarkContextLost() {
981 if (!command_buffer_ ||
982 command_buffer_->GetState().error == gpu::error::kLostContext)
985 command_buffer_->SetContextLostReason(gpu::error::kUnknown);
987 decoder_->LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB);
988 command_buffer_->SetParseError(gpu::error::kLostContext);
// Reports this client's memory usage as tracked by the memory manager.
991 uint64 GpuCommandBufferStub::GetMemoryUsage() const {
992 return GetMemoryManager()->GetClientMemoryUsage(this);
995 } // namespace content