Upstream version 6.35.121.0
src/content/common/gpu/gpu_command_buffer_stub.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/hash.h"
#include "base/memory/shared_memory.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "content/common/gpu/devtools_gpu_instrumentation.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/gpu_watchdog.h"
#include "content/common/gpu/image_transport_surface.h"
#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_client.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
#include "gpu/command_buffer/service/gpu_control_service.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_switches.h"

#if defined(OS_WIN)
#include "content/public/common/sandbox_init.h"
#endif

#if defined(OS_ANDROID)
#include "content/common/gpu/stream_texture_android.h"
#endif
46
47 namespace content {
48 namespace {
49
50 // The GpuCommandBufferMemoryTracker class provides a bridge between the
51 // ContextGroup's memory type managers and the GpuMemoryManager class.
52 class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
53  public:
54   explicit GpuCommandBufferMemoryTracker(GpuChannel* channel) :
55       tracking_group_(channel->gpu_channel_manager()->gpu_memory_manager()->
56           CreateTrackingGroup(channel->renderer_pid(), this)) {
57   }
58
59   virtual void TrackMemoryAllocatedChange(
60       size_t old_size,
61       size_t new_size,
62       gpu::gles2::MemoryTracker::Pool pool) OVERRIDE {
63     tracking_group_->TrackMemoryAllocatedChange(
64         old_size, new_size, pool);
65   }
66
67   virtual bool EnsureGPUMemoryAvailable(size_t size_needed) OVERRIDE {
68     return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
69   };
70
71  private:
72   virtual ~GpuCommandBufferMemoryTracker() {
73   }
74   scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;
75
76   DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
77 };
78
79 // FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
80 // url_hash matches.
81 void FastSetActiveURL(const GURL& url, size_t url_hash) {
82   // Leave the previously set URL in the empty case -- empty URLs are given by
83   // WebKitPlatformSupportImpl::createOffscreenGraphicsContext3D. Hopefully the
84   // onscreen context URL was set previously and will show up even when a crash
85   // occurs during offscreen command processing.
86   if (url.is_empty())
87     return;
88   static size_t g_last_url_hash = 0;
89   if (url_hash != g_last_url_hash) {
90     g_last_url_hash = url_hash;
91     GetContentClient()->SetActiveURL(url);
92   }
93 }
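
// Note: g_last_url_hash above is function-local static state with no locking,
// so this shortcut assumes every stub calls FastSetActiveURL from a single
// thread (the GPU main thread that dispatches these IPC messages).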

// When polling a fence for the first time, delay a little extra so other
// stubs can process some work; otherwise the timing of the fences could
// produce a pattern of alternating fast and slow frames.
const int64 kHandleMoreWorkPeriodMs = 2;
const int64 kHandleMoreWorkPeriodBusyMs = 1;

// Prevents idle work from being starved.
const int64 kMaxTimeSinceIdleMs = 10;
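
// Taken together the cadence is, roughly: after handling a message, poll
// again in kHandleMoreWorkPeriodMs; after each poll, poll again in
// kHandleMoreWorkPeriodBusyMs; and if more than kMaxTimeSinceIdleMs pass
// without an idle period, PollWork() below forces one so that idle work
// cannot be starved indefinitely.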

}  // namespace

GpuCommandBufferStub::GpuCommandBufferStub(
    GpuChannel* channel,
    GpuCommandBufferStub* share_group,
    const gfx::GLSurfaceHandle& handle,
    gpu::gles2::MailboxManager* mailbox_manager,
    gpu::gles2::ImageManager* image_manager,
    const gfx::Size& size,
    const gpu::gles2::DisallowedFeatures& disallowed_features,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    bool use_virtualized_gl_context,
    int32 route_id,
    int32 surface_id,
    GpuWatchdog* watchdog,
    bool software,
    const GURL& active_url)
    : channel_(channel),
      handle_(handle),
      initial_size_(size),
      disallowed_features_(disallowed_features),
      requested_attribs_(attribs),
      gpu_preference_(gpu_preference),
      use_virtualized_gl_context_(use_virtualized_gl_context),
      route_id_(route_id),
      surface_id_(surface_id),
      software_(software),
      last_flush_count_(0),
      last_memory_allocation_valid_(false),
      watchdog_(watchdog),
      sync_point_wait_count_(0),
      delayed_work_scheduled_(false),
      previous_messages_processed_(0),
      active_url_(active_url),
      total_gpu_memory_(0) {
  active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
  FastSetActiveURL(active_url_, active_url_hash_);
  if (share_group) {
    context_group_ = share_group->context_group_;
  } else {
    context_group_ = new gpu::gles2::ContextGroup(
        mailbox_manager,
        image_manager,
        new GpuCommandBufferMemoryTracker(channel),
        NULL,
        true);
  }

  use_virtualized_gl_context_ |=
      context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;
}

GpuCommandBufferStub::~GpuCommandBufferStub() {
  Destroy();

  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(surface_id()));
}

GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
  return channel()->gpu_channel_manager()->gpu_memory_manager();
}

bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
  devtools_gpu_instrumentation::ScopedGpuTask task(channel());
  FastSetActiveURL(active_url_, active_url_hash_);

  // Ensure the appropriate GL context is current before handling any IPC
  // messages directed at the command buffer. This ensures that the message
  // handler can assume that the context is current (not necessary for Echo,
  // GetStateFast, RetireSyncPoint, or SetLatencyInfo).
  if (decoder_.get() &&
      message.type() != GpuCommandBufferMsg_Echo::ID &&
      message.type() != GpuCommandBufferMsg_GetStateFast::ID &&
      message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
      message.type() != GpuCommandBufferMsg_SetLatencyInfo::ID) {
    if (!MakeCurrent())
      return false;
  }

  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message
  // handlers here. This is so the reply can be delayed if the scheduler is
  // unscheduled.
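  // A delayed-reply handler receives the reply message as a raw pointer and
  // completes it itself once it has a result, e.g. (the Foo names are
  // illustrative; OnInitialize below follows this exact pattern):
  //
  //   void OnFoo(IPC::Message* reply_message) {
  //     GpuCommandBufferMsg_Foo::WriteReplyParams(reply_message, result);
  //     Send(reply_message);
  //   }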
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                    OnInitialize)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                    OnSetGetBuffer)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
                        OnProduceFrontBuffer)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Echo, OnEcho)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetState, OnGetState)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetStateFast,
                                    OnGetStateFast)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetLatencyInfo, OnSetLatencyInfo)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                        OnRegisterTransferBuffer)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                        OnDestroyTransferBuffer)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetTransferBuffer,
                                    OnGetTransferBuffer)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
                                    OnCreateVideoDecoder)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
                        OnSetSurfaceVisible)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
                        OnRetireSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
                        OnSignalSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
                        OnSignalQuery)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SendClientManagedMemoryStats,
                        OnReceivedClientManagedMemoryStats)
    IPC_MESSAGE_HANDLER(
        GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
        OnSetClientHasMemoryAllocationChangedCallback)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterGpuMemoryBuffer,
                        OnRegisterGpuMemoryBuffer)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyGpuMemoryBuffer,
                        OnDestroyGpuMemoryBuffer)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
                        OnCreateStreamTexture)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  // Ensure that any delayed work that was created will be handled.
  ScheduleDelayedWork(kHandleMoreWorkPeriodMs);

  DCHECK(handled);
  return handled;
}

bool GpuCommandBufferStub::Send(IPC::Message* message) {
  return channel_->Send(message);
}

bool GpuCommandBufferStub::IsScheduled() {
  return (!scheduler_.get() || scheduler_->IsScheduled());
}

bool GpuCommandBufferStub::HasMoreWork() {
  return scheduler_.get() && scheduler_->HasMoreWork();
}

void GpuCommandBufferStub::PollWork() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
  delayed_work_scheduled_ = false;
  FastSetActiveURL(active_url_, active_url_hash_);
  if (decoder_.get() && !MakeCurrent())
    return;

  if (scheduler_) {
    bool fences_complete = scheduler_->PollUnscheduleFences();
    // Perform idle work if all fences are complete.
    if (fences_complete) {
      uint64 current_messages_processed =
          channel()->gpu_channel_manager()->MessagesProcessed();
      // We're idle when no messages were processed or scheduled.
      bool is_idle =
          (previous_messages_processed_ == current_messages_processed) &&
          !channel()->gpu_channel_manager()->HandleMessagesScheduled();
      if (!is_idle && !last_idle_time_.is_null()) {
        base::TimeDelta time_since_idle = base::TimeTicks::Now() -
            last_idle_time_;
        base::TimeDelta max_time_since_idle =
            base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

        // Force idle when it's been too long since the last time we were idle.
        if (time_since_idle > max_time_since_idle)
          is_idle = true;
      }

      if (is_idle) {
        last_idle_time_ = base::TimeTicks::Now();
        scheduler_->PerformIdleWork();
      }
    }
  }
  ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
}

bool GpuCommandBufferStub::HasUnprocessedCommands() {
  if (command_buffer_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    return state.put_offset != state.get_offset &&
        !gpu::error::IsError(state.error);
  }
  return false;
}
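
// put_offset is where the client has written commands up to; get_offset is
// where the service has parsed up to. put == get means the ring buffer has
// been fully consumed; put != get (with no error set) means work is pending.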

void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
  if (!HasMoreWork()) {
    last_idle_time_ = base::TimeTicks();
    return;
  }

  if (delayed_work_scheduled_)
    return;
  delayed_work_scheduled_ = true;

  // We count as idle if no messages are processed between now and when
  // PollWork is called.
  previous_messages_processed_ =
      channel()->gpu_channel_manager()->MessagesProcessed();
  if (last_idle_time_.is_null())
    last_idle_time_ = base::TimeTicks::Now();

  // IsScheduled() returns true after passing all unschedule fences
  // and this is when we can start performing idle work. Idle work
  // is done synchronously so we can set delay to 0 and instead poll
  // for more work at the rate idle work is performed. This also ensures
  // that idle work is done as efficiently as possible without any
  // unnecessary delays.
  if (scheduler_.get() &&
      scheduler_->IsScheduled() &&
      scheduler_->HasMoreIdleWork()) {
    delay = 0;
  }

  base::MessageLoop::current()->PostDelayedTask(
      FROM_HERE,
      base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
      base::TimeDelta::FromMilliseconds(delay));
}

void GpuCommandBufferStub::OnEcho(const IPC::Message& message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnEcho");
  Send(new IPC::Message(message));
}

bool GpuCommandBufferStub::MakeCurrent() {
  if (decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  CheckContextLost();
  return false;
}
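
// On failure, MakeCurrent() poisons the command buffer with kLostContext
// before returning, so clients observe a lost context instead of a hang, and
// CheckContextLost() may additionally drop every context in the channel.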

void GpuCommandBufferStub::Destroy() {
  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
        active_url_));
  }

  memory_manager_client_state_.reset();

  while (!sync_points_.empty())
    OnRetireSyncPoint(sync_points_.front());

  if (decoder_)
    decoder_->set_engine(NULL);

  // The scheduler has raw references to the decoder and the command buffer so
  // destroy it before those.
  scheduler_.reset();

  bool have_context = false;
  if (decoder_ && command_buffer_ &&
      command_buffer_->GetState().error != gpu::error::kLostContext)
    have_context = decoder_->MakeCurrent();
  FOR_EACH_OBSERVER(DestructionObserver,
                    destruction_observers_,
                    OnWillDestroyStub());

  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }

  command_buffer_.reset();

  // Remove this after crbug.com/248395 is sorted out.
  surface_ = NULL;
}

void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
  Destroy();
  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, false, gpu::Capabilities());
  Send(reply_message);
}

void GpuCommandBufferStub::OnInitialize(
    base::SharedMemoryHandle shared_state_handle,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
  DCHECK(!command_buffer_.get());

  scoped_ptr<base::SharedMemory> shared_state_shm(
      new base::SharedMemory(shared_state_handle, false));

  command_buffer_.reset(new gpu::CommandBufferService(
      context_group_->transfer_buffer_manager()));

  bool result = command_buffer_->Initialize();
  DCHECK(result);

  decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));

  scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
                                         decoder_.get(),
                                         decoder_.get()));
  if (preemption_flag_.get())
    scheduler_->SetPreemptByFlag(preemption_flag_);

  decoder_->set_engine(scheduler_.get());

  if (!handle_.is_null()) {
#if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
    if (software_) {
      LOG(ERROR) << "No software support.";
      OnInitializeFailed(reply_message);
      return;
    }
#endif

    surface_ = ImageTransportSurface::CreateSurface(
        channel_->gpu_channel_manager(),
        this,
        handle_);
  } else {
    GpuChannelManager* manager = channel_->gpu_channel_manager();
    surface_ = manager->GetDefaultOffscreenSurface();
  }

  if (!surface_.get()) {
    DLOG(ERROR) << "Failed to create surface.";
    OnInitializeFailed(reply_message);
    return;
  }

  scoped_refptr<gfx::GLContext> context;
  if (use_virtualized_gl_context_ && channel_->share_group()) {
    context = channel_->share_group()->GetSharedContext();
    if (!context.get()) {
      context = gfx::GLContext::CreateGLContext(
          channel_->share_group(),
          channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
          gpu_preference_);
      channel_->share_group()->SetSharedContext(context.get());
    }
    // This should be a non-virtual GL context.
    DCHECK(context->GetHandle());
    context = new gpu::GLContextVirtual(
        channel_->share_group(), context.get(), decoder_->AsWeakPtr());
    if (!context->Initialize(surface_.get(), gpu_preference_)) {
      // TODO(sievers): The real context created above for the default
      // offscreen surface might not be compatible with this surface.
      // Need to adjust at least GLX to be able to create the initial context
      // with a config that is compatible with onscreen and offscreen surfaces.
      context = NULL;

      DLOG(ERROR) << "Failed to initialize virtual GL context.";
      OnInitializeFailed(reply_message);
      return;
    }
  }
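  // With virtualization, every stub in this share group runs on one real GL
  // context (the channel's shared context created above); GLContextVirtual
  // multiplexes the stubs onto it, restoring each decoder's GL state via the
  // GLStateRestorer when switching between them.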
  if (!context.get()) {
    context = gfx::GLContext::CreateGLContext(
        channel_->share_group(), surface_.get(), gpu_preference_);
  }
  if (!context.get()) {
    DLOG(ERROR) << "Failed to create context.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Failed to make context current.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->GetGLStateRestorer()) {
    context->SetGLStateRestorer(
        new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
  }

  if (!context->GetTotalGpuMemory(&total_gpu_memory_))
    total_gpu_memory_ = 0;

  if (!context_group_->has_program_cache()) {
    context_group_->set_program_cache(
        channel_->gpu_channel_manager()->program_cache());
  }

  // Initialize the decoder with either the view or pbuffer GLContext.
  if (!decoder_->Initialize(surface_,
                            context,
                            !surface_id(),
                            initial_size_,
                            disallowed_features_,
                            requested_attribs_)) {
    DLOG(ERROR) << "Failed to initialize decoder.";
    OnInitializeFailed(reply_message);
    return;
  }

  gpu_control_.reset(
      new gpu::GpuControlService(context_group_->image_manager(),
                                 NULL,
                                 context_group_->mailbox_manager(),
                                 NULL,
                                 decoder_->GetCapabilities()));

  if (CommandLine::ForCurrentProcess()->HasSwitch(
      switches::kEnableGPUServiceLogging)) {
    decoder_->set_log_commands(true);
  }

  decoder_->GetLogger()->SetMsgCallback(
      base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
                 base::Unretained(this)));
  decoder_->SetShaderCacheCallback(
      base::Bind(&GpuCommandBufferStub::SendCachedShader,
                 base::Unretained(this)));
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
                 base::Unretained(this)));

  command_buffer_->SetPutOffsetChangeCallback(
      base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
  command_buffer_->SetGetBufferChangeCallback(
      base::Bind(&gpu::GpuScheduler::SetGetBuffer,
                 base::Unretained(scheduler_.get())));
  command_buffer_->SetParseErrorCallback(
      base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
  scheduler_->SetSchedulingChangedCallback(
      base::Bind(&GpuChannel::StubSchedulingChanged,
                 base::Unretained(channel_)));

  if (watchdog_) {
    scheduler_->SetCommandProcessedCallback(
        base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
                   base::Unretained(this)));
  }

  if (!command_buffer_->SetSharedStateBuffer(shared_state_shm.Pass())) {
    DLOG(ERROR) << "Failed to map shared state buffer.";
    OnInitializeFailed(reply_message);
    return;
  }

  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, true, gpu_control_->GetCapabilities());
  Send(reply_message);

  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
        active_url_));
  }
}

void GpuCommandBufferStub::OnSetLatencyInfo(
    const std::vector<ui::LatencyInfo>& latency_info) {
  if (!ui::LatencyInfo::Verify(latency_info,
                               "GpuCommandBufferStub::OnSetLatencyInfo"))
    return;
  if (!latency_info_callback_.is_null())
    latency_info_callback_.Run(latency_info);
}

void GpuCommandBufferStub::OnCreateStreamTexture(uint32 texture_id,
                                                 int32* stream_id) {
#if defined(OS_ANDROID)
  *stream_id = StreamTexture::Create(this, texture_id);
#else
  *stream_id = 0;
#endif
}

void GpuCommandBufferStub::SetLatencyInfoCallback(
    const LatencyInfoCallback& callback) {
  latency_info_callback_ = callback;
}

int32 GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
  // The requested attributes are stored as (enum, value) pairs; search for
  // the requested attribute and return its value. Iterating by index keeps
  // the lookup in bounds even if the vector has an odd (malformed) length.
  for (size_t i = 0; i + 1 < requested_attribs_.size(); i += 2) {
    if (requested_attribs_[i] == attr)
      return requested_attribs_[i + 1];
  }
  return -1;
}
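
// For example, with requested_attribs_ = { ALPHA_SIZE, 8, DEPTH_SIZE, 24 }
// (attribute names illustrative), GetRequestedAttribute(DEPTH_SIZE) returns
// 24, and any attribute not present in the list yields -1.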

void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
                                          IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
  if (command_buffer_)
    command_buffer_->SetGetBuffer(shm_id);
  Send(reply_message);
}

void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
  if (!decoder_) {
    LOG(ERROR) << "Can't produce front buffer before initialization.";
    return;
  }

  decoder_->ProduceFrontBuffer(mailbox);
}

void GpuCommandBufferStub::OnGetState(IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetState");
  if (command_buffer_) {
    gpu::CommandBuffer::State state = command_buffer_->GetState();
    CheckContextLost();
    GpuCommandBufferMsg_GetState::WriteReplyParams(reply_message, state);
  } else {
    DLOG(ERROR) << "No command buffer.";
    reply_message->set_reply_error();
  }
  Send(reply_message);
}

void GpuCommandBufferStub::OnParseError() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
  DCHECK(command_buffer_.get());
  gpu::CommandBuffer::State state = command_buffer_->GetState();
  IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
      route_id_, state.context_lost_reason);
  msg->set_unblock(true);
  Send(msg);

  // Tell the browser about this context loss as well, so it can
  // determine whether client APIs like WebGL need to be immediately
  // blocked from automatically running.
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
      handle_.is_null(), state.context_lost_reason, active_url_));

  CheckContextLost();
}

void GpuCommandBufferStub::OnGetStateFast(IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetStateFast");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  gpu::CommandBuffer::State state = command_buffer_->GetState();
  GpuCommandBufferMsg_GetStateFast::WriteReplyParams(reply_message, state);
  Send(reply_message);
}

void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset,
                                        uint32 flush_count) {
  TRACE_EVENT1("gpu", "GpuCommandBufferStub::OnAsyncFlush",
               "put_offset", put_offset);
  DCHECK(command_buffer_.get());
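  // The unsigned subtraction below is wraparound-safe: an in-order flush
  // yields a small difference even across uint32 wraparound (e.g.
  // 0x00000001 - 0xffffffff wraps to 2), while a stale, out-of-order
  // flush_count from the past wraps to a very large difference and is
  // rejected by the 0x8000000U threshold.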
  if (flush_count - last_flush_count_ < 0x8000000U) {
    last_flush_count_ = flush_count;
    command_buffer_->Flush(put_offset);
  } else {
    // We received this message out-of-order. That should not happen, but the
    // check is here to catch regressions. Ignore the message.
    NOTREACHED() << "Received a Flush message out-of-order";
  }

  ReportState();
}

void GpuCommandBufferStub::OnRescheduled() {
  gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
  command_buffer_->Flush(pre_state.put_offset);
  gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();

  if (pre_state.get_offset != post_state.get_offset)
    ReportState();
}

void GpuCommandBufferStub::OnRegisterTransferBuffer(
    int32 id,
    base::SharedMemoryHandle transfer_buffer,
    uint32 size) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");

  // Take ownership of the memory and map it into this process.
  // This validates the size.
  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(transfer_buffer, false));
  if (!shared_memory->Map(size)) {
    DVLOG(0) << "Failed to map shared memory.";
    return;
  }

  if (command_buffer_)
    command_buffer_->RegisterTransferBuffer(id, shared_memory.Pass(), size);
}

void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");

  if (command_buffer_)
    command_buffer_->DestroyTransferBuffer(id);
}

void GpuCommandBufferStub::OnGetTransferBuffer(
    int32 id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetTransferBuffer");
  if (command_buffer_) {
    base::SharedMemoryHandle transfer_buffer = base::SharedMemoryHandle();
    uint32 size = 0;

    scoped_refptr<gpu::Buffer> buffer = command_buffer_->GetTransferBuffer(id);
    if (buffer && buffer->shared_memory()) {
#if defined(OS_WIN)
      transfer_buffer = NULL;
      BrokerDuplicateHandle(buffer->shared_memory()->handle(),
                            channel_->renderer_pid(),
                            &transfer_buffer,
                            FILE_MAP_READ | FILE_MAP_WRITE,
                            0);
      DCHECK(transfer_buffer != NULL);
#else
      buffer->shared_memory()->ShareToProcess(channel_->renderer_pid(),
                                              &transfer_buffer);
#endif
      size = buffer->size();
    }

    GpuCommandBufferMsg_GetTransferBuffer::WriteReplyParams(reply_message,
                                                            transfer_buffer,
                                                            size);
  } else {
    reply_message->set_reply_error();
  }
  Send(reply_message);
}

void GpuCommandBufferStub::OnCommandProcessed() {
  if (watchdog_)
    watchdog_->CheckArmed();
}

void GpuCommandBufferStub::ReportState() {
  if (!CheckContextLost())
    command_buffer_->UpdateState();
}

void GpuCommandBufferStub::PutChanged() {
  FastSetActiveURL(active_url_, active_url_hash_);
  scheduler_->PutChanged();
}

void GpuCommandBufferStub::OnCreateVideoDecoder(
    media::VideoCodecProfile profile,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
  int decoder_route_id = channel_->GenerateRouteID();
  GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
      decoder_route_id, this, channel_->io_message_loop());
  decoder->Initialize(profile, reply_message);
  // The decoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetVisible(visible);
}

void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
  sync_points_.push_back(sync_point);
}

void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
  DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
  sync_points_.pop_front();
  if (context_group_->mailbox_manager()->UsesSync() && MakeCurrent())
    context_group_->mailbox_manager()->PushTextureUpdates();
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->RetireSyncPoint(sync_point);
}

bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
  if (!sync_point)
    return true;
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  if (manager->sync_point_manager()->IsSyncPointRetired(sync_point))
    return true;

  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
                             "GpuCommandBufferStub", this);
  }
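  // Descheduling blocks further command processing on this stub until the
  // sync point is retired; OnSyncPointRetired (bound below) makes the
  // scheduler runnable again once the wait completes.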
  scheduler_->SetScheduled(false);
  ++sync_point_wait_count_;
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSyncPointRetired,
                 this->AsWeakPtr()));
  return scheduler_->IsScheduled();
}

void GpuCommandBufferStub::OnSyncPointRetired() {
  --sync_point_wait_count_;
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
                           "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(true);
}

void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                 this->AsWeakPtr(),
                 id));
}

void GpuCommandBufferStub::OnSignalSyncPointAck(uint32 id) {
  Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_, id));
}

void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
  if (decoder_) {
    gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
    if (query_manager) {
      gpu::gles2::QueryManager::Query* query =
          query_manager->GetQuery(query_id);
      if (query) {
        query->AddCallback(
            base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                       this->AsWeakPtr(),
                       id));
        return;
      }
    }
  }
  // Something went wrong; run the callback immediately.
  OnSignalSyncPointAck(id);
}
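
// Query signals reuse the sync-point ack message: the client tells query
// completions and sync-point completions apart only by the |id| it supplied
// when it registered the signal.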

void GpuCommandBufferStub::OnReceivedClientManagedMemoryStats(
    const gpu::ManagedMemoryStats& stats) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnReceivedClientManagedMemoryStats");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetManagedMemoryStats(stats);
}

void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
    bool has_callback) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
  if (has_callback) {
    if (!memory_manager_client_state_) {
      memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
          this, surface_id_ != 0, true));
    }
  } else {
    memory_manager_client_state_.reset();
  }
}

void GpuCommandBufferStub::OnRegisterGpuMemoryBuffer(
    int32 id,
    gfx::GpuMemoryBufferHandle gpu_memory_buffer,
    uint32 width,
    uint32 height,
    uint32 internalformat) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterGpuMemoryBuffer");
  if (gpu_control_) {
    gpu_control_->RegisterGpuMemoryBuffer(id,
                                          gpu_memory_buffer,
                                          width,
                                          height,
                                          internalformat);
  }
}

void GpuCommandBufferStub::OnDestroyGpuMemoryBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyGpuMemoryBuffer");
  if (gpu_control_)
    gpu_control_->DestroyGpuMemoryBuffer(id);
}

void GpuCommandBufferStub::SendConsoleMessage(
    int32 id,
    const std::string& message) {
  GPUCommandBufferConsoleMessage console_message;
  console_message.id = id;
  console_message.message = message;
  IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
      route_id_, console_message);
  msg->set_unblock(true);
  Send(msg);
}

void GpuCommandBufferStub::SendCachedShader(
    const std::string& key, const std::string& shader) {
  channel_->CacheShader(key, shader);
}

void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}

void GpuCommandBufferStub::RemoveDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.RemoveObserver(observer);
}

void GpuCommandBufferStub::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> flag) {
  preemption_flag_ = flag;
  if (scheduler_)
    scheduler_->SetPreemptByFlag(preemption_flag_);
}

bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
  *bytes = total_gpu_memory_;
  return !!total_gpu_memory_;
}
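
// A zero total is treated as "unknown" here: total_gpu_memory_ stays 0 when
// the context could not report it (see OnInitialize), so GetTotalGpuMemory()
// returns false and callers can fall back to their own estimates.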

gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
  if (!surface_.get())
    return gfx::Size();
  return surface_->GetSize();
}

gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
  return context_group_->memory_tracker();
}

void GpuCommandBufferStub::SetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!last_memory_allocation_valid_ ||
      !allocation.Equals(last_memory_allocation_)) {
    Send(new GpuCommandBufferMsg_SetMemoryAllocation(
        route_id_, allocation));
  }

  last_memory_allocation_valid_ = true;
  last_memory_allocation_ = allocation;
}
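
// The valid flag plus the Equals() check deduplicate this IPC: the first
// allocation is always sent (the flag starts out false in the constructor),
// and afterwards only actual changes are forwarded to the client.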

void GpuCommandBufferStub::SuggestHaveFrontBuffer(
    bool suggest_have_frontbuffer) {
  // This can be called outside of OnMessageReceived, so the context needs
  // to be made current before calling methods on the surface.
  if (surface_.get() && MakeCurrent())
    surface_->SetFrontbufferAllocation(suggest_have_frontbuffer);
}

bool GpuCommandBufferStub::CheckContextLost() {
  DCHECK(command_buffer_);
  gpu::CommandBuffer::State state = command_buffer_->GetState();
  bool was_lost = state.error == gpu::error::kLostContext;
  // Lose all other contexts if the reset was triggered by the robustness
  // extension instead of being synthetic.
  if (was_lost && decoder_ && decoder_->WasContextLostByRobustnessExtension() &&
      (gfx::GLContext::LosesAllContextsOnContextLost() ||
       use_virtualized_gl_context_))
    channel_->LoseAllContexts();
  return was_lost;
}
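
// In other words: a synthetic loss (e.g. MarkContextLost below) affects only
// this context, while a reset reported by the robustness extension also tears
// down every other context in the channel when the platform loses all
// contexts together or this stub runs on a virtualized context.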

void GpuCommandBufferStub::MarkContextLost() {
  if (!command_buffer_ ||
      command_buffer_->GetState().error == gpu::error::kLostContext)
    return;

  command_buffer_->SetContextLostReason(gpu::error::kUnknown);
  if (decoder_)
    decoder_->LoseContext(GL_UNKNOWN_CONTEXT_RESET_ARB);
  command_buffer_->SetParseError(gpu::error::kLostContext);
}

uint64 GpuCommandBufferStub::GetMemoryUsage() const {
  return GetMemoryManager()->GetClientMemoryUsage(this);
}

}  // namespace content