Upstream version 6.35.121.0
[platform/framework/web/crosswalk.git] / src / gpu / command_buffer / service / in_process_command_buffer.cc
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "gpu/command_buffer/service/in_process_command_buffer.h"
6
7 #include <queue>
8 #include <set>
9 #include <utility>
10
11 #include <GLES2/gl2.h>
12 #ifndef GL_GLEXT_PROTOTYPES
13 #define GL_GLEXT_PROTOTYPES 1
14 #endif
15 #include <GLES2/gl2ext.h>
16 #include <GLES2/gl2extchromium.h>
17
18 #include "base/bind.h"
19 #include "base/bind_helpers.h"
20 #include "base/lazy_instance.h"
21 #include "base/logging.h"
22 #include "base/memory/weak_ptr.h"
23 #include "base/message_loop/message_loop_proxy.h"
24 #include "base/sequence_checker.h"
25 #include "base/synchronization/condition_variable.h"
26 #include "base/threading/thread.h"
27 #include "gpu/command_buffer/service/command_buffer_service.h"
28 #include "gpu/command_buffer/service/context_group.h"
29 #include "gpu/command_buffer/service/gl_context_virtual.h"
30 #include "gpu/command_buffer/service/gpu_control_service.h"
31 #include "gpu/command_buffer/service/gpu_scheduler.h"
32 #include "gpu/command_buffer/service/image_manager.h"
33 #include "gpu/command_buffer/service/mailbox_manager.h"
34 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
35 #include "ui/gfx/size.h"
36 #include "ui/gl/gl_context.h"
37 #include "ui/gl/gl_image.h"
38 #include "ui/gl/gl_share_group.h"
39
40 #if defined(OS_ANDROID)
41 #include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
42 #include "ui/gl/android/surface_texture.h"
43 #endif
44
45 namespace gpu {
46
47 namespace {
48
// Process-wide factory used by CreateGpuMemoryBuffer(); injected by the
// embedder via InProcessCommandBuffer::SetGpuMemoryBufferFactory(). May be
// NULL if the embedder never installs one.
static GpuMemoryBufferFactory* g_gpu_memory_buffer_factory = NULL;
50
51 template <typename T>
52 static void RunTaskWithResult(base::Callback<T(void)> task,
53                               T* result,
54                               base::WaitableEvent* completion) {
55   *result = task.Run();
56   completion->Signal();
57 }
58
// Default Service implementation: a lazily created GPU thread shared by all
// InProcessCommandBuffer clients that did not supply their own Service.
// Ref-counted so the thread is stopped and destroyed when the last client
// drops its reference; AddRef/Release forward to RefCountedThreadSafe since
// they are declared virtual on the Service interface (note the OVERRIDE).
class GpuInProcessThread
    : public base::Thread,
      public InProcessCommandBuffer::Service,
      public base::RefCountedThreadSafe<GpuInProcessThread> {
 public:
  GpuInProcessThread();

  virtual void AddRef() const OVERRIDE {
    base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
  }
  virtual void Release() const OVERRIDE {
    base::RefCountedThreadSafe<GpuInProcessThread>::Release();
  }

  // Posts |task| to this thread's message loop.
  virtual void ScheduleTask(const base::Closure& task) OVERRIDE;
  // Posts |callback| with a small delay; used for polling-style idle work.
  virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
  virtual bool UseVirtualizedGLContexts() OVERRIDE { return false; }

 private:
  virtual ~GpuInProcessThread();
  friend class base::RefCountedThreadSafe<GpuInProcessThread>;

  DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
};
83
GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
  // Start immediately so ScheduleTask() can post right after construction.
  Start();
}

GpuInProcessThread::~GpuInProcessThread() {
  Stop();
}

void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
  message_loop()->PostTask(FROM_HERE, task);
}

void GpuInProcessThread::ScheduleIdleWork(const base::Closure& callback) {
  // Small fixed delay: re-polls for idle work without busy-spinning the
  // GPU thread.
  message_loop()->PostDelayedTask(
      FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
}
100
// Clients currently sharing the default GpuInProcessThread service, plus the
// lock guarding the set. GetDefaultService() uses this to piggy-back new
// clients onto an already-running default service.
base::LazyInstance<std::set<InProcessCommandBuffer*> > default_thread_clients_ =
    LAZY_INSTANCE_INITIALIZER;
base::LazyInstance<base::Lock> default_thread_clients_lock_ =
    LAZY_INSTANCE_INITIALIZER;
105
106 class ScopedEvent {
107  public:
108   ScopedEvent(base::WaitableEvent* event) : event_(event) {}
109   ~ScopedEvent() { event_->Signal(); }
110
111  private:
112   base::WaitableEvent* event_;
113 };
114
115 class SyncPointManager {
116  public:
117   SyncPointManager();
118   ~SyncPointManager();
119
120   uint32 GenerateSyncPoint();
121   void RetireSyncPoint(uint32 sync_point);
122
123   bool IsSyncPointPassed(uint32 sync_point);
124   void WaitSyncPoint(uint32 sync_point);
125
126 private:
127   // This lock protects access to pending_sync_points_ and next_sync_point_ and
128   // is used with the ConditionVariable to signal when a sync point is retired.
129   base::Lock lock_;
130   std::set<uint32> pending_sync_points_;
131   uint32 next_sync_point_;
132   base::ConditionVariable cond_var_;
133 };
134
// Sync point IDs start at 1; 0 is never handed out initially.
SyncPointManager::SyncPointManager() : next_sync_point_(1), cond_var_(&lock_) {}

SyncPointManager::~SyncPointManager() {
  // Every generated sync point must have been retired by teardown.
  DCHECK_EQ(pending_sync_points_.size(), 0U);
}
140
141 uint32 SyncPointManager::GenerateSyncPoint() {
142   base::AutoLock lock(lock_);
143   uint32 sync_point = next_sync_point_++;
144   DCHECK_EQ(pending_sync_points_.count(sync_point), 0U);
145   pending_sync_points_.insert(sync_point);
146   return sync_point;
147 }
148
void SyncPointManager::RetireSyncPoint(uint32 sync_point) {
  base::AutoLock lock(lock_);
  DCHECK(pending_sync_points_.count(sync_point));
  pending_sync_points_.erase(sync_point);
  // Wake every waiter; each one rechecks its own sync point under the lock.
  cond_var_.Broadcast();
}

bool SyncPointManager::IsSyncPointPassed(uint32 sync_point) {
  base::AutoLock lock(lock_);
  return pending_sync_points_.count(sync_point) == 0;
}

void SyncPointManager::WaitSyncPoint(uint32 sync_point) {
  base::AutoLock lock(lock_);
  // Loop to tolerate spurious wakeups and broadcasts for other sync points.
  while (pending_sync_points_.count(sync_point)) {
    cond_var_.Wait();
  }
}
167
// Process-wide sync point state shared by all in-process command buffers.
base::LazyInstance<SyncPointManager> g_sync_point_manager =
    LAZY_INSTANCE_INITIALIZER;

// Adapter matching the decoder's WaitSyncPointCallback signature; blocks the
// calling (GPU) thread until |sync_point| is retired. Always reports success.
bool WaitSyncPoint(uint32 sync_point) {
  g_sync_point_manager.Get().WaitSyncPoint(sync_point);
  return true;
}
175
}  // anonymous namespace
177
// Service is an abstract interface; nothing to set up or tear down here.
InProcessCommandBuffer::Service::Service() {}

InProcessCommandBuffer::Service::~Service() {}
181
182 scoped_refptr<InProcessCommandBuffer::Service>
183 InProcessCommandBuffer::GetDefaultService() {
184   base::AutoLock lock(default_thread_clients_lock_.Get());
185   scoped_refptr<Service> service;
186   if (!default_thread_clients_.Get().empty()) {
187     InProcessCommandBuffer* other = *default_thread_clients_.Get().begin();
188     service = other->service_;
189     DCHECK(service.get());
190   } else {
191     service = new GpuInProcessThread;
192   }
193   return service;
194 }
195
// |service| may be NULL, in which case the shared default GPU thread is used
// (see GetDefaultService()). flush_event_ is auto-reset and starts
// unsignaled.
InProcessCommandBuffer::InProcessCommandBuffer(
    const scoped_refptr<Service>& service)
    : context_lost_(false),
      last_put_offset_(-1),
      flush_event_(false, false),
      service_(service.get() ? service : GetDefaultService()),
      gpu_thread_weak_ptr_factory_(this) {
  // Clients on the default service are tracked so later instances can share
  // the same GPU thread.
  if (!service) {
    base::AutoLock lock(default_thread_clients_lock_.Get());
    default_thread_clients_.Get().insert(this);
  }
}
208
InProcessCommandBuffer::~InProcessCommandBuffer() {
  // Blocks until GPU-thread teardown finishes, then unregisters from the
  // default-service client set.
  Destroy();
  base::AutoLock lock(default_thread_clients_lock_.Get());
  default_thread_clients_.Get().erase(this);
}

// Resize callback installed on the decoder for onscreen surfaces (see
// InitializeOnGpuThread); runs on the GPU thread. |scale_factor| is
// currently ignored.
void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
  CheckSequencedThread();
  DCHECK(!surface_->IsOffscreen());
  surface_->Resize(size);
}
220
// Makes the GL context current on the GPU thread. On failure (or if the
// context is already lost) records the lost-context reason and a parse
// error, and returns false. Caller must hold command_buffer_lock_.
bool InProcessCommandBuffer::MakeCurrent() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!context_lost_ && decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  return false;
}

// Put-offset-change callback: executes buffered commands via the scheduler.
// No-op when the context cannot be made current.
void InProcessCommandBuffer::PumpCommands() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!MakeCurrent())
    return;

  gpu_scheduler_->PutChanged();
}

// Get-buffer-change callback from the scheduler: installs the new transfer
// buffer as the command buffer's get buffer.
bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();
  command_buffer_->SetGetBuffer(transfer_buffer_id);
  return true;
}
249
// Client-thread entry point: records client state, then runs
// InitializeOnGpuThread() on the service thread and blocks until it
// completes. Returns false if GPU-side initialization failed.
bool InProcessCommandBuffer::Initialize(
    scoped_refptr<gfx::GLSurface> surface,
    bool is_offscreen,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    const base::Closure& context_lost_callback,
    InProcessCommandBuffer* share_group) {
  // Resource sharing requires both command buffers to use the same service.
  DCHECK(!share_group || service_ == share_group->service_);
  context_lost_callback_ = WrapCallback(context_lost_callback);

  if (surface) {
    // GPU thread must be the same as client thread due to GLSurface not being
    // thread safe.
    sequence_checker_.reset(new base::SequenceChecker);
    surface_ = surface;
  }

  gpu::Capabilities capabilities;
  InitializeOnGpuThreadParams params(is_offscreen,
                                     window,
                                     size,
                                     attribs,
                                     gpu_preference,
                                     &capabilities,
                                     share_group);

  base::Callback<bool(void)> init_task =
      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
                 base::Unretained(this),
                 params);

  base::WaitableEvent completion(true, false);
  bool result = false;
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
  completion.Wait();

  // |capabilities| was filled in on the GPU thread; safe to read after Wait().
  if (result)
    capabilities_ = capabilities;
  return result;
}
293
// GPU-thread half of Initialize(): builds, in order, the transfer buffer
// manager, command buffer service, share group, decoder, scheduler, GL
// surface and GL context, then wires up the decoder callbacks. On any
// failure it tears everything down via DestroyOnGpuThread() and returns
// false.
bool InProcessCommandBuffer::InitializeOnGpuThread(
    const InitializeOnGpuThreadParams& params) {
  CheckSequencedThread();
  gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr();

  DCHECK(params.size.width() >= 0 && params.size.height() >= 0);

  TransferBufferManager* manager = new TransferBufferManager();
  transfer_buffer_manager_.reset(manager);
  manager->Initialize();

  // Route put-offset changes and parse errors back into this object. The
  // weak pointer makes these callbacks no-ops once teardown has invalidated
  // it (see DestroyOnGpuThread).
  scoped_ptr<CommandBufferService> command_buffer(
      new CommandBufferService(transfer_buffer_manager_.get()));
  command_buffer->SetPutOffsetChangeCallback(base::Bind(
      &InProcessCommandBuffer::PumpCommands, gpu_thread_weak_ptr_));
  command_buffer->SetParseErrorCallback(base::Bind(
      &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_));

  if (!command_buffer->Initialize()) {
    LOG(ERROR) << "Could not initialize command buffer.";
    DestroyOnGpuThread();
    return false;
  }

  // Share GL resources with the given context group, or start a fresh share
  // group when none was provided.
  gl_share_group_ = params.context_group
                        ? params.context_group->gl_share_group_.get()
                        : new gfx::GLShareGroup;

#if defined(OS_ANDROID)
  stream_texture_manager_.reset(new StreamTextureManagerInProcess);
#endif

  bool bind_generates_resource = false;
  decoder_.reset(gles2::GLES2Decoder::Create(
      params.context_group ? params.context_group->decoder_->GetContextGroup()
                    : new gles2::ContextGroup(NULL,
                                              NULL,
                                              NULL,
                                              NULL,
                                              bind_generates_resource)));

  gpu_scheduler_.reset(
      new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
  command_buffer->SetGetBufferChangeCallback(base::Bind(
      &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
  command_buffer_ = command_buffer.Pass();

  decoder_->set_engine(gpu_scheduler_.get());

  // The client may already have supplied a surface in Initialize(); only
  // create one here if it did not.
  if (!surface_) {
    if (params.is_offscreen)
      surface_ = gfx::GLSurface::CreateOffscreenGLSurface(params.size);
    else
      surface_ = gfx::GLSurface::CreateViewGLSurface(params.window);
  }

  if (!surface_.get()) {
    LOG(ERROR) << "Could not create GLSurface.";
    DestroyOnGpuThread();
    return false;
  }

  if (service_->UseVirtualizedGLContexts()) {
    // Lazily create one real context per share group, then layer a virtual
    // context for this decoder on top of it.
    context_ = gl_share_group_->GetSharedContext();
    if (!context_.get()) {
      context_ = gfx::GLContext::CreateGLContext(
          gl_share_group_.get(), surface_.get(), params.gpu_preference);
      gl_share_group_->SetSharedContext(context_.get());
    }

    context_ = new GLContextVirtual(
        gl_share_group_.get(), context_.get(), decoder_->AsWeakPtr());
    if (context_->Initialize(surface_.get(), params.gpu_preference)) {
      VLOG(1) << "Created virtual GL context.";
    } else {
      // Fall through to the error path below.
      context_ = NULL;
    }
  } else {
    context_ = gfx::GLContext::CreateGLContext(
        gl_share_group_.get(), surface_.get(), params.gpu_preference);
  }

  if (!context_.get()) {
    LOG(ERROR) << "Could not create GLContext.";
    DestroyOnGpuThread();
    return false;
  }

  if (!context_->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Could not make context current.";
    DestroyOnGpuThread();
    return false;
  }

  // The GPU memory manager is explicitly disabled for in-process use.
  gles2::DisallowedFeatures disallowed_features;
  disallowed_features.gpu_memory_manager = true;
  if (!decoder_->Initialize(surface_,
                            context_,
                            params.is_offscreen,
                            params.size,
                            disallowed_features,
                            params.attribs)) {
    LOG(ERROR) << "Could not initialize decoder.";
    DestroyOnGpuThread();
    return false;
  }

  gpu_control_.reset(
      new GpuControlService(decoder_->GetContextGroup()->image_manager(),
                            g_gpu_memory_buffer_factory,
                            decoder_->GetContextGroup()->mailbox_manager(),
                            decoder_->GetQueryManager(),
                            decoder_->GetCapabilities()));

  // Report capabilities to the client thread blocked in Initialize().
  *params.capabilities = gpu_control_->GetCapabilities();

  if (!params.is_offscreen) {
    decoder_->SetResizeCallback(base::Bind(
        &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));
  }
  decoder_->SetWaitSyncPointCallback(base::Bind(&WaitSyncPoint));

  return true;
}
418
419 void InProcessCommandBuffer::Destroy() {
420   CheckSequencedThread();
421
422   base::WaitableEvent completion(true, false);
423   bool result = false;
424   base::Callback<bool(void)> destroy_task = base::Bind(
425       &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
426   QueueTask(
427       base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
428   completion.Wait();
429 }
430
// GPU-thread half of Destroy(). Always returns true (the result is required
// only to satisfy RunTaskWithResult's signature).
bool InProcessCommandBuffer::DestroyOnGpuThread() {
  CheckSequencedThread();
  // Invalidate weak pointers first so any still-queued GPU-thread callbacks
  // bound to this object become no-ops during teardown.
  gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_ && context_->MakeCurrent(surface_);
  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
  context_ = NULL;
  surface_ = NULL;
  gl_share_group_ = NULL;
#if defined(OS_ANDROID)
  stream_texture_manager_.reset();
#endif

  return true;
}

// DCHECKs the caller is on the bound sequence. Only enforced when a
// client-supplied surface forced single-threaded operation (the checker is
// created in Initialize() in that case only).
void InProcessCommandBuffer::CheckSequencedThread() {
  DCHECK(!sequence_checker_ ||
         sequence_checker_->CalledOnValidSequencedThread());
}
455
// Parse-error callback from the command buffer service: notifies the client
// at most once, then latches context_lost_.
void InProcessCommandBuffer::OnContextLost() {
  CheckSequencedThread();
  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    // Reset so a repeated loss does not notify the client again.
    context_lost_callback_.Reset();
  }

  context_lost_ = true;
}

CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  CheckSequencedThread();
  base::AutoLock lock(state_after_last_flush_lock_);
  // Wrap-safe unsigned comparison: adopt the post-flush snapshot only if its
  // generation is not older than what we already cached.
  if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
    last_state_ = state_after_last_flush_;
  return last_state_;
}
473
// Returns the freshest state, syncing with the post-flush snapshot first.
CommandBuffer::State InProcessCommandBuffer::GetState() {
  CheckSequencedThread();
  return GetStateFast();
}

// Returns the cached state without syncing with the GPU thread.
CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  CheckSequencedThread();
  return last_state_;
}

int32 InProcessCommandBuffer::GetLastToken() {
  CheckSequencedThread();
  // Refresh last_state_ before reading the token.
  GetStateFast();
  return last_state_.token;
}
489
// GPU-thread half of Flush(): executes commands up to |put_offset| and
// signals flush_event_ on every exit path via ScopedEvent, waking client
// threads blocked in WaitForTokenInRange()/WaitForGetOffsetInRange().
void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
  CheckSequencedThread();
  ScopedEvent handle_flush(&flush_event_);
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->Flush(put_offset);
  {
    // Update state before signaling the flush event.
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetState();
  }
  // The error state and the context_lost_ flag must agree.
  DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
         (error::IsError(state_after_last_flush_.error) && context_lost_));

  // If we've processed all pending commands but still have pending queries,
  // pump idle work until the query is passed.
  if (put_offset == state_after_last_flush_.get_offset &&
      gpu_scheduler_->HasMoreWork()) {
    service_->ScheduleIdleWork(
        base::Bind(&InProcessCommandBuffer::ScheduleMoreIdleWork,
                   gpu_thread_weak_ptr_));
  }
}

// Performs one slice of scheduler idle work and reschedules itself while the
// scheduler still reports pending work.
void InProcessCommandBuffer::ScheduleMoreIdleWork() {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  if (gpu_scheduler_->HasMoreWork()) {
    gpu_scheduler_->PerformIdleWork();
    service_->ScheduleIdleWork(
        base::Bind(&InProcessCommandBuffer::ScheduleMoreIdleWork,
                   gpu_thread_weak_ptr_));
  }
}
523
524 void InProcessCommandBuffer::Flush(int32 put_offset) {
525   CheckSequencedThread();
526   if (last_state_.error != gpu::error::kNoError)
527     return;
528
529   if (last_put_offset_ == put_offset)
530     return;
531
532   last_put_offset_ = put_offset;
533   base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
534                                   gpu_thread_weak_ptr_,
535                                   put_offset);
536   QueueTask(task);
537 }
538
// Blocks the client thread until the last-read token falls in [start, end]
// or the context is lost; each completed flush wakes the wait.
void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) {
  CheckSequencedThread();
  while (!InRange(start, end, GetLastToken()) &&
         last_state_.error == gpu::error::kNoError)
    flush_event_.Wait();
}

// Blocks the client thread until the get offset falls in [start, end] or the
// context is lost.
void InProcessCommandBuffer::WaitForGetOffsetInRange(int32 start, int32 end) {
  CheckSequencedThread();

  GetStateFast();
  while (!InRange(start, end, last_state_.get_offset) &&
         last_state_.error == gpu::error::kNoError) {
    flush_event_.Wait();
    GetStateFast();
  }
}
556
// Installs transfer buffer |shm_id| as the command buffer's get buffer and
// resets the client-side put offset. No-op once the context is in error.
void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  {
    base::AutoLock lock(command_buffer_lock_);
    command_buffer_->SetGetBuffer(shm_id);
    last_put_offset_ = 0;
  }
  {
    // Refresh the cached post-flush state to reflect the new get buffer.
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetState();
  }
}
572
// Allocates a transfer buffer synchronously on the service object; safe from
// the client thread because command_buffer_lock_ is held.
scoped_refptr<Buffer> InProcessCommandBuffer::CreateTransferBuffer(size_t size,
                                                                   int32* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
}
579
580 void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
581   CheckSequencedThread();
582   base::Closure task = base::Bind(&CommandBuffer::DestroyTransferBuffer,
583                                   base::Unretained(command_buffer_.get()),
584                                   id);
585
586   QueueTask(task);
587 }
588
// Client-side buffer lookup is not supported by the in-process
// implementation; CreateTransferBuffer returns the buffer directly.
scoped_refptr<gpu::Buffer> InProcessCommandBuffer::GetTransferBuffer(int32 id) {
  NOTREACHED();
  return NULL;
}

// Returns the capabilities captured during InitializeOnGpuThread().
gpu::Capabilities InProcessCommandBuffer::GetCapabilities() {
  return capabilities_;
}
597
// Creates a GpuMemoryBuffer synchronously via gpu_control_ (which uses the
// factory installed by SetGpuMemoryBufferFactory). |*id| receives the new
// buffer's identifier.
gfx::GpuMemoryBuffer* InProcessCommandBuffer::CreateGpuMemoryBuffer(
    size_t width,
    size_t height,
    unsigned internalformat,
    int32* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return gpu_control_->CreateGpuMemoryBuffer(width,
                                             height,
                                             internalformat,
                                             id);
}
610
611 void InProcessCommandBuffer::DestroyGpuMemoryBuffer(int32 id) {
612   CheckSequencedThread();
613   base::Closure task = base::Bind(&GpuControl::DestroyGpuMemoryBuffer,
614                                   base::Unretained(gpu_control_.get()),
615                                   id);
616
617   QueueTask(task);
618 }
619
// Generates a sync point ID immediately and queues its retirement on the GPU
// thread, so the point passes only after all previously queued work ran.
uint32 InProcessCommandBuffer::InsertSyncPoint() {
  uint32 sync_point = g_sync_point_manager.Get().GenerateSyncPoint();
  QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point));
  return sync_point;
}
627
628 void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point) {
629   gles2::MailboxManager* mailbox_manager =
630       decoder_->GetContextGroup()->mailbox_manager();
631   if (mailbox_manager->UsesSync() && MakeCurrent())
632     mailbox_manager->PushTextureUpdates();
633   g_sync_point_manager.Get().RetireSyncPoint(sync_point);
634 }
635
// Arranges for |callback| to run on the calling thread once |sync_point|
// has passed.
void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
                                             const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point,
                       WrapCallback(callback)));
}

// Runs |callback| if the sync point already passed; otherwise re-polls via
// delayed idle work rather than blocking the GPU thread. |callback| was
// produced by WrapCallback(), so it bounces back to the client thread.
void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
    unsigned sync_point,
    const base::Closure& callback) {
  if (g_sync_point_manager.Get().IsSyncPointPassed(sync_point)) {
    callback.Run();
  } else {
    service_->ScheduleIdleWork(
        base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
                   gpu_thread_weak_ptr_,
                   sync_point,
                   callback));
  }
}
658
// Asks the service to run |callback| (bounced back to this thread by
// WrapCallback) once |query| completes.
void InProcessCommandBuffer::SignalQuery(unsigned query,
                                         const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&GpuControl::SignalQuery,
                       base::Unretained(gpu_control_.get()),
                       query,
                       WrapCallback(callback)));
}
667
// Visibility and memory-stat hints are intentionally no-ops for the
// in-process implementation.
void InProcessCommandBuffer::SetSurfaceVisible(bool visible) {}

void InProcessCommandBuffer::SendManagedMemoryStats(
    const gpu::ManagedMemoryStats& stats) {
}

// Round-trips through the GPU thread's task queue, then runs |callback| back
// on the calling thread.
void InProcessCommandBuffer::Echo(const base::Closure& callback) {
  QueueTask(WrapCallback(callback));
}
677
// Creates a stream texture for |texture_id| on the GPU thread, blocking the
// caller until the resulting stream id is available.
uint32 InProcessCommandBuffer::CreateStreamTexture(uint32 texture_id) {
  base::WaitableEvent completion(true, false);
  uint32 stream_id = 0;
  base::Callback<uint32(void)> task =
      base::Bind(&InProcessCommandBuffer::CreateStreamTextureOnGpuThread,
                 base::Unretained(this),
                 texture_id);
  QueueTask(
      base::Bind(&RunTaskWithResult<uint32>, task, &stream_id, &completion));
  completion.Wait();
  return stream_id;
}

// Android-only; returns 0 (no stream texture) on all other platforms.
uint32 InProcessCommandBuffer::CreateStreamTextureOnGpuThread(
    uint32 client_texture_id) {
#if defined(OS_ANDROID)
  return stream_texture_manager_->CreateStreamTexture(
      client_texture_id, decoder_->GetContextGroup()->texture_manager());
#else
  return 0;
#endif
}
700
// Returns the error from the cached state (may be stale until the next
// GetStateFast() refresh).
gpu::error::Error InProcessCommandBuffer::GetLastError() {
  CheckSequencedThread();
  return last_state_.error;
}
705
// The methods below belong to the service side of the CommandBuffer
// interface and must never be invoked on this in-process client object.
bool InProcessCommandBuffer::Initialize() {
  NOTREACHED();
  return false;
}

void InProcessCommandBuffer::SetGetOffset(int32 get_offset) { NOTREACHED(); }

void InProcessCommandBuffer::SetToken(int32 token) { NOTREACHED(); }

void InProcessCommandBuffer::SetParseError(gpu::error::Error error) {
  NOTREACHED();
}

void InProcessCommandBuffer::SetContextLostReason(
    gpu::error::ContextLostReason reason) {
  NOTREACHED();
}
723
724 namespace {
725
726 void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
727                          const base::Closure& callback) {
728   if (!loop->BelongsToCurrentThread()) {
729     loop->PostTask(FROM_HERE, callback);
730   } else {
731     callback.Run();
732   }
733 }
734
// Runs |callback| and destroys it on whichever thread executes this task;
// the scoped_ptr ensures the closure is deleted where it ran (see
// WrapCallback for why that matters).
void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
  DCHECK(callback.get());
  callback->Run();
}
739
740 }  // anonymous namespace
741
// Wraps |callback| so that, when invoked from any thread, it bounces back to
// (and is destroyed on) the message loop current at wrap time.
base::Closure InProcessCommandBuffer::WrapCallback(
    const base::Closure& callback) {
  // Make sure the callback gets deleted on the target thread by passing
  // ownership.
  scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
  base::Closure callback_on_client_thread =
      base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
  base::Closure wrapped_callback =
      base::Bind(&PostCallback, base::MessageLoopProxy::current(),
                 callback_on_client_thread);
  return wrapped_callback;
}
754
#if defined(OS_ANDROID)
// Returns the SurfaceTexture backing |stream_id|, which must have been
// created earlier via CreateStreamTexture().
scoped_refptr<gfx::SurfaceTexture>
InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) {
  DCHECK(stream_texture_manager_);
  return stream_texture_manager_->GetSurfaceTexture(stream_id);
}
#endif
762
// static
// Installs the process-wide factory used by CreateGpuMemoryBuffer().
// NOTE(review): no locking here — presumably set once at startup before any
// command buffer is created; confirm with callers.
void InProcessCommandBuffer::SetGpuMemoryBufferFactory(
    GpuMemoryBufferFactory* factory) {
  g_gpu_memory_buffer_factory = factory;
}
768
769 }  // namespace gpu