// Imported from Chromium (crosswalk mirror), revision
// da7433e51939a4186d49b2047d3291313f640d5d:
// src/gpu/command_buffer/service/in_process_command_buffer.cc
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "gpu/command_buffer/service/in_process_command_buffer.h"
6
7 #include <queue>
8 #include <set>
9 #include <utility>
10
11 #include <GLES2/gl2.h>
12 #ifndef GL_GLEXT_PROTOTYPES
13 #define GL_GLEXT_PROTOTYPES 1
14 #endif
15 #include <GLES2/gl2ext.h>
16 #include <GLES2/gl2extchromium.h>
17
18 #include "base/bind.h"
19 #include "base/bind_helpers.h"
20 #include "base/lazy_instance.h"
21 #include "base/logging.h"
22 #include "base/memory/weak_ptr.h"
23 #include "base/message_loop/message_loop_proxy.h"
24 #include "base/sequence_checker.h"
25 #include "base/synchronization/condition_variable.h"
26 #include "base/threading/thread.h"
27 #include "gpu/command_buffer/service/command_buffer_service.h"
28 #include "gpu/command_buffer/service/context_group.h"
29 #include "gpu/command_buffer/service/gl_context_virtual.h"
30 #include "gpu/command_buffer/service/gpu_scheduler.h"
31 #include "gpu/command_buffer/service/image_manager.h"
32 #include "gpu/command_buffer/service/mailbox_manager.h"
33 #include "gpu/command_buffer/service/memory_tracking.h"
34 #include "gpu/command_buffer/service/query_manager.h"
35 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
36 #include "ui/gfx/size.h"
37 #include "ui/gl/gl_context.h"
38 #include "ui/gl/gl_image.h"
39 #include "ui/gl/gl_share_group.h"
40
41 #if defined(OS_ANDROID)
42 #include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
43 #include "ui/gl/android/surface_texture.h"
44 #endif
45
46 namespace gpu {
47
48 namespace {
49
// Process-wide factory for GpuMemoryBuffer allocation, installed via
// InProcessCommandBuffer::SetGpuMemoryBufferFactory(). May be NULL.
static InProcessGpuMemoryBufferFactory* g_gpu_memory_buffer_factory = NULL;
51
52 template <typename T>
53 static void RunTaskWithResult(base::Callback<T(void)> task,
54                               T* result,
55                               base::WaitableEvent* completion) {
56   *result = task.Run();
57   completion->Signal();
58 }
59
// Default Service implementation: a dedicated, ref-counted GPU thread that
// all InProcessCommandBuffers constructed without an explicit service share.
class GpuInProcessThread
    : public base::Thread,
      public InProcessCommandBuffer::Service,
      public base::RefCountedThreadSafe<GpuInProcessThread> {
 public:
  GpuInProcessThread();

  // Route Service ref-counting through RefCountedThreadSafe.
  virtual void AddRef() const OVERRIDE {
    base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
  }
  virtual void Release() const OVERRIDE {
    base::RefCountedThreadSafe<GpuInProcessThread>::Release();
  }

  virtual void ScheduleTask(const base::Closure& task) OVERRIDE;
  virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
  // Clients on this thread each get a real (non-virtualized) GL context.
  virtual bool UseVirtualizedGLContexts() OVERRIDE { return false; }
  virtual scoped_refptr<gles2::ShaderTranslatorCache> shader_translator_cache()
      OVERRIDE;

 private:
  virtual ~GpuInProcessThread();
  friend class base::RefCountedThreadSafe<GpuInProcessThread>;

  // Created lazily by shader_translator_cache().
  scoped_refptr<gpu::gles2::ShaderTranslatorCache> shader_translator_cache_;
  DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
};
87
// Starts the thread immediately so tasks can be scheduled right away.
GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
  Start();
}
91
// Blocks until the message loop has drained and the thread has joined.
GpuInProcessThread::~GpuInProcessThread() {
  Stop();
}
95
// Service implementation: run |task| asynchronously on this GPU thread.
void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
  message_loop()->PostTask(FROM_HERE, task);
}
99
// Service implementation: run |callback| after a short delay so idle work
// does not starve regular command processing.
void GpuInProcessThread::ScheduleIdleWork(const base::Closure& callback) {
  message_loop()->PostDelayedTask(
      FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
}
104
105 scoped_refptr<gles2::ShaderTranslatorCache>
106 GpuInProcessThread::shader_translator_cache() {
107   if (!shader_translator_cache_.get())
108     shader_translator_cache_ = new gpu::gles2::ShaderTranslatorCache;
109   return shader_translator_cache_;
110 }
111
// Clients sharing the default GPU thread, plus the lock guarding the set.
base::LazyInstance<std::set<InProcessCommandBuffer*> > default_thread_clients_ =
    LAZY_INSTANCE_INITIALIZER;
base::LazyInstance<base::Lock> default_thread_clients_lock_ =
    LAZY_INSTANCE_INITIALIZER;
116
117 class ScopedEvent {
118  public:
119   ScopedEvent(base::WaitableEvent* event) : event_(event) {}
120   ~ScopedEvent() { event_->Signal(); }
121
122  private:
123   base::WaitableEvent* event_;
124 };
125
// Process-wide sync point bookkeeping: hands out monotonically increasing
// ids and lets any thread block until a given id has been retired.
class SyncPointManager {
 public:
  SyncPointManager();
  ~SyncPointManager();

  // Allocates a new id and marks it pending until retired.
  uint32 GenerateSyncPoint();
  // Marks |sync_point| passed and wakes all waiting threads.
  void RetireSyncPoint(uint32 sync_point);

  bool IsSyncPointPassed(uint32 sync_point);
  // Blocks the calling thread until |sync_point| is retired.
  void WaitSyncPoint(uint32 sync_point);

private:
  // This lock protects access to pending_sync_points_ and next_sync_point_ and
  // is used with the ConditionVariable to signal when a sync point is retired.
  base::Lock lock_;
  std::set<uint32> pending_sync_points_;
  uint32 next_sync_point_;
  base::ConditionVariable cond_var_;
};
145
// Ids start at 1; 0 is never handed out.
SyncPointManager::SyncPointManager() : next_sync_point_(1), cond_var_(&lock_) {}
147
// Every generated sync point must have been retired by destruction time.
SyncPointManager::~SyncPointManager() {
  DCHECK_EQ(pending_sync_points_.size(), 0U);
}
151
152 uint32 SyncPointManager::GenerateSyncPoint() {
153   base::AutoLock lock(lock_);
154   uint32 sync_point = next_sync_point_++;
155   DCHECK_EQ(pending_sync_points_.count(sync_point), 0U);
156   pending_sync_points_.insert(sync_point);
157   return sync_point;
158 }
159
160 void SyncPointManager::RetireSyncPoint(uint32 sync_point) {
161   base::AutoLock lock(lock_);
162   DCHECK(pending_sync_points_.count(sync_point));
163   pending_sync_points_.erase(sync_point);
164   cond_var_.Broadcast();
165 }
166
167 bool SyncPointManager::IsSyncPointPassed(uint32 sync_point) {
168   base::AutoLock lock(lock_);
169   return pending_sync_points_.count(sync_point) == 0;
170 }
171
172 void SyncPointManager::WaitSyncPoint(uint32 sync_point) {
173   base::AutoLock lock(lock_);
174   while (pending_sync_points_.count(sync_point)) {
175     cond_var_.Wait();
176   }
177 }
178
// Lazily-initialized, process-wide sync point table.
base::LazyInstance<SyncPointManager> g_sync_point_manager =
    LAZY_INSTANCE_INITIALIZER;
181
182 bool WaitSyncPoint(uint32 sync_point) {
183   g_sync_point_manager.Get().WaitSyncPoint(sync_point);
184   return true;
185 }
186
}  // anonymous namespace
188
// Service is an abstract scheduling interface; nothing to initialize here.
InProcessCommandBuffer::Service::Service() {}
190
// Out-of-line destructor keeps the vtable anchored in this translation unit.
InProcessCommandBuffer::Service::~Service() {}
192
193 scoped_refptr<gles2::MailboxManager>
194 InProcessCommandBuffer::Service::mailbox_manager() {
195   if (!mailbox_manager_.get())
196     mailbox_manager_ = new gles2::MailboxManager();
197   return mailbox_manager_;
198 }
199
200 scoped_refptr<InProcessCommandBuffer::Service>
201 InProcessCommandBuffer::GetDefaultService() {
202   base::AutoLock lock(default_thread_clients_lock_.Get());
203   scoped_refptr<Service> service;
204   if (!default_thread_clients_.Get().empty()) {
205     InProcessCommandBuffer* other = *default_thread_clients_.Get().begin();
206     service = other->service_;
207     DCHECK(service.get());
208   } else {
209     service = new GpuInProcessThread;
210   }
211   return service;
212 }
213
// When |service| is null this client joins the shared default GPU thread and
// registers itself so later default clients can reuse the same service.
InProcessCommandBuffer::InProcessCommandBuffer(
    const scoped_refptr<Service>& service)
    : context_lost_(false),
      idle_work_pending_(false),
      last_put_offset_(-1),
      flush_event_(false, false),  // Auto-reset, initially unsignaled.
      service_(service.get() ? service : GetDefaultService()),
      gpu_thread_weak_ptr_factory_(this) {
  if (!service.get()) {
    base::AutoLock lock(default_thread_clients_lock_.Get());
    default_thread_clients_.Get().insert(this);
  }
}
227
// Synchronously tears down GPU-side state, then unregisters from the default
// client set (erase is a harmless no-op if we were never registered).
InProcessCommandBuffer::~InProcessCommandBuffer() {
  Destroy();
  base::AutoLock lock(default_thread_clients_lock_.Get());
  default_thread_clients_.Get().erase(this);
}
233
// Decoder resize callback (GPU thread) for onscreen surfaces.
// |scale_factor| is currently ignored.
void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
  CheckSequencedThread();
  DCHECK(!surface_->IsOffscreen());
  surface_->Resize(size);
}
239
240 bool InProcessCommandBuffer::MakeCurrent() {
241   CheckSequencedThread();
242   command_buffer_lock_.AssertAcquired();
243
244   if (!context_lost_ && decoder_->MakeCurrent())
245     return true;
246   DLOG(ERROR) << "Context lost because MakeCurrent failed.";
247   command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
248   command_buffer_->SetParseError(gpu::error::kLostContext);
249   return false;
250 }
251
252 void InProcessCommandBuffer::PumpCommands() {
253   CheckSequencedThread();
254   command_buffer_lock_.AssertAcquired();
255
256   if (!MakeCurrent())
257     return;
258
259   gpu_scheduler_->PutChanged();
260 }
261
// GpuScheduler callback: forwards a get-buffer change to the command buffer.
// Always reports success.
bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();
  command_buffer_->SetGetBuffer(transfer_buffer_id);
  return true;
}
268
// Client-thread entry point: packages the parameters, runs the real
// initialization on the GPU thread, and blocks until it finishes. On success
// |capabilities_| is populated from the decoder.
bool InProcessCommandBuffer::Initialize(
    scoped_refptr<gfx::GLSurface> surface,
    bool is_offscreen,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    const base::Closure& context_lost_callback,
    InProcessCommandBuffer* share_group) {
  // Sharing requires both command buffers to run on the same service/thread.
  DCHECK(!share_group || service_.get() == share_group->service_.get());
  context_lost_callback_ = WrapCallback(context_lost_callback);

  if (surface.get()) {
    // GPU thread must be the same as client thread due to GLSurface not being
    // thread safe.
    sequence_checker_.reset(new base::SequenceChecker);
    surface_ = surface;
  }

  gpu::Capabilities capabilities;
  InitializeOnGpuThreadParams params(is_offscreen,
                                     window,
                                     size,
                                     attribs,
                                     gpu_preference,
                                     &capabilities,
                                     share_group);

  base::Callback<bool(void)> init_task =
      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
                 base::Unretained(this),
                 params);

  // Block until InitializeOnGpuThread() has run and reported a result.
  base::WaitableEvent completion(true, false);
  bool result = false;
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
  completion.Wait();

  if (result) {
    capabilities_ = capabilities;
    // Image mapping additionally requires the process-wide buffer factory.
    capabilities_.map_image =
        capabilities_.map_image && g_gpu_memory_buffer_factory;
  }
  return result;
}
315
// GPU-thread half of Initialize(): builds the CommandBufferService, decoder,
// scheduler, GL surface and GL context (virtualized when the service asks for
// it), wires the decoder callbacks, and reports the decoder capabilities via
// |params.capabilities|. On any failure, cleans up through
// DestroyOnGpuThread() and returns false.
bool InProcessCommandBuffer::InitializeOnGpuThread(
    const InitializeOnGpuThreadParams& params) {
  CheckSequencedThread();
  gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr();

  DCHECK(params.size.width() >= 0 && params.size.height() >= 0);

  TransferBufferManager* manager = new TransferBufferManager();
  transfer_buffer_manager_.reset(manager);
  manager->Initialize();

  scoped_ptr<CommandBufferService> command_buffer(
      new CommandBufferService(transfer_buffer_manager_.get()));
  // Weak pointers: callbacks must not fire after DestroyOnGpuThread()
  // invalidates them.
  command_buffer->SetPutOffsetChangeCallback(base::Bind(
      &InProcessCommandBuffer::PumpCommands, gpu_thread_weak_ptr_));
  command_buffer->SetParseErrorCallback(base::Bind(
      &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_));

  if (!command_buffer->Initialize()) {
    LOG(ERROR) << "Could not initialize command buffer.";
    DestroyOnGpuThread();
    return false;
  }

  // Reuse the share group (and, below, the context group) of
  // |params.context_group| when sharing; otherwise create fresh ones.
  gl_share_group_ = params.context_group
                        ? params.context_group->gl_share_group_.get()
                        : new gfx::GLShareGroup;

#if defined(OS_ANDROID)
  stream_texture_manager_.reset(new StreamTextureManagerInProcess);
#endif

  bool bind_generates_resource = false;
  decoder_.reset(gles2::GLES2Decoder::Create(
      params.context_group
          ? params.context_group->decoder_->GetContextGroup()
          : new gles2::ContextGroup(service_->mailbox_manager(),
                                    NULL,
                                    service_->shader_translator_cache(),
                                    NULL,
                                    bind_generates_resource)));

  gpu_scheduler_.reset(
      new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
  command_buffer->SetGetBufferChangeCallback(base::Bind(
      &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
  command_buffer_ = command_buffer.Pass();

  decoder_->set_engine(gpu_scheduler_.get());

  // The client may already have supplied a surface in Initialize(); otherwise
  // create one here on the GPU thread.
  if (!surface_.get()) {
    if (params.is_offscreen)
      surface_ = gfx::GLSurface::CreateOffscreenGLSurface(params.size);
    else
      surface_ = gfx::GLSurface::CreateViewGLSurface(params.window);
  }

  if (!surface_.get()) {
    LOG(ERROR) << "Could not create GLSurface.";
    DestroyOnGpuThread();
    return false;
  }

  if (service_->UseVirtualizedGLContexts()) {
    // Every virtual context in the share group wraps one shared real context,
    // created on demand here.
    context_ = gl_share_group_->GetSharedContext();
    if (!context_.get()) {
      context_ = gfx::GLContext::CreateGLContext(
          gl_share_group_.get(), surface_.get(), params.gpu_preference);
      gl_share_group_->SetSharedContext(context_.get());
    }

    context_ = new GLContextVirtual(
        gl_share_group_.get(), context_.get(), decoder_->AsWeakPtr());
    if (context_->Initialize(surface_.get(), params.gpu_preference)) {
      VLOG(1) << "Created virtual GL context.";
    } else {
      context_ = NULL;
    }
  } else {
    context_ = gfx::GLContext::CreateGLContext(
        gl_share_group_.get(), surface_.get(), params.gpu_preference);
  }

  if (!context_.get()) {
    LOG(ERROR) << "Could not create GLContext.";
    DestroyOnGpuThread();
    return false;
  }

  if (!context_->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Could not make context current.";
    DestroyOnGpuThread();
    return false;
  }

  // In-process clients manage their own memory; disable the GPU memory
  // manager feature in the decoder.
  gles2::DisallowedFeatures disallowed_features;
  disallowed_features.gpu_memory_manager = true;
  if (!decoder_->Initialize(surface_,
                            context_,
                            params.is_offscreen,
                            params.size,
                            disallowed_features,
                            params.attribs)) {
    LOG(ERROR) << "Could not initialize decoder.";
    DestroyOnGpuThread();
    return false;
  }
  *params.capabilities = decoder_->GetCapabilities();

  if (!params.is_offscreen) {
    decoder_->SetResizeCallback(base::Bind(
        &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));
  }
  decoder_->SetWaitSyncPointCallback(base::Bind(&WaitSyncPoint));

  return true;
}
433
434 void InProcessCommandBuffer::Destroy() {
435   CheckSequencedThread();
436
437   base::WaitableEvent completion(true, false);
438   bool result = false;
439   base::Callback<bool(void)> destroy_task = base::Bind(
440       &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
441   QueueTask(
442       base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
443   completion.Wait();
444 }
445
// GPU-thread half of Destroy(). The decoder frees GL resources only when the
// context can still be made current. Always returns true; the bool return
// exists to fit the RunTaskWithResult<bool> helper.
bool InProcessCommandBuffer::DestroyOnGpuThread() {
  CheckSequencedThread();
  // Cancel any pending weak-pointer callbacks (pump, idle work, resize).
  gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_.get() && context_->MakeCurrent(surface_.get());
  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
  context_ = NULL;
  surface_ = NULL;
  gl_share_group_ = NULL;
#if defined(OS_ANDROID)
  stream_texture_manager_.reset();
#endif

  return true;
}
465
// DCHECKs thread affinity when a client-supplied surface forced
// single-thread operation (see Initialize()); a no-op otherwise.
void InProcessCommandBuffer::CheckSequencedThread() {
  DCHECK(!sequence_checker_ ||
         sequence_checker_->CalledOnValidSequencedThread());
}
470
// Parse-error callback from the command buffer: notifies the client exactly
// once, then latches the lost flag.
void InProcessCommandBuffer::OnContextLost() {
  CheckSequencedThread();
  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    context_lost_callback_.Reset();  // Ensure the callback fires only once.
  }

  context_lost_ = true;
}
480
// Refreshes |last_state_| from the state captured at the last flush. The
// unsigned subtraction accepts only generations ahead of (or equal to) the
// cached one, remaining correct across 32-bit generation wraparound.
CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  CheckSequencedThread();
  base::AutoLock lock(state_after_last_flush_lock_);
  if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
    last_state_ = state_after_last_flush_;
  return last_state_;
}
488
// Returns the cached state without synchronizing with the GPU thread.
CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  CheckSequencedThread();
  return last_state_;
}
493
// Returns the most recent token, refreshing the cached state first.
int32 InProcessCommandBuffer::GetLastToken() {
  CheckSequencedThread();
  GetStateFast();
  return last_state_.token;
}
499
// GPU-thread half of Flush(): processes commands up to |put_offset|,
// captures the resulting state, and (via ScopedEvent) signals |flush_event_|
// on every exit path so client-side waiters wake up.
void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
  CheckSequencedThread();
  ScopedEvent handle_flush(&flush_event_);
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->Flush(put_offset);
  {
    // Update state before signaling the flush event.
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
  // The error state and the latched context-lost flag must agree.
  DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
         (error::IsError(state_after_last_flush_.error) && context_lost_));

  // If we've processed all pending commands but still have pending queries,
  // pump idle work until the query is passed.
  if (put_offset == state_after_last_flush_.get_offset &&
      gpu_scheduler_->HasMoreWork()) {
    ScheduleIdleWorkOnGpuThread();
  }
}
520
// GPU-thread idle callback: performs one slice of idle work and reschedules
// itself while the scheduler still has pending work.
void InProcessCommandBuffer::PerformIdleWork() {
  CheckSequencedThread();
  idle_work_pending_ = false;
  base::AutoLock lock(command_buffer_lock_);
  if (MakeCurrent() && gpu_scheduler_->HasMoreWork()) {
    gpu_scheduler_->PerformIdleWork();
    ScheduleIdleWorkOnGpuThread();
  }
}
530
// Requests an idle-work callback, coalescing requests while one is already
// pending. The weak pointer cancels the callback after teardown.
void InProcessCommandBuffer::ScheduleIdleWorkOnGpuThread() {
  CheckSequencedThread();
  if (idle_work_pending_)
    return;
  idle_work_pending_ = true;
  service_->ScheduleIdleWork(
      base::Bind(&InProcessCommandBuffer::PerformIdleWork,
                 gpu_thread_weak_ptr_));
}
540
// Client-thread flush: posts the new put offset to the GPU thread. Skipped
// when already in error or when the offset has not changed.
void InProcessCommandBuffer::Flush(int32 put_offset) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;
  base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                                  gpu_thread_weak_ptr_,
                                  put_offset);
  QueueTask(task);
}
555
// Blocks the client thread until the last token enters [start, end] or an
// error occurs; FlushOnGpuThread() signals |flush_event_| after each flush.
void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) {
  CheckSequencedThread();
  while (!InRange(start, end, GetLastToken()) &&
         last_state_.error == gpu::error::kNoError)
    flush_event_.Wait();
}
562
// Blocks the client thread until the get offset enters [start, end] or an
// error occurs, re-reading the cached state after each flush signal.
void InProcessCommandBuffer::WaitForGetOffsetInRange(int32 start, int32 end) {
  CheckSequencedThread();

  GetStateFast();
  while (!InRange(start, end, last_state_.get_offset) &&
         last_state_.error == gpu::error::kNoError) {
    flush_event_.Wait();
    GetStateFast();
  }
}
573
// Switches the command buffer to a new get buffer, resetting the cached put
// offset and re-capturing the post-change state.
void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  {
    base::AutoLock lock(command_buffer_lock_);
    command_buffer_->SetGetBuffer(shm_id);
    last_put_offset_ = 0;
  }
  {
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
}
589
// Allocates a transfer buffer directly on the calling thread, under the
// command buffer lock; |*id| receives the new buffer's id.
scoped_refptr<Buffer> InProcessCommandBuffer::CreateTransferBuffer(size_t size,
                                                                   int32* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
}
596
// Posts destruction of buffer |id| to the GPU thread; does not block.
// NOTE(review): base::Unretained assumes the queued task runs before |this|
// is destroyed — presumably guaranteed because ~InProcessCommandBuffer()
// blocks in Destroy(); confirm against the Service implementations.
void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
  CheckSequencedThread();
  base::Closure task =
      base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread,
                 base::Unretained(this),
                 id);

  QueueTask(task);
}
606
// GPU-thread half of DestroyTransferBuffer().
void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32 id) {
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->DestroyTransferBuffer(id);
}
611
// Returns the capabilities captured during Initialize().
gpu::Capabilities InProcessCommandBuffer::GetCapabilities() {
  return capabilities_;
}
615
// Allocates a GpuMemoryBuffer via the process-wide factory, asynchronously
// registers a GLImage for it on the GPU thread, and returns the client-side
// buffer (owned by |gpu_memory_buffers_|). Returns NULL with |*id| == -1 on
// allocation failure.
// NOTE(review): assumes g_gpu_memory_buffer_factory is non-NULL here —
// presumably callers check capabilities().map_image first; confirm.
gfx::GpuMemoryBuffer* InProcessCommandBuffer::CreateGpuMemoryBuffer(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage,
    int32* id) {
  CheckSequencedThread();

  *id = -1;

  scoped_ptr<gfx::GpuMemoryBuffer> buffer =
      g_gpu_memory_buffer_factory->AllocateGpuMemoryBuffer(
          width, height, internalformat, usage);
  if (!buffer.get())
    return NULL;

  // Ids are only handed out on the (sequence-checked) client thread, so a
  // plain static counter suffices.
  static int32 next_id = 1;
  int32 new_id = next_id++;

  base::Closure task =
      base::Bind(&InProcessCommandBuffer::RegisterGpuMemoryBufferOnGpuThread,
                 base::Unretained(this),
                 new_id,
                 buffer->GetHandle(),
                 width,
                 height,
                 internalformat);

  QueueTask(task);

  *id = new_id;
  DCHECK(gpu_memory_buffers_.find(new_id) == gpu_memory_buffers_.end());
  return gpu_memory_buffers_.add(new_id, buffer.Pass()).first->second;
}
650
651 void InProcessCommandBuffer::RegisterGpuMemoryBufferOnGpuThread(
652     int32 id,
653     const gfx::GpuMemoryBufferHandle& handle,
654     size_t width,
655     size_t height,
656     unsigned internalformat) {
657   scoped_refptr<gfx::GLImage> image =
658       g_gpu_memory_buffer_factory->CreateImageForGpuMemoryBuffer(
659           handle, gfx::Size(width, height), internalformat);
660   if (!image.get())
661     return;
662
663   // For Android specific workaround.
664   gles2::ContextGroup* context_group = decoder_->GetContextGroup();
665   if (context_group->feature_info()->workarounds().release_image_after_use)
666     image->SetReleaseAfterUse();
667
668   if (decoder_) {
669     gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
670     DCHECK(image_manager);
671     image_manager->AddImage(image.get(), id);
672   }
673 }
674
// Queues GLImage unregistration on the GPU thread, then releases the
// client-side buffer immediately.
void InProcessCommandBuffer::DestroyGpuMemoryBuffer(int32 id) {
  CheckSequencedThread();

  base::Closure task =
      base::Bind(&InProcessCommandBuffer::UnregisterGpuMemoryBufferOnGpuThread,
                 base::Unretained(this),
                 id);

  QueueTask(task);

  gpu_memory_buffers_.erase(id);
}
687
688 void InProcessCommandBuffer::UnregisterGpuMemoryBufferOnGpuThread(int32 id) {
689   if (decoder_) {
690     gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
691     DCHECK(image_manager);
692     image_manager->RemoveImage(id);
693   }
694 }
695
// Generates a new sync point and queues its retirement behind all previously
// queued GPU work.
uint32 InProcessCommandBuffer::InsertSyncPoint() {
  uint32 sync_point = g_sync_point_manager.Get().GenerateSyncPoint();
  QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point));
  return sync_point;
}
703
// Generates a sync point without queuing retirement; the caller must later
// retire it explicitly via RetireSyncPoint().
uint32 InProcessCommandBuffer::InsertFutureSyncPoint() {
  return g_sync_point_manager.Get().GenerateSyncPoint();
}
707
// Queues retirement of a previously inserted (future) sync point behind
// pending GPU work.
void InProcessCommandBuffer::RetireSyncPoint(uint32 sync_point) {
  QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point));
}
713
// GPU-thread retirement: when sync-based mailboxes are in use, pushes
// texture updates first so consumers waiting on the sync point observe them.
void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point) {
  gles2::MailboxManager* mailbox_manager =
      decoder_->GetContextGroup()->mailbox_manager();
  if (mailbox_manager->UsesSync()) {
    // Pushing updates requires a current context; skip if that fails.
    bool make_current_success = false;
    {
      base::AutoLock lock(command_buffer_lock_);
      make_current_success = MakeCurrent();
    }
    if (make_current_success)
      mailbox_manager->PushTextureUpdates();
  }
  g_sync_point_manager.Get().RetireSyncPoint(sync_point);
}
728
// Arranges for |callback| to run on the client thread once |sync_point|
// passes (the wrapping makes the callback thread-hop back).
void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
                                             const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
                       base::Unretained(this),
                       sync_point,
                       WrapCallback(callback)));
}
737
// GPU-thread polling loop: runs |callback| if the sync point has passed,
// otherwise re-posts itself as idle work and checks again later.
void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
    unsigned sync_point,
    const base::Closure& callback) {
  if (g_sync_point_manager.Get().IsSyncPointPassed(sync_point)) {
    callback.Run();
  } else {
    service_->ScheduleIdleWork(
        base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
                   gpu_thread_weak_ptr_,
                   sync_point,
                   callback));
  }
}
751
// Arranges for |callback| to run on the client thread once query |query_id|
// completes.
void InProcessCommandBuffer::SignalQuery(unsigned query_id,
                                         const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread,
                       base::Unretained(this),
                       query_id,
                       WrapCallback(callback)));
}
760
761 void InProcessCommandBuffer::SignalQueryOnGpuThread(
762     unsigned query_id,
763     const base::Closure& callback) {
764   gles2::QueryManager* query_manager_ = decoder_->GetQueryManager();
765   DCHECK(query_manager_);
766
767   gles2::QueryManager::Query* query = query_manager_->GetQuery(query_id);
768   if (!query)
769     callback.Run();
770   else
771     query->AddCallback(callback);
772 }
773
// Intentionally a no-op for the in-process implementation.
void InProcessCommandBuffer::SetSurfaceVisible(bool visible) {}
775
// Queues |callback| behind pending GPU tasks; it runs back on the caller's
// message loop (see WrapCallback).
void InProcessCommandBuffer::Echo(const base::Closure& callback) {
  QueueTask(WrapCallback(callback));
}
779
// Synchronously creates a stream texture on the GPU thread and returns its
// id (0 on non-Android platforms — see the GPU-thread helper).
uint32 InProcessCommandBuffer::CreateStreamTexture(uint32 texture_id) {
  base::WaitableEvent completion(true, false);
  uint32 stream_id = 0;
  base::Callback<uint32(void)> task =
      base::Bind(&InProcessCommandBuffer::CreateStreamTextureOnGpuThread,
                 base::Unretained(this),
                 texture_id);
  QueueTask(
      base::Bind(&RunTaskWithResult<uint32>, task, &stream_id, &completion));
  completion.Wait();
  return stream_id;
}
792
// GPU-thread half of CreateStreamTexture(); stream textures only exist on
// Android, so other platforms report 0.
uint32 InProcessCommandBuffer::CreateStreamTextureOnGpuThread(
    uint32 client_texture_id) {
#if defined(OS_ANDROID)
  return stream_texture_manager_->CreateStreamTexture(
      client_texture_id, decoder_->GetContextGroup()->texture_manager());
#else
  return 0;
#endif
}
802
// Returns the cached error without synchronizing with the GPU thread.
gpu::error::Error InProcessCommandBuffer::GetLastError() {
  CheckSequencedThread();
  return last_state_.error;
}
807
// Unused CommandBuffer override; in-process clients must call the full
// Initialize() overload above.
bool InProcessCommandBuffer::Initialize() {
  NOTREACHED();
  return false;
}
812
813 namespace {
814
815 void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
816                          const base::Closure& callback) {
817   if (!loop->BelongsToCurrentThread()) {
818     loop->PostTask(FROM_HERE, callback);
819   } else {
820     callback.Run();
821   }
822 }
823
// Runs a heap-allocated closure; taking ownership via scoped_ptr ensures the
// closure is destroyed on this (target) thread.
void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
  DCHECK(callback.get());
  callback->Run();
}
828
829 }  // anonymous namespace
830
// Wraps |callback| so that, when invoked from any thread, it runs (and is
// destroyed) on the message loop current at wrap time.
base::Closure InProcessCommandBuffer::WrapCallback(
    const base::Closure& callback) {
  // Make sure the callback gets deleted on the target thread by passing
  // ownership.
  scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
  base::Closure callback_on_client_thread =
      base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
  base::Closure wrapped_callback =
      base::Bind(&PostCallback, base::MessageLoopProxy::current(),
                 callback_on_client_thread);
  return wrapped_callback;
}
843
844 #if defined(OS_ANDROID)
// Android-only: returns the SurfaceTexture backing stream |stream_id|.
scoped_refptr<gfx::SurfaceTexture>
InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) {
  DCHECK(stream_texture_manager_);
  return stream_texture_manager_->GetSurfaceTexture(stream_id);
}
850 #endif
851
// static
// Installs the process-wide buffer factory read by Initialize() and
// CreateGpuMemoryBuffer(). NOTE(review): no synchronization here — set this
// before creating command buffers; confirm callers do so.
void InProcessCommandBuffer::SetGpuMemoryBufferFactory(
    InProcessGpuMemoryBufferFactory* factory) {
  g_gpu_memory_buffer_factory = factory;
}
857
858 }  // namespace gpu