// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/command_buffer_proxy_impl.h"

#include "base/callback.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/memory/shared_memory.h"
#include "base/stl_util.h"
#include "content/common/child_process_messages.h"
#include "content/common/gpu/client/gpu_channel_host.h"
#include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/view_messages.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer_shared.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "ui/gfx/size.h"

namespace content {

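// Usage sketch (illustrative only, not part of the original source): a
// renderer-side client typically constructs the proxy on an established GPU
// channel and must call Initialize() before issuing any commands. |channel|
// and |route_id| below are assumed to come from GpuChannelHost setup.
//
//   CommandBufferProxyImpl* proxy =
//       new CommandBufferProxyImpl(channel, route_id);
//   if (!proxy->Initialize()) {
//     // Shared state setup or the Initialize IPC failed; delete the proxy.
//   }
//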
CommandBufferProxyImpl::CommandBufferProxyImpl(
    GpuChannelHost* channel,
    int route_id)
    : channel_(channel),
      route_id_(route_id),
      flush_count_(0),
      last_put_offset_(-1),
      next_signal_id_(0) {
}

CommandBufferProxyImpl::~CommandBufferProxyImpl() {
  FOR_EACH_OBSERVER(DeletionObserver,
                    deletion_observers_,
                    OnWillDeleteImpl());

  // Delete all the locally cached shared memory objects, closing the handle
  // in this process.
  for (TransferBufferMap::iterator it = transfer_buffers_.begin();
       it != transfer_buffers_.end();
       ++it) {
    delete it->second.shared_memory;
    it->second.shared_memory = NULL;
  }
}

bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EchoAck, OnEchoAck);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetMemoryAllocation,
                        OnSetMemoryAllocation);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPointAck,
                        OnSignalSyncPointAck);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  DCHECK(handled);
  return handled;
}

void CommandBufferProxyImpl::OnChannelError() {
  OnDestroyed(gpu::error::kUnknown);
}

void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason) {
  // Prevent any further messages from being sent.
  channel_ = NULL;

  // When the client sees that the context is lost, they should delete this
  // CommandBufferProxyImpl and create a new one.
  last_state_.error = gpu::error::kLostContext;
  last_state_.context_lost_reason = reason;

  if (!channel_error_callback_.is_null()) {
    channel_error_callback_.Run();
    // Avoid calling the error callback more than once.
    channel_error_callback_.Reset();
  }
}

void CommandBufferProxyImpl::OnEchoAck() {
  DCHECK(!echo_tasks_.empty());
  base::Closure callback = echo_tasks_.front();
  echo_tasks_.pop();
  callback.Run();
}

void CommandBufferProxyImpl::OnConsoleMessage(
    const GPUCommandBufferConsoleMessage& message) {
  if (!console_message_callback_.is_null()) {
    console_message_callback_.Run(message.message, message.id);
  }
}

void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
    const MemoryAllocationChangedCallback& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  memory_allocation_changed_callback_ = callback;
  Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
      route_id_, !memory_allocation_changed_callback_.is_null()));
}

void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) {
  deletion_observers_.AddObserver(observer);
}

void CommandBufferProxyImpl::RemoveDeletionObserver(
    DeletionObserver* observer) {
  deletion_observers_.RemoveObserver(observer);
}

void CommandBufferProxyImpl::OnSetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!memory_allocation_changed_callback_.is_null())
    memory_allocation_changed_callback_.Run(allocation);
}

void CommandBufferProxyImpl::OnSignalSyncPointAck(uint32 id) {
  SignalTaskMap::iterator it = signal_tasks_.find(id);
  DCHECK(it != signal_tasks_.end());
  base::Closure callback = it->second;
  signal_tasks_.erase(it);
  callback.Run();
}

void CommandBufferProxyImpl::SetChannelErrorCallback(
    const base::Closure& callback) {
  channel_error_callback_ = callback;
}

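// Allocates and maps the shared state area used to mirror command buffer
// state from the service, shares the handle with the GPU process, and sends
// the synchronous Initialize IPC. Returns false if allocation, mapping, or
// the IPC fails.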
bool CommandBufferProxyImpl::Initialize() {
  shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory(
      sizeof(*shared_state())).release());
  if (!shared_state_shm_)
    return false;

  if (!shared_state_shm_->Map(sizeof(*shared_state())))
    return false;

  shared_state()->Initialize();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the Initialize IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_state_shm_->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return false;

  bool result;
  if (!Send(new GpuCommandBufferMsg_Initialize(
      route_id_, handle, &result, &capabilities_))) {
    LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
    return false;
  }

  if (!result) {
    LOG(ERROR) << "Failed to initialize command buffer service.";
    return false;
  }

  capabilities_.map_image = true;

  return true;
}

gpu::CommandBuffer::State CommandBufferProxyImpl::GetState() {
  // Send will flag state with lost context if IPC fails.
  if (last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_GetState(route_id_, &state)))
      OnUpdateState(state);
  }

  TryUpdateState();
  return last_state_;
}

gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
  return last_state_;
}

int32 CommandBufferProxyImpl::GetLastToken() {
  TryUpdateState();
  return last_state_.token;
}

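// Asynchronously notifies the service of a new put offset so it can process
// newly written commands. Flushes to an unchanged put offset are dropped;
// each effective flush carries a monotonically increasing flush count.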
void CommandBufferProxyImpl::Flush(int32 put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1("gpu",
               "CommandBufferProxyImpl::Flush",
               "put_offset",
               put_offset);

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;

  Send(new GpuCommandBufferMsg_AsyncFlush(route_id_,
                                          put_offset,
                                          ++flush_count_));
}

void CommandBufferProxyImpl::SetLatencyInfo(
    const std::vector<ui::LatencyInfo>& latency_info) {
  if (last_state_.error != gpu::error::kNoError)
    return;
  Send(new GpuCommandBufferMsg_SetLatencyInfo(route_id_, latency_info));
}

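// Flushes, then, if the get offset has not advanced past |last_known_get|,
// synchronously polls the service with GetStateFast so the caller can make
// progress. Returns the freshest known state either way.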
gpu::CommandBuffer::State CommandBufferProxyImpl::FlushSync(
    int32 put_offset,
    int32 last_known_get) {
  TRACE_EVENT1("gpu", "CommandBufferProxyImpl::FlushSync", "put_offset",
               put_offset);
  Flush(put_offset);
  TryUpdateState();
  if (last_known_get == last_state_.get_offset) {
    // Send will flag state with lost context if IPC fails.
    if (last_state_.error == gpu::error::kNoError) {
      gpu::CommandBuffer::State state;
      if (Send(new GpuCommandBufferMsg_GetStateFast(route_id_,
                                                    &state)))
        OnUpdateState(state);
    }
    TryUpdateState();
  }

  return last_state_;
}

void CommandBufferProxyImpl::SetGetBuffer(int32 shm_id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
  last_put_offset_ = -1;
}

void CommandBufferProxyImpl::SetGetOffset(int32 get_offset) {
  // Not implemented in proxy.
  NOTREACHED();
}

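// Allocates a shared memory transfer buffer, maps it locally, duplicates the
// handle for the GPU process, and registers it under a channel-unique id.
// On success the buffer is also cached client-side, keyed by that id.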
gpu::Buffer CommandBufferProxyImpl::CreateTransferBuffer(size_t size,
                                                         int32* id) {
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return gpu::Buffer();

  int32 new_id = channel_->ReserveTransferBufferId();
  DCHECK(transfer_buffers_.find(new_id) == transfer_buffers_.end());

  scoped_ptr<base::SharedMemory> shared_memory(
      channel_->factory()->AllocateSharedMemory(size));
  if (!shared_memory)
    return gpu::Buffer();

  DCHECK(!shared_memory->memory());
  if (!shared_memory->Map(size))
    return gpu::Buffer();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterTransferBuffer IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_memory->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return gpu::Buffer();

  if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
                                                           new_id,
                                                           handle,
                                                           size))) {
    return gpu::Buffer();
  }

  *id = new_id;
  gpu::Buffer buffer;
  buffer.ptr = shared_memory->memory();
  buffer.size = size;
  buffer.shared_memory = shared_memory.release();
  transfer_buffers_[new_id] = buffer;

  return buffer;
}

void CommandBufferProxyImpl::DestroyTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Remove the transfer buffer from the client side cache.
  TransferBufferMap::iterator it = transfer_buffers_.find(id);
  if (it != transfer_buffers_.end()) {
    delete it->second.shared_memory;
    transfer_buffers_.erase(it);
  }

  Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
}

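// Returns a previously registered transfer buffer, preferring the client-side
// cache and otherwise asking the service for a duplicated handle, which is
// then mapped and cached locally.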
gpu::Buffer CommandBufferProxyImpl::GetTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return gpu::Buffer();

  // Check local cache to see if there is already a client side shared memory
  // object for this id.
  TransferBufferMap::iterator it = transfer_buffers_.find(id);
  if (it != transfer_buffers_.end()) {
    return it->second;
  }

  // Assuming we are in the renderer process, the service is responsible for
  // duplicating the handle. This might not be true for NaCl.
  base::SharedMemoryHandle handle = base::SharedMemoryHandle();
  uint32 size;
  if (!Send(new GpuCommandBufferMsg_GetTransferBuffer(route_id_,
                                                      id,
                                                      &handle,
                                                      &size))) {
    return gpu::Buffer();
  }

  // Cache the transfer buffer shared memory object client side.
  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(handle, false));

  // Map the shared memory on demand.
  if (!shared_memory->memory()) {
    if (!shared_memory->Map(size))
      return gpu::Buffer();
  }

  gpu::Buffer buffer;
  buffer.ptr = shared_memory->memory();
  buffer.size = size;
  buffer.shared_memory = shared_memory.release();
  transfer_buffers_[id] = buffer;

  return buffer;
}

void CommandBufferProxyImpl::SetToken(int32 token) {
  // Not implemented in proxy.
  NOTREACHED();
}

void CommandBufferProxyImpl::SetParseError(
    gpu::error::Error error) {
  // Not implemented in proxy.
  NOTREACHED();
}

void CommandBufferProxyImpl::SetContextLostReason(
    gpu::error::ContextLostReason reason) {
  // Not implemented in proxy.
  NOTREACHED();
}

gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() {
  return capabilities_;
}

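// Mirrors the transfer buffer flow for GPU memory buffers: allocate through
// the channel factory, share the handle to the GPU process, register it under
// a reserved id, and cache the object client-side.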
gfx::GpuMemoryBuffer* CommandBufferProxyImpl::CreateGpuMemoryBuffer(
    size_t width,
    size_t height,
    unsigned internalformat,
    int32* id) {
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return NULL;

  int32 new_id = channel_->ReserveGpuMemoryBufferId();
  DCHECK(gpu_memory_buffers_.find(new_id) == gpu_memory_buffers_.end());

  scoped_ptr<gfx::GpuMemoryBuffer> gpu_memory_buffer(
      channel_->factory()->AllocateGpuMemoryBuffer(width,
                                                   height,
                                                   internalformat));
  if (!gpu_memory_buffer)
    return NULL;

  DCHECK(GpuChannelHost::IsValidGpuMemoryBuffer(
             gpu_memory_buffer->GetHandle()));

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterGpuMemoryBuffer IPC below.
  gfx::GpuMemoryBufferHandle handle =
      channel_->ShareGpuMemoryBufferToGpuProcess(
          gpu_memory_buffer->GetHandle());

  if (!Send(new GpuCommandBufferMsg_RegisterGpuMemoryBuffer(
                route_id_,
                new_id,
                handle,
                width,
                height,
                internalformat))) {
    return NULL;
  }

  *id = new_id;
  gpu_memory_buffers_[new_id] = gpu_memory_buffer.release();
  return gpu_memory_buffers_[new_id];
}

void CommandBufferProxyImpl::DestroyGpuMemoryBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Remove the gpu memory buffer from the client side cache.
  GpuMemoryBufferMap::iterator it = gpu_memory_buffers_.find(id);
  if (it != gpu_memory_buffers_.end()) {
    delete it->second;
    gpu_memory_buffers_.erase(it);
  }

  Send(new GpuCommandBufferMsg_DestroyGpuMemoryBuffer(route_id_, id));
}

int CommandBufferProxyImpl::GetRouteID() const {
  return route_id_;
}

void CommandBufferProxyImpl::Echo(const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError) {
    return;
  }

  if (!Send(new GpuCommandBufferMsg_Echo(
           route_id_, GpuCommandBufferMsg_EchoAck(route_id_)))) {
    return;
  }

  echo_tasks_.push(callback);
}

uint32 CommandBufferProxyImpl::CreateStreamTexture(uint32 texture_id) {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  int32 stream_id = 0;
  Send(new GpuCommandBufferMsg_CreateStreamTexture(
      route_id_, texture_id, &stream_id));
  return stream_id;
}

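// Synchronously asks the GPU process to insert a sync point into the command
// stream and returns its id, or 0 if the context is lost or the IPC fails.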
uint32 CommandBufferProxyImpl::InsertSyncPoint() {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  uint32 sync_point = 0;
  Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, &sync_point));
  return sync_point;
}

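// Registers |callback| to run on this client once the GPU process has
// retired |sync_point|. A hypothetical usage sketch (the free function
// OnSyncPointRetired is assumed, not part of this file):
//
//   uint32 sync_point = producer_proxy->InsertSyncPoint();
//   consumer_proxy->SignalSyncPoint(sync_point,
//                                   base::Bind(&OnSyncPointRetired));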
void CommandBufferProxyImpl::SignalSyncPoint(uint32 sync_point,
                                             const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalSyncPoint(route_id_,
                                                    sync_point,
                                                    signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

void CommandBufferProxyImpl::SignalQuery(uint32 query,
                                         const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Signal identifiers are hidden, so nobody outside of this class will see
  // them. (And thus, they cannot save them.) The IDs themselves only last
  // until the callback is invoked, which will happen as soon as the GPU
  // catches up with the command buffer.
  // A malicious caller trying to create a collision by making next_signal_id
  // wrap would have to make calls at an astounding rate (300B/s), and even if
  // they could do that, all they would do is prevent some callbacks from
  // getting called, leading to stalled threads and/or memory leaks.
  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_,
                                                query,
                                                signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

void CommandBufferProxyImpl::SetSurfaceVisible(bool visible) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_, visible));
}

void CommandBufferProxyImpl::SendManagedMemoryStats(
    const gpu::ManagedMemoryStats& stats) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SendClientManagedMemoryStats(route_id_,
                                                            stats));
}

bool CommandBufferProxyImpl::GenerateMailboxNames(
    unsigned num,
    std::vector<gpu::Mailbox>* names) {
  if (last_state_.error != gpu::error::kNoError)
    return false;

  return channel_->GenerateMailboxNames(num, names);
}

bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  if (last_state_.error != gpu::error::kNoError)
    return false;

  return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_, mailbox));
}

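// Creates a service-side video decoder and wraps its route in a
// GpuVideoDecodeAcceleratorHost. Returns a null scoped_ptr if the IPC fails
// or the service could not create a decoder for |profile|.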
scoped_ptr<media::VideoDecodeAccelerator>
CommandBufferProxyImpl::CreateVideoDecoder(
    media::VideoCodecProfile profile,
    media::VideoDecodeAccelerator::Client* client) {
  int decoder_route_id;
  scoped_ptr<media::VideoDecodeAccelerator> vda;
  if (!Send(new GpuCommandBufferMsg_CreateVideoDecoder(route_id_, profile,
                                                       &decoder_route_id))) {
    LOG(ERROR) << "Send(GpuCommandBufferMsg_CreateVideoDecoder) failed";
    return vda.Pass();
  }

  if (decoder_route_id < 0) {
    DLOG(ERROR) << "Failed to Initialize GPU decoder on profile: " << profile;
    return vda.Pass();
  }

  GpuVideoDecodeAcceleratorHost* decoder_host =
      new GpuVideoDecodeAcceleratorHost(channel_, decoder_route_id, client,
                                        this);
  vda.reset(decoder_host);
  return vda.Pass();
}

gpu::error::Error CommandBufferProxyImpl::GetLastError() {
  return last_state_.error;
}

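// Single funnel for outgoing IPC. Ownership of |msg| always transfers,
// matching the IPC::Sender contract, and a failed send marks the context as
// lost so later calls early-out.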
bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
  // Caller should not intentionally send a message if the context is lost.
  DCHECK(last_state_.error == gpu::error::kNoError);

  if (channel_) {
    if (channel_->Send(msg)) {
      return true;
    } else {
      // Flag the command buffer as lost. Defer deleting the channel until
      // OnChannelError is called after returning to the message loop in case
      // it is referenced elsewhere.
      last_state_.error = gpu::error::kLostContext;
      return false;
    }
  }

  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  delete msg;
  return false;
}

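// Wraparound illustration (added for clarity): the generation check below
// relies on unsigned arithmetic. With last_state_.generation == 0xFFFFFFFFu
// and an incoming generation of 1u, the difference 1u - 0xFFFFFFFFu wraps to
// 2u, which is < 0x80000000u, so the newer state is still accepted.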
void CommandBufferProxyImpl::OnUpdateState(
    const gpu::CommandBuffer::State& state) {
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs.
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}

void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
    const GpuConsoleMessageCallback& callback) {
  console_message_callback_ = callback;
}

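// Reads the latest state the service published to shared memory, but only
// while no error has been recorded; after an error, last_state_ stays frozen.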
void CommandBufferProxyImpl::TryUpdateState() {
  if (last_state_.error == gpu::error::kNoError)
    shared_state()->Read(&last_state_);
}

}  // namespace content