- add sources.
[platform/framework/web/crosswalk.git] / src / content / common / gpu / client / command_buffer_proxy_impl.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "content/common/gpu/client/command_buffer_proxy_impl.h"
6
7 #include "base/callback.h"
8 #include "base/debug/trace_event.h"
9 #include "base/logging.h"
10 #include "base/memory/shared_memory.h"
11 #include "base/stl_util.h"
12 #include "content/common/child_process_messages.h"
13 #include "content/common/gpu/client/gpu_channel_host.h"
14 #include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
15 #include "content/common/gpu/gpu_messages.h"
16 #include "content/common/view_messages.h"
17 #include "gpu/command_buffer/common/cmd_buffer_common.h"
18 #include "gpu/command_buffer/common/command_buffer_shared.h"
19 #include "gpu/command_buffer/common/gpu_memory_allocation.h"
20 #include "ui/gfx/size.h"
21
22 namespace content {
23
// Constructs a proxy for a command buffer living in the GPU process.
// |channel| is the IPC channel to that process (not owned; cleared in
// OnDestroyed() when the context is lost) and |route_id| identifies this
// command buffer's message route on the channel.
CommandBufferProxyImpl::CommandBufferProxyImpl(
    GpuChannelHost* channel,
    int route_id)
    : channel_(channel),
      route_id_(route_id),
      flush_count_(0),
      last_put_offset_(-1),  // -1 so the first Flush() is never elided.
      next_signal_id_(0) {
}
33
CommandBufferProxyImpl::~CommandBufferProxyImpl() {
  // Give observers a chance to react before any state is torn down.
  FOR_EACH_OBSERVER(DeletionObserver,
                    deletion_observers_,
                    OnWillDeleteImpl());

  // Delete all the locally cached shared memory objects, closing the handle
  // in this process. (gpu::Buffer holds its shared_memory as a raw owning
  // pointer, so it must be deleted manually here.)
  for (TransferBufferMap::iterator it = transfer_buffers_.begin();
       it != transfer_buffers_.end();
       ++it) {
    delete it->second.shared_memory;
    it->second.shared_memory = NULL;
  }
}
48
// Dispatches IPC messages from the GPU process to the handlers below.
// Returns true if the message was recognized; unhandled messages trip the
// DCHECK since every message routed here is expected to have a handler.
bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EchoAck, OnEchoAck);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetMemoryAllocation,
                        OnSetMemoryAllocation);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPointAck,
                        OnSignalSyncPointAck);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  DCHECK(handled);
  return handled;
}
65
// Treats loss of the GPU channel as a context loss with an unknown reason.
void CommandBufferProxyImpl::OnChannelError() {
  OnDestroyed(gpu::error::kUnknown);
}
69
// Marks the context as lost for |reason|. Order matters: channel_ is
// cleared first so nothing below (including the error callback) can send
// further messages through a dead channel.
void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason) {
  // Prevent any further messages from being sent.
  channel_ = NULL;

  // When the client sees that the context is lost, they should delete this
  // CommandBufferProxyImpl and create a new one.
  last_state_.error = gpu::error::kLostContext;
  last_state_.context_lost_reason = reason;

  if (!channel_error_callback_.is_null()) {
    channel_error_callback_.Run();
    // Avoid calling the error callback more than once.
    channel_error_callback_.Reset();
  }
}
85
// Runs the oldest pending Echo() callback. The closure is popped before it
// is run so a callback that re-enters Echo() sees a consistent queue.
void CommandBufferProxyImpl::OnEchoAck() {
  DCHECK(!echo_tasks_.empty());
  base::Closure callback = echo_tasks_.front();
  echo_tasks_.pop();
  callback.Run();
}
92
93 void CommandBufferProxyImpl::OnConsoleMessage(
94     const GPUCommandBufferConsoleMessage& message) {
95   if (!console_message_callback_.is_null()) {
96     console_message_callback_.Run(message.message, message.id);
97   }
98 }
99
// Registers |callback| to receive memory allocation updates (see
// OnSetMemoryAllocation) and tells the GPU process whether a client-side
// callback now exists, so it knows whether to send the notifications.
// No-op once the context is lost.
void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
    const MemoryAllocationChangedCallback& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  memory_allocation_changed_callback_ = callback;
  Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
      route_id_, !memory_allocation_changed_callback_.is_null()));
}
109
// Registers |observer| to be notified (OnWillDeleteImpl) just before this
// proxy is destroyed. |observer| is not owned.
void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) {
  deletion_observers_.AddObserver(observer);
}
113
// Unregisters an observer previously added with AddDeletionObserver().
void CommandBufferProxyImpl::RemoveDeletionObserver(
    DeletionObserver* observer) {
  deletion_observers_.RemoveObserver(observer);
}
118
119 void CommandBufferProxyImpl::OnSetMemoryAllocation(
120     const gpu::MemoryAllocation& allocation) {
121   if (!memory_allocation_changed_callback_.is_null())
122     memory_allocation_changed_callback_.Run(allocation);
123 }
124
// Runs the callback registered under |id| by SignalSyncPoint()/SignalQuery().
// The map entry is erased before the callback runs, so the callback may
// safely register new signal tasks without touching a stale entry.
void CommandBufferProxyImpl::OnSignalSyncPointAck(uint32 id) {
  SignalTaskMap::iterator it = signal_tasks_.find(id);
  DCHECK(it != signal_tasks_.end());
  base::Closure callback = it->second;
  signal_tasks_.erase(it);
  callback.Run();
}
132
// Registers |callback| to run once when the context is lost (see
// OnDestroyed, which resets it after the first invocation).
void CommandBufferProxyImpl::SetChannelErrorCallback(
    const base::Closure& callback) {
  channel_error_callback_ = callback;
}
137
// Allocates and maps the shared-state memory used to mirror command buffer
// state from the GPU process, shares it with that process, and sends the
// Initialize IPC. Returns false on any allocation, mapping, handle-sharing
// or IPC failure, or if the service-side initialization reports failure.
bool CommandBufferProxyImpl::Initialize() {
  shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory(
      sizeof(*shared_state())).release());
  if (!shared_state_shm_)
    return false;

  if (!shared_state_shm_->Map(sizeof(*shared_state())))
    return false;

  shared_state()->Initialize();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and
  // the sending of the Initialize IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_state_shm_->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return false;

  // Synchronous send: |result| carries the service-side success flag.
  bool result;
  if (!Send(new GpuCommandBufferMsg_Initialize(route_id_, handle, &result))) {
    LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
    return false;
  }

  if (!result) {
    LOG(ERROR) << "Failed to initialize command buffer service.";
    return false;
  }

  return true;
}
170
// Synchronously fetches the latest state from the GPU process (when the
// context is healthy), then refreshes from the shared-state memory and
// returns the cached snapshot.
gpu::CommandBuffer::State CommandBufferProxyImpl::GetState() {
  // Send will flag state with lost context if IPC fails.
  if (last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_GetState(route_id_, &state)))
      OnUpdateState(state);
  }

  TryUpdateState();
  return last_state_;
}
182
// Returns the cached state snapshot without any IPC or shared-memory read.
gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
  return last_state_;
}
186
// Refreshes state from shared memory (no IPC) and returns the latest token.
int32 CommandBufferProxyImpl::GetLastToken() {
  TryUpdateState();
  return last_state_.token;
}
191
// Asynchronously tells the GPU process to process commands up to
// |put_offset|. Duplicate flushes at the same offset are elided; each sent
// flush bumps flush_count_ so the service can detect reordering.
void CommandBufferProxyImpl::Flush(int32 put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.debug"),
               "CommandBufferProxyImpl::Flush",
               "put_offset",
               put_offset);

  // Nothing new to flush since the last call.
  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;

  Send(new GpuCommandBufferMsg_AsyncFlush(route_id_,
                                          put_offset,
                                          ++flush_count_));
}
210
211 void CommandBufferProxyImpl::SetLatencyInfo(
212     const ui::LatencyInfo& latency_info) {
213   if (last_state_.error != gpu::error::kNoError)
214     return;
215   Send(new GpuCommandBufferMsg_SetLatencyInfo(route_id_, latency_info));
216 }
217
// Flushes up to |put_offset| and returns fresh state. If the get offset has
// not advanced past |last_known_get| (i.e. the caller would otherwise see no
// progress), a synchronous GetStateFast round-trip is made to the GPU
// process before returning.
gpu::CommandBuffer::State CommandBufferProxyImpl::FlushSync(
    int32 put_offset,
    int32 last_known_get) {
  TRACE_EVENT1("gpu", "CommandBufferProxyImpl::FlushSync", "put_offset",
               put_offset);
  Flush(put_offset);
  TryUpdateState();
  if (last_known_get == last_state_.get_offset) {
    // Send will flag state with lost context if IPC fails.
    if (last_state_.error == gpu::error::kNoError) {
      gpu::CommandBuffer::State state;
      if (Send(new GpuCommandBufferMsg_GetStateFast(route_id_,
                                                    &state)))
        OnUpdateState(state);
    }
    TryUpdateState();
  }

  return last_state_;
}
238
239 void CommandBufferProxyImpl::SetGetBuffer(int32 shm_id) {
240   if (last_state_.error != gpu::error::kNoError)
241     return;
242
243   Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
244   last_put_offset_ = -1;
245 }
246
// Service-side operation only; clients must never call this on the proxy.
void CommandBufferProxyImpl::SetGetOffset(int32 get_offset) {
  // Not implemented in proxy.
  NOTREACHED();
}
251
// Allocates a shared-memory transfer buffer of |size| bytes, registers it
// with the GPU process, and caches it locally. On success *|id| receives
// the new buffer id; on any failure *|id| stays -1 and an empty gpu::Buffer
// is returned.
gpu::Buffer CommandBufferProxyImpl::CreateTransferBuffer(size_t size,
                                                         int32* id) {
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return gpu::Buffer();

  int32 new_id = channel_->ReserveTransferBufferId();
  DCHECK(transfer_buffers_.find(new_id) == transfer_buffers_.end());

  scoped_ptr<base::SharedMemory> shared_memory(
      channel_->factory()->AllocateSharedMemory(size));
  if (!shared_memory)
    return gpu::Buffer();

  // The factory is expected to hand back unmapped memory; Map() below does
  // the client-side mapping.
  DCHECK(!shared_memory->memory());
  if (!shared_memory->Map(size))
    return gpu::Buffer();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and
  // the sending of the RegisterTransferBuffer IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_memory->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return gpu::Buffer();

  if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
                                                           new_id,
                                                           handle,
                                                           size))) {
    return gpu::Buffer();
  }

  // Success: hand ownership of the SharedMemory to the cached gpu::Buffer
  // (deleted in DestroyTransferBuffer() or the destructor).
  *id = new_id;
  gpu::Buffer buffer;
  buffer.ptr = shared_memory->memory();
  buffer.size = size;
  buffer.shared_memory = shared_memory.release();
  transfer_buffers_[new_id] = buffer;

  return buffer;
}
295
// Frees the client-side cache entry for transfer buffer |id| (closing the
// local shared-memory handle) and tells the GPU process to drop its side.
void CommandBufferProxyImpl::DestroyTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Remove the transfer buffer from the client side cache.
  TransferBufferMap::iterator it = transfer_buffers_.find(id);
  if (it != transfer_buffers_.end()) {
    delete it->second.shared_memory;
    transfer_buffers_.erase(it);
  }

  Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
}
309
// Returns the transfer buffer registered under |id|, consulting the local
// cache first and otherwise fetching the shared-memory handle from the GPU
// process. Returns an empty gpu::Buffer on lost context or any failure.
gpu::Buffer CommandBufferProxyImpl::GetTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return gpu::Buffer();

  // Check local cache to see if there is already a client side shared memory
  // object for this id.
  TransferBufferMap::iterator it = transfer_buffers_.find(id);
  if (it != transfer_buffers_.end()) {
    return it->second;
  }

  // Assuming we are in the renderer process, the service is responsible for
  // duplicating the handle. This might not be true for NaCl.
  base::SharedMemoryHandle handle = base::SharedMemoryHandle();
  uint32 size;  // Filled in by the synchronous IPC below on success.
  if (!Send(new GpuCommandBufferMsg_GetTransferBuffer(route_id_,
                                                      id,
                                                      &handle,
                                                      &size))) {
    return gpu::Buffer();
  }

  // Cache the transfer buffer shared memory object client side.
  // |false| = the memory is not read-only.
  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(handle, false));

  // Map the shared memory on demand.
  if (!shared_memory->memory()) {
    if (!shared_memory->Map(size))
      return gpu::Buffer();
  }

  // Ownership of the SharedMemory moves to the cached gpu::Buffer (deleted
  // in DestroyTransferBuffer() or the destructor).
  gpu::Buffer buffer;
  buffer.ptr = shared_memory->memory();
  buffer.size = size;
  buffer.shared_memory = shared_memory.release();
  transfer_buffers_[id] = buffer;

  return buffer;
}
350
// Service-side operation only; clients must never call this on the proxy.
void CommandBufferProxyImpl::SetToken(int32 token) {
  // Not implemented in proxy.
  NOTREACHED();
}
355
// Service-side operation only; clients must never call this on the proxy.
void CommandBufferProxyImpl::SetParseError(
    gpu::error::Error error) {
  // Not implemented in proxy.
  NOTREACHED();
}
361
// Service-side operation only; clients must never call this on the proxy.
void CommandBufferProxyImpl::SetContextLostReason(
    gpu::error::ContextLostReason reason) {
  // Not implemented in proxy.
  NOTREACHED();
}
367
// This proxy always supports GPU memory buffers (see CreateGpuMemoryBuffer).
bool CommandBufferProxyImpl::SupportsGpuMemoryBuffer() {
  return true;
}
371
372 gfx::GpuMemoryBuffer* CommandBufferProxyImpl::CreateGpuMemoryBuffer(
373     size_t width,
374     size_t height,
375     unsigned internalformat,
376     int32* id) {
377   *id = -1;
378
379   if (last_state_.error != gpu::error::kNoError)
380     return NULL;
381
382   int32 new_id = channel_->ReserveGpuMemoryBufferId();
383   DCHECK(gpu_memory_buffers_.find(new_id) == gpu_memory_buffers_.end());
384
385   scoped_ptr<gfx::GpuMemoryBuffer> gpu_memory_buffer(
386       channel_->factory()->AllocateGpuMemoryBuffer(width,
387                                                    height,
388                                                    internalformat));
389   if (!gpu_memory_buffer)
390     return NULL;
391
392   DCHECK(GpuChannelHost::IsValidGpuMemoryBuffer(
393              gpu_memory_buffer->GetHandle()));
394
395   // This handle is owned by the GPU process and must be passed to it or it
396   // will leak. In otherwords, do not early out on error between here and the
397   // sending of the RegisterGpuMemoryBuffer IPC below.
398   gfx::GpuMemoryBufferHandle handle =
399       channel_->ShareGpuMemoryBufferToGpuProcess(
400           gpu_memory_buffer->GetHandle());
401
402   if (!Send(new GpuCommandBufferMsg_RegisterGpuMemoryBuffer(
403                 route_id_,
404                 new_id,
405                 handle,
406                 width,
407                 height,
408                 internalformat))) {
409     return NULL;
410   }
411
412   *id = new_id;
413   gpu_memory_buffers_[new_id] = gpu_memory_buffer.release();
414   return gpu_memory_buffers_[new_id];
415 }
416
// Frees the client-side GpuMemoryBuffer cached under |id| and tells the GPU
// process to drop its side.
void CommandBufferProxyImpl::DestroyGpuMemoryBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Remove the gpu memory buffer from the client side cache.
  GpuMemoryBufferMap::iterator it = gpu_memory_buffers_.find(id);
  if (it != gpu_memory_buffers_.end()) {
    delete it->second;
    gpu_memory_buffers_.erase(it);
  }

  Send(new GpuCommandBufferMsg_DestroyGpuMemoryBuffer(route_id_, id));
}
430
// Returns the IPC route id this command buffer uses on the GPU channel.
int CommandBufferProxyImpl::GetRouteID() const {
  return route_id_;
}
434
435 bool CommandBufferProxyImpl::Echo(const base::Closure& callback) {
436   if (last_state_.error != gpu::error::kNoError) {
437     return false;
438   }
439
440   if (!Send(new GpuCommandBufferMsg_Echo(route_id_,
441                     GpuCommandBufferMsg_EchoAck(route_id_)))) {
442     return false;
443   }
444
445   echo_tasks_.push(callback);
446
447   return true;
448 }
449
450 bool CommandBufferProxyImpl::SetSurfaceVisible(bool visible) {
451   if (last_state_.error != gpu::error::kNoError)
452     return false;
453
454   return Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_, visible));
455 }
456
457 bool CommandBufferProxyImpl::DiscardBackbuffer() {
458   if (last_state_.error != gpu::error::kNoError)
459     return false;
460
461   return Send(new GpuCommandBufferMsg_DiscardBackbuffer(route_id_));
462 }
463
464 bool CommandBufferProxyImpl::EnsureBackbuffer() {
465   if (last_state_.error != gpu::error::kNoError)
466     return false;
467
468   return Send(new GpuCommandBufferMsg_EnsureBackbuffer(route_id_));
469 }
470
471 uint32 CommandBufferProxyImpl::InsertSyncPoint() {
472   if (last_state_.error != gpu::error::kNoError)
473     return 0;
474
475   uint32 sync_point = 0;
476   Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, &sync_point));
477   return sync_point;
478 }
479
480 void CommandBufferProxyImpl::SignalSyncPoint(uint32 sync_point,
481                                              const base::Closure& callback) {
482   if (last_state_.error != gpu::error::kNoError)
483     return;
484
485   uint32 signal_id = next_signal_id_++;
486   if (!Send(new GpuCommandBufferMsg_SignalSyncPoint(route_id_,
487                                                     sync_point,
488                                                     signal_id))) {
489     return;
490   }
491
492   signal_tasks_.insert(std::make_pair(signal_id, callback));
493 }
494
// Registers |callback| to run once the GPU process has completed |query|
// (the ack arrives via OnSignalSyncPointAck, which shares the signal id
// space with SignalSyncPoint). The callback is dropped if the context is
// lost or the request cannot be sent.
void CommandBufferProxyImpl::SignalQuery(uint32 query,
                                         const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Signal identifiers are hidden, so nobody outside of this class will see
  // them. (And thus, they cannot save them.) The IDs themselves only last
  // until the callback is invoked, which will happen as soon as the GPU
  // catches up with the command buffer.
  // A malicious caller trying to create a collision by making next_signal_id
  // would have to make calls at an astounding rate (300B/s) and even if they
  // could do that, all they would do is to prevent some callbacks from getting
  // called, leading to stalled threads and/or memory leaks.
  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_,
                                                query,
                                                signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}
517
518 void CommandBufferProxyImpl::SendManagedMemoryStats(
519     const gpu::ManagedMemoryStats& stats) {
520   if (last_state_.error != gpu::error::kNoError)
521     return;
522
523   Send(new GpuCommandBufferMsg_SendClientManagedMemoryStats(route_id_,
524                                                             stats));
525 }
526
527 bool CommandBufferProxyImpl::GenerateMailboxNames(
528     unsigned num,
529     std::vector<gpu::Mailbox>* names) {
530   if (last_state_.error != gpu::error::kNoError)
531     return false;
532
533   return channel_->GenerateMailboxNames(num, names);
534 }
535
536 bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) {
537   if (last_state_.error != gpu::error::kNoError)
538     return false;
539
540   return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_, mailbox));
541 }
542
// Asks the GPU process to create a video decoder for |profile| and wraps it
// in a GpuVideoDecodeAcceleratorHost bound to the returned route id.
// Returns an empty scoped_ptr if the IPC fails or the service could not
// create a decoder (negative route id). |client| receives decoder events.
scoped_ptr<media::VideoDecodeAccelerator>
CommandBufferProxyImpl::CreateVideoDecoder(
    media::VideoCodecProfile profile,
    media::VideoDecodeAccelerator::Client* client) {
  int decoder_route_id;
  scoped_ptr<media::VideoDecodeAccelerator> vda;
  if (!Send(new GpuCommandBufferMsg_CreateVideoDecoder(route_id_, profile,
                                                       &decoder_route_id))) {
    LOG(ERROR) << "Send(GpuCommandBufferMsg_CreateVideoDecoder) failed";
    return vda.Pass();
  }

  // A negative route id signals service-side creation failure.
  if (decoder_route_id < 0) {
    DLOG(ERROR) << "Failed to Initialize GPU decoder on profile: " << profile;
    return vda.Pass();
  }

  GpuVideoDecodeAcceleratorHost* decoder_host =
      new GpuVideoDecodeAcceleratorHost(channel_, decoder_route_id, client,
                                        this);
  vda.reset(decoder_host);
  return vda.Pass();
}
566
// Returns the cached error code without any IPC or shared-memory read.
gpu::error::Error CommandBufferProxyImpl::GetLastError() {
  return last_state_.error;
}
570
// Sends |msg| over the GPU channel, taking ownership of it in all cases
// (IPC::Sender contract). On send failure the context is flagged as lost;
// if the channel is already gone the message is simply deleted. Returns
// whether the send succeeded.
bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
  // Caller should not intentionally send a message if the context is lost.
  DCHECK(last_state_.error == gpu::error::kNoError);

  if (channel_) {
    if (channel_->Send(msg)) {
      return true;
    } else {
      // Flag the command buffer as lost. Defer deleting the channel until
      // OnChannelError is called after returning to the message loop in case
      // it is referenced elsewhere.
      last_state_.error = gpu::error::kLostContext;
      return false;
    }
  }

  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  delete msg;
  return false;
}
592
// Adopts |state| only if its generation is not older than the cached one,
// guarding against reordered updates.
void CommandBufferProxyImpl::OnUpdateState(
    const gpu::CommandBuffer::State& state) {
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs. (Unsigned subtraction
  // makes an older generation wrap to a huge value, failing the test.)
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}
600
// Registers |callback| to receive console messages relayed from the GPU
// process (see OnConsoleMessage).
void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
    const GpuConsoleMessageCallback& callback) {
  console_message_callback_ = callback;
}
605
// Refreshes last_state_ from the shared-state memory written by the GPU
// process — cheap (no IPC), and skipped once the context is lost so the
// lost-context error is not overwritten.
void CommandBufferProxyImpl::TryUpdateState() {
  if (last_state_.error == gpu::error::kNoError)
    shared_state()->Read(&last_state_);
}
610
611 }  // namespace content