src/content/common/gpu/client/command_buffer_proxy_impl.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/command_buffer_proxy_impl.h"

#include "base/callback.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/memory/shared_memory.h"
#include "base/stl_util.h"
#include "content/common/child_process_messages.h"
#include "content/common/gpu/client/gpu_channel_host.h"
#include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
#include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/view_messages.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer_shared.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "ui/gfx/size.h"

namespace content {

CommandBufferProxyImpl::CommandBufferProxyImpl(
    GpuChannelHost* channel,
    int route_id)
    : channel_(channel),
      route_id_(route_id),
      flush_count_(0),
      last_put_offset_(-1),
      next_signal_id_(0) {
}

CommandBufferProxyImpl::~CommandBufferProxyImpl() {
  FOR_EACH_OBSERVER(DeletionObserver,
                    deletion_observers_,
                    OnWillDeleteImpl());
}

bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EchoAck, OnEchoAck);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetMemoryAllocation,
                        OnSetMemoryAllocation);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPointAck,
                        OnSignalSyncPointAck);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  DCHECK(handled);
  return handled;
}

void CommandBufferProxyImpl::OnChannelError() {
  OnDestroyed(gpu::error::kUnknown);
}

void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason) {
  // Prevent any further messages from being sent.
  channel_ = NULL;

  // When the client sees that the context is lost, they should delete this
  // CommandBufferProxyImpl and create a new one.
  last_state_.error = gpu::error::kLostContext;
  last_state_.context_lost_reason = reason;

  if (!channel_error_callback_.is_null()) {
    channel_error_callback_.Run();
    // Avoid calling the error callback more than once.
    channel_error_callback_.Reset();
  }
}

void CommandBufferProxyImpl::OnEchoAck() {
  DCHECK(!echo_tasks_.empty());
  base::Closure callback = echo_tasks_.front();
  echo_tasks_.pop();
  callback.Run();
}

void CommandBufferProxyImpl::OnConsoleMessage(
    const GPUCommandBufferConsoleMessage& message) {
  if (!console_message_callback_.is_null()) {
    console_message_callback_.Run(message.message, message.id);
  }
}

void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
    const MemoryAllocationChangedCallback& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  memory_allocation_changed_callback_ = callback;
  Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
      route_id_, !memory_allocation_changed_callback_.is_null()));
}

void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) {
  deletion_observers_.AddObserver(observer);
}

void CommandBufferProxyImpl::RemoveDeletionObserver(
    DeletionObserver* observer) {
  deletion_observers_.RemoveObserver(observer);
}

void CommandBufferProxyImpl::OnSetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!memory_allocation_changed_callback_.is_null())
    memory_allocation_changed_callback_.Run(allocation);
}

void CommandBufferProxyImpl::OnSignalSyncPointAck(uint32 id) {
  SignalTaskMap::iterator it = signal_tasks_.find(id);
  DCHECK(it != signal_tasks_.end());
  base::Closure callback = it->second;
  signal_tasks_.erase(it);
  callback.Run();
}

void CommandBufferProxyImpl::SetChannelErrorCallback(
    const base::Closure& callback) {
  channel_error_callback_ = callback;
}

bool CommandBufferProxyImpl::Initialize() {
  TRACE_EVENT0("gpu", "CommandBufferProxyImpl::Initialize");
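  // Note: the shared state block allocated below is shared with the GPU
  // process through the Initialize IPC further down; TryUpdateState() reads
  // the latest CommandBuffer::State out of it without a round-trip IPC.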
  shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory(
      sizeof(*shared_state())).release());
  if (!shared_state_shm_)
    return false;

  if (!shared_state_shm_->Map(sizeof(*shared_state())))
    return false;

  shared_state()->Initialize();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the Initialize IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_state_shm_->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return false;

  bool result = false;
  if (!Send(new GpuCommandBufferMsg_Initialize(
      route_id_, handle, &result, &capabilities_))) {
    LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
    return false;
  }

  if (!result) {
    LOG(ERROR) << "Failed to initialize command buffer service.";
    return false;
  }

  capabilities_.map_image = true;

  return true;
}

gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
  return last_state_;
}

int32 CommandBufferProxyImpl::GetLastToken() {
  TryUpdateState();
  return last_state_.token;
}

void CommandBufferProxyImpl::Flush(int32 put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1("gpu",
               "CommandBufferProxyImpl::Flush",
               "put_offset",
               put_offset);

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;

  Send(new GpuCommandBufferMsg_AsyncFlush(route_id_,
                                          put_offset,
                                          ++flush_count_));
}

void CommandBufferProxyImpl::SetLatencyInfo(
    const std::vector<ui::LatencyInfo>& latency_info) {
  if (last_state_.error != gpu::error::kNoError)
    return;
  Send(new GpuCommandBufferMsg_SetLatencyInfo(route_id_, latency_info));
}

void CommandBufferProxyImpl::WaitForTokenInRange(int32 start, int32 end) {
  TRACE_EVENT2("gpu",
               "CommandBufferProxyImpl::WaitForToken",
               "start",
               start,
               "end",
               end);
  TryUpdateState();
  if (!InRange(start, end, last_state_.token) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(
            route_id_, start, end, &state)))
      OnUpdateState(state);
  }
  DCHECK(InRange(start, end, last_state_.token) ||
         last_state_.error != gpu::error::kNoError);
}

void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32 start, int32 end) {
  TRACE_EVENT2("gpu",
               "CommandBufferProxyImpl::WaitForGetOffset",
               "start",
               start,
               "end",
               end);
  TryUpdateState();
  if (!InRange(start, end, last_state_.get_offset) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(
            route_id_, start, end, &state)))
      OnUpdateState(state);
  }
  DCHECK(InRange(start, end, last_state_.get_offset) ||
         last_state_.error != gpu::error::kNoError);
}

void CommandBufferProxyImpl::SetGetBuffer(int32 shm_id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
  last_put_offset_ = -1;
}

scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
    size_t size,
    int32* id) {
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return NULL;

  int32 new_id = channel_->ReserveTransferBufferId();

  scoped_ptr<base::SharedMemory> shared_memory(
      channel_->factory()->AllocateSharedMemory(size));
  if (!shared_memory)
    return NULL;

  DCHECK(!shared_memory->memory());
  if (!shared_memory->Map(size))
    return NULL;

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterTransferBuffer IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_memory->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return NULL;

  if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
                                                           new_id,
                                                           handle,
                                                           size))) {
    return NULL;
  }

  *id = new_id;
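  // MakeBufferFromSharedMemory takes ownership of the mapped shared memory,
  // so the mapping stays alive for as long as the returned gpu::Buffer does.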
  scoped_refptr<gpu::Buffer> buffer(
      gpu::MakeBufferFromSharedMemory(shared_memory.Pass(), size));
  return buffer;
}

void CommandBufferProxyImpl::DestroyTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
}

gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() {
  return capabilities_;
}

gfx::GpuMemoryBuffer* CommandBufferProxyImpl::CreateGpuMemoryBuffer(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage,
    int32* id) {
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return NULL;

  scoped_ptr<gfx::GpuMemoryBuffer> buffer(
      channel_->factory()->AllocateGpuMemoryBuffer(
          width, height, internalformat, usage));
  if (!buffer)
    return NULL;

  DCHECK(GpuChannelHost::IsValidGpuMemoryBuffer(buffer->GetHandle()));

  int32 new_id = channel_->ReserveGpuMemoryBufferId();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterGpuMemoryBuffer IPC below.
  gfx::GpuMemoryBufferHandle handle =
      channel_->ShareGpuMemoryBufferToGpuProcess(buffer->GetHandle());

  if (!Send(new GpuCommandBufferMsg_RegisterGpuMemoryBuffer(
                route_id_,
                new_id,
                handle,
                width,
                height,
                internalformat))) {
    return NULL;
  }

  *id = new_id;
  DCHECK(gpu_memory_buffers_.find(new_id) == gpu_memory_buffers_.end());
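  // The proxy keeps ownership of |buffer| in |gpu_memory_buffers_|; the raw
  // pointer returned here remains valid until DestroyGpuMemoryBuffer() takes
  // it back out of the map.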
  return gpu_memory_buffers_.add(new_id, buffer.Pass()).first->second;
}

void CommandBufferProxyImpl::DestroyGpuMemoryBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_UnregisterGpuMemoryBuffer(route_id_, id));

  // Remove the gpu memory buffer from the client side cache.
  DCHECK(gpu_memory_buffers_.find(id) != gpu_memory_buffers_.end());
  channel_->factory()->DeleteGpuMemoryBuffer(gpu_memory_buffers_.take(id));
}

int CommandBufferProxyImpl::GetRouteID() const {
  return route_id_;
}

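// Echo() queues |callback| until the GPU process bounces the embedded EchoAck
// message back on this route; OnEchoAck() then pops and runs the oldest queued
// callback, so acks and callbacks pair up in FIFO order.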
void CommandBufferProxyImpl::Echo(const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError) {
    return;
  }

  if (!Send(new GpuCommandBufferMsg_Echo(
           route_id_, GpuCommandBufferMsg_EchoAck(route_id_)))) {
    return;
  }

  echo_tasks_.push(callback);
}

uint32 CommandBufferProxyImpl::CreateStreamTexture(uint32 texture_id) {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  int32 stream_id = channel_->GenerateRouteID();
  bool succeeded = false;
  Send(new GpuCommandBufferMsg_CreateStreamTexture(
      route_id_, texture_id, stream_id, &succeeded));
  if (!succeeded) {
    DLOG(ERROR) << "GpuCommandBufferMsg_CreateStreamTexture returned failure";
    return 0;
  }
  return stream_id;
}

uint32 CommandBufferProxyImpl::InsertSyncPoint() {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  uint32 sync_point = 0;
  Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, true, &sync_point));
  return sync_point;
}

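// Note: the boolean sent with GpuCommandBufferMsg_InsertSyncPoint appears to
// select whether the sync point is retired immediately (true, above) or left
// for an explicit RetireSyncPoint() call later (false, below).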
uint32_t CommandBufferProxyImpl::InsertFutureSyncPoint() {
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  uint32 sync_point = 0;
  Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, false, &sync_point));
  return sync_point;
}

void CommandBufferProxyImpl::RetireSyncPoint(uint32_t sync_point) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_RetireSyncPoint(route_id_, sync_point));
}

void CommandBufferProxyImpl::SignalSyncPoint(uint32 sync_point,
                                             const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalSyncPoint(route_id_,
                                                    sync_point,
                                                    signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

void CommandBufferProxyImpl::SignalQuery(uint32 query,
                                         const base::Closure& callback) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Signal identifiers are hidden, so nobody outside of this class will see
  // them. (And thus, they cannot save them.) The IDs themselves only last
  // until the callback is invoked, which will happen as soon as the GPU
  // catches up with the command buffer.
  // A malicious caller trying to create a collision by making next_signal_id
  // wrap would have to make calls at an astounding rate (300B/s), and even if
  // they could do that, all they would do is prevent some callbacks from
  // getting called, leading to stalled threads and/or memory leaks.
  uint32 signal_id = next_signal_id_++;
  if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_,
                                                query,
                                                signal_id))) {
    return;
  }

  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

void CommandBufferProxyImpl::SetSurfaceVisible(bool visible) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_, visible));
}

bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  if (last_state_.error != gpu::error::kNoError)
    return false;

  return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_, mailbox));
}

scoped_ptr<media::VideoDecodeAccelerator>
CommandBufferProxyImpl::CreateVideoDecoder() {
  if (!channel_)
    return scoped_ptr<media::VideoDecodeAccelerator>();
  return scoped_ptr<media::VideoDecodeAccelerator>(
      new GpuVideoDecodeAcceleratorHost(channel_, this));
}

scoped_ptr<media::VideoEncodeAccelerator>
CommandBufferProxyImpl::CreateVideoEncoder() {
  if (!channel_)
    return scoped_ptr<media::VideoEncodeAccelerator>();
  return scoped_ptr<media::VideoEncodeAccelerator>(
      new GpuVideoEncodeAcceleratorHost(channel_, this));
}

gpu::error::Error CommandBufferProxyImpl::GetLastError() {
  return last_state_.error;
}

bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
  // Caller should not intentionally send a message if the context is lost.
  DCHECK(last_state_.error == gpu::error::kNoError);

  if (channel_) {
    if (channel_->Send(msg)) {
      return true;
    } else {
      // Flag the command buffer as lost. Defer deleting the channel until
      // OnChannelError is called after returning to the message loop in case
      // it is referenced elsewhere.
      DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context.";
      last_state_.error = gpu::error::kLostContext;
      return false;
    }
  }

  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  delete msg;
  return false;
}

void CommandBufferProxyImpl::OnUpdateState(
    const gpu::CommandBuffer::State& state) {
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs.
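  // For example, an incoming generation of 0x00000001 arriving after a
  // last_state_ generation of 0xFFFFFFFE gives an unsigned difference of 3
  // (< 0x80000000), so the newer state is accepted; a stale update yields a
  // difference of 0x80000000 or more and is dropped.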
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}

void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
    const GpuConsoleMessageCallback& callback) {
  console_message_callback_ = callback;
}

void CommandBufferProxyImpl::TryUpdateState() {
  if (last_state_.error == gpu::error::kNoError)
    shared_state()->Read(&last_state_);
}

gpu::CommandBufferSharedState* CommandBufferProxyImpl::shared_state() const {
  return reinterpret_cast<gpu::CommandBufferSharedState*>(
      shared_state_shm_->memory());
}

}  // namespace content