1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "content/common/gpu/client/command_buffer_proxy_impl.h"
7 #include "base/callback.h"
8 #include "base/debug/trace_event.h"
9 #include "base/logging.h"
10 #include "base/memory/shared_memory.h"
11 #include "base/stl_util.h"
12 #include "content/common/child_process_messages.h"
13 #include "content/common/gpu/client/gpu_channel_host.h"
14 #include "content/common/gpu/client/gpu_video_decode_accelerator_host.h"
15 #include "content/common/gpu/gpu_messages.h"
16 #include "content/common/view_messages.h"
17 #include "gpu/command_buffer/common/cmd_buffer_common.h"
18 #include "gpu/command_buffer/common/command_buffer_shared.h"
19 #include "gpu/command_buffer/common/gpu_memory_allocation.h"
20 #include "ui/gfx/size.h"
// Constructor: binds this proxy to the GpuChannelHost over which all
// command-buffer IPC is sent.
// NOTE(review): the rest of the parameter list, the member-initializer list,
// and the body are elided from this listing — consult the full source.
24 CommandBufferProxyImpl::CommandBufferProxyImpl(
25 GpuChannelHost* channel,
// Destructor: notifies registered DeletionObservers, then frees the
// client-side cache of transfer-buffer shared memory.
// NOTE(review): several lines (observer notification arguments, loop tail,
// closing braces) are elided from this listing.
34 CommandBufferProxyImpl::~CommandBufferProxyImpl() {
35 FOR_EACH_OBSERVER(DeletionObserver,
39 // Delete all the locally cached shared memory objects, closing the handle
41 for (TransferBufferMap::iterator it = transfer_buffers_.begin();
42 it != transfer_buffers_.end();
// Each cached SharedMemory is owned by this proxy, so plain delete is safe.
44 delete it->second.shared_memory;
45 it->second.shared_memory = NULL;
// Dispatches incoming GPU-channel IPC messages to the On* handlers below via
// the IPC message-map macros. Returns whether the message was handled.
49 bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
51 IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
52 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
53 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EchoAck, OnEchoAck);
54 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
55 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetMemoryAllocation,
56 OnSetMemoryAllocation);
57 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPointAck,
58 OnSignalSyncPointAck);
// Unrecognized messages are flagged so the caller can route them elsewhere.
59 IPC_MESSAGE_UNHANDLED(handled = false)
// A broken GPU channel is treated like a destroyed command buffer with an
// unknown context-lost reason.
66 void CommandBufferProxyImpl::OnChannelError() {
67 OnDestroyed(gpu::error::kUnknown);
// Marks the context lost (recording |reason|) and fires the channel-error
// callback exactly once.
70 void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason) {
71 // Prevent any further messages from being sent.
74 // When the client sees that the context is lost, they should delete this
75 // CommandBufferProxyImpl and create a new one.
76 last_state_.error = gpu::error::kLostContext;
77 last_state_.context_lost_reason = reason;
79 if (!channel_error_callback_.is_null()) {
80 channel_error_callback_.Run();
81 // Avoid calling the error callback more than once.
82 channel_error_callback_.Reset();
// Ack for a GpuCommandBufferMsg_Echo: takes the oldest queued echo task.
// NOTE(review): the pop/Run of |callback| is elided from this listing.
86 void CommandBufferProxyImpl::OnEchoAck() {
87 DCHECK(!echo_tasks_.empty());
88 base::Closure callback = echo_tasks_.front();
// Forwards a console message from the GPU process to the client-registered
// callback, if one is set.
93 void CommandBufferProxyImpl::OnConsoleMessage(
94 const GPUCommandBufferConsoleMessage& message) {
95 if (!console_message_callback_.is_null()) {
96 console_message_callback_.Run(message.message, message.id);
// Registers (or clears, when |callback| is null) the memory-allocation
// callback and tells the GPU process whether a client callback exists.
// No-op once the context is lost.
100 void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
101 const MemoryAllocationChangedCallback& callback) {
102 if (last_state_.error != gpu::error::kNoError)
105 memory_allocation_changed_callback_ = callback;
106 Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
107 route_id_, !memory_allocation_changed_callback_.is_null()));
// Registers an observer to be notified when this proxy is destroyed.
110 void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) {
111 deletion_observers_.AddObserver(observer);
// Unregisters a previously added deletion observer.
114 void CommandBufferProxyImpl::RemoveDeletionObserver(
115 DeletionObserver* observer) {
116 deletion_observers_.RemoveObserver(observer);
// Relays a GPU-process memory allocation update to the client callback.
119 void CommandBufferProxyImpl::OnSetMemoryAllocation(
120 const gpu::MemoryAllocation& allocation) {
121 if (!memory_allocation_changed_callback_.is_null())
122 memory_allocation_changed_callback_.Run(allocation);
// Ack for SignalSyncPoint/SignalQuery: looks up the pending task by |id| and
// removes it from the map before running it (erase-then-run; the Run call is
// elided from this listing — NOTE(review): confirm against full source).
125 void CommandBufferProxyImpl::OnSignalSyncPointAck(uint32 id) {
126 SignalTaskMap::iterator it = signal_tasks_.find(id);
127 DCHECK(it != signal_tasks_.end());
128 base::Closure callback = it->second;
129 signal_tasks_.erase(it);
// Sets the closure run (once) when the channel errors or the context is lost.
133 void CommandBufferProxyImpl::SetChannelErrorCallback(
134 const base::Closure& callback) {
135 channel_error_callback_ = callback;
// Allocates and maps the shared-state memory, duplicates its handle for the
// GPU process, and sends the Initialize IPC. Returns false on any failure.
// NOTE(review): several early-return bodies and the tail of the function are
// elided from this listing.
138 bool CommandBufferProxyImpl::Initialize() {
139 shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory(
140 sizeof(*shared_state())).release());
141 if (!shared_state_shm_)
144 if (!shared_state_shm_->Map(sizeof(*shared_state())))
147 shared_state()->Initialize();
149 // This handle is owned by the GPU process and must be passed to it or it
150 // will leak. In other words, do not early out on error between here and the
151 // sending of the Initialize IPC below.
152 base::SharedMemoryHandle handle =
153 channel_->ShareToGpuProcess(shared_state_shm_->handle());
154 if (!base::SharedMemory::IsHandleValid(handle))
158 if (!Send(new GpuCommandBufferMsg_Initialize(route_id_, handle, &result))) {
159 LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
164 LOG(ERROR) << "Failed to initialize command buffer service.";
// Synchronously fetches the current command buffer state from the GPU
// process (unless the context is already lost) and folds it into
// |last_state_| via OnUpdateState. Returns |last_state_| (return elided
// from this listing).
171 gpu::CommandBuffer::State CommandBufferProxyImpl::GetState() {
172 // Send will flag state with lost context if IPC fails.
173 if (last_state_.error == gpu::error::kNoError) {
174 gpu::CommandBuffer::State state;
175 if (Send(new GpuCommandBufferMsg_GetState(route_id_, &state)))
176 OnUpdateState(state);
// Returns the locally cached state without any IPC (body elided from this
// listing — NOTE(review): presumably returns |last_state_|; confirm).
183 gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
// Returns the most recently observed token from the cached state.
// NOTE(review): a line between the signature and the return (the embedded
// numbering skips 188) is elided from this listing.
187 int32 CommandBufferProxyImpl::GetLastToken() {
189 return last_state_.token;
// Sends an asynchronous flush up to |put_offset|, skipping the IPC when the
// put offset has not moved since the last flush. No-op on a lost context.
192 void CommandBufferProxyImpl::Flush(int32 put_offset) {
193 if (last_state_.error != gpu::error::kNoError)
196 TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.debug"),
197 "CommandBufferProxyImpl::Flush",
// Duplicate flushes to the same offset are elided to save IPC traffic.
201 if (last_put_offset_ == put_offset)
204 last_put_offset_ = put_offset;
206 Send(new GpuCommandBufferMsg_AsyncFlush(route_id_,
// Forwards input-latency tracking info to the GPU process. No-op on a lost
// context.
211 void CommandBufferProxyImpl::SetLatencyInfo(
212 const ui::LatencyInfo& latency_info) {
213 if (last_state_.error != gpu::error::kNoError)
215 Send(new GpuCommandBufferMsg_SetLatencyInfo(route_id_, latency_info));
// Like Flush, but blocks until the GPU makes progress past |last_known_get|:
// only issues the (synchronous) GetStateFast IPC when the cached get offset
// has not already advanced. Returns |last_state_| (return elided from this
// listing).
218 gpu::CommandBuffer::State CommandBufferProxyImpl::FlushSync(
220 int32 last_known_get) {
221 TRACE_EVENT1("gpu", "CommandBufferProxyImpl::FlushSync", "put_offset",
225 if (last_known_get == last_state_.get_offset) {
226 // Send will flag state with lost context if IPC fails.
227 if (last_state_.error == gpu::error::kNoError) {
228 gpu::CommandBuffer::State state;
229 if (Send(new GpuCommandBufferMsg_GetStateFast(route_id_,
231 OnUpdateState(state);
// Switches the ring buffer to the transfer buffer |shm_id| and resets the
// cached put offset so the next Flush is not elided. No-op on lost context.
239 void CommandBufferProxyImpl::SetGetBuffer(int32 shm_id) {
240 if (last_state_.error != gpu::error::kNoError)
243 Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
244 last_put_offset_ = -1;
// The get offset is owned by the service side; the proxy never sets it.
247 void CommandBufferProxyImpl::SetGetOffset(int32 get_offset) {
248 // Not implemented in proxy.
// Allocates a shared-memory transfer buffer of |size| bytes, registers it
// with the GPU process under a freshly reserved id, and caches it locally.
// Returns an empty gpu::Buffer on any failure or lost context.
// NOTE(review): the id out-parameter, null-check of the allocation, and the
// final return are elided from this listing.
252 gpu::Buffer CommandBufferProxyImpl::CreateTransferBuffer(size_t size,
256 if (last_state_.error != gpu::error::kNoError)
257 return gpu::Buffer();
259 int32 new_id = channel_->ReserveTransferBufferId();
260 DCHECK(transfer_buffers_.find(new_id) == transfer_buffers_.end());
262 scoped_ptr<base::SharedMemory> shared_memory(
263 channel_->factory()->AllocateSharedMemory(size));
265 return gpu::Buffer();
267 DCHECK(!shared_memory->memory());
268 if (!shared_memory->Map(size))
269 return gpu::Buffer();
271 // This handle is owned by the GPU process and must be passed to it or it
272 // will leak. In other words, do not early out on error between here and the
273 // sending of the RegisterTransferBuffer IPC below.
274 base::SharedMemoryHandle handle =
275 channel_->ShareToGpuProcess(shared_memory->handle());
276 if (!base::SharedMemory::IsHandleValid(handle))
277 return gpu::Buffer();
279 if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
283 return gpu::Buffer();
288 buffer.ptr = shared_memory->memory();
// Ownership of the SharedMemory moves into the local cache; it is freed by
// DestroyTransferBuffer or the destructor.
290 buffer.shared_memory = shared_memory.release();
291 transfer_buffers_[new_id] = buffer;
// Frees the client-side cached shared memory for |id| (if present) and asks
// the GPU process to drop its registration. No-op on a lost context.
296 void CommandBufferProxyImpl::DestroyTransferBuffer(int32 id) {
297 if (last_state_.error != gpu::error::kNoError)
300 // Remove the transfer buffer from the client side cache.
301 TransferBufferMap::iterator it = transfer_buffers_.find(id);
302 if (it != transfer_buffers_.end()) {
303 delete it->second.shared_memory;
304 transfer_buffers_.erase(it);
// The IPC is sent even when the id was not cached locally.
307 Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
// Returns the transfer buffer for |id|, serving from the local cache when
// possible; otherwise fetches the handle from the GPU process, wraps and
// maps it on demand, and caches the result. Returns an empty gpu::Buffer on
// failure or lost context.
// NOTE(review): the cache-hit return, size out-parameter, and final return
// are elided from this listing.
310 gpu::Buffer CommandBufferProxyImpl::GetTransferBuffer(int32 id) {
311 if (last_state_.error != gpu::error::kNoError)
312 return gpu::Buffer();
314 // Check local cache to see if there is already a client side shared memory
315 // object for this id.
316 TransferBufferMap::iterator it = transfer_buffers_.find(id);
317 if (it != transfer_buffers_.end()) {
321 // Assuming we are in the renderer process, the service is responsible for
322 // duplicating the handle. This might not be true for NaCl.
323 base::SharedMemoryHandle handle = base::SharedMemoryHandle();
325 if (!Send(new GpuCommandBufferMsg_GetTransferBuffer(route_id_,
329 return gpu::Buffer();
332 // Cache the transfer buffer shared memory object client side.
333 scoped_ptr<base::SharedMemory> shared_memory(
334 new base::SharedMemory(handle, false));
336 // Map the shared memory on demand.
337 if (!shared_memory->memory()) {
338 if (!shared_memory->Map(size))
339 return gpu::Buffer();
343 buffer.ptr = shared_memory->memory();
// Ownership of the SharedMemory moves into the local cache.
345 buffer.shared_memory = shared_memory.release();
346 transfer_buffers_[id] = buffer;
// Tokens are advanced by the service side; the proxy never sets them.
351 void CommandBufferProxyImpl::SetToken(int32 token) {
352 // Not implemented in proxy.
// Parse errors are raised by the service side; the proxy never sets them.
356 void CommandBufferProxyImpl::SetParseError(
357 gpu::error::Error error) {
358 // Not implemented in proxy.
// The context-lost reason is reported by the service side (see OnDestroyed);
// the proxy never sets it directly.
362 void CommandBufferProxyImpl::SetContextLostReason(
363 gpu::error::ContextLostReason reason) {
364 // Not implemented in proxy.
// Capability query for GPU memory buffers (body elided from this listing —
// NOTE(review): presumably returns a constant; confirm against full source).
368 bool CommandBufferProxyImpl::SupportsGpuMemoryBuffer() {
// Allocates a GpuMemoryBuffer via the channel factory, registers its handle
// with the GPU process under a freshly reserved id, caches the buffer, and
// returns a non-owning pointer to it.
// NOTE(review): width/height/usage parameters, null-return early-outs, and
// the Register IPC arguments are elided from this listing.
372 gfx::GpuMemoryBuffer* CommandBufferProxyImpl::CreateGpuMemoryBuffer(
375 unsigned internalformat,
379 if (last_state_.error != gpu::error::kNoError)
382 int32 new_id = channel_->ReserveGpuMemoryBufferId();
383 DCHECK(gpu_memory_buffers_.find(new_id) == gpu_memory_buffers_.end());
385 scoped_ptr<gfx::GpuMemoryBuffer> gpu_memory_buffer(
386 channel_->factory()->AllocateGpuMemoryBuffer(width,
389 if (!gpu_memory_buffer)
392 DCHECK(GpuChannelHost::IsValidGpuMemoryBuffer(
393 gpu_memory_buffer->GetHandle()));
395 // This handle is owned by the GPU process and must be passed to it or it
396 // will leak. In other words, do not early out on error between here and the
397 // sending of the RegisterGpuMemoryBuffer IPC below.
398 gfx::GpuMemoryBufferHandle handle =
399 channel_->ShareGpuMemoryBufferToGpuProcess(
400 gpu_memory_buffer->GetHandle());
402 if (!Send(new GpuCommandBufferMsg_RegisterGpuMemoryBuffer(
// Ownership moves into the local cache; freed by DestroyGpuMemoryBuffer.
413 gpu_memory_buffers_[new_id] = gpu_memory_buffer.release();
414 return gpu_memory_buffers_[new_id];
// Drops the client-side cached GpuMemoryBuffer for |id| (the delete of the
// cached object is elided from this listing) and tells the GPU process to
// unregister it. No-op on a lost context.
417 void CommandBufferProxyImpl::DestroyGpuMemoryBuffer(int32 id) {
418 if (last_state_.error != gpu::error::kNoError)
421 // Remove the gpu memory buffer from the client side cache.
422 GpuMemoryBufferMap::iterator it = gpu_memory_buffers_.find(id);
423 if (it != gpu_memory_buffers_.end()) {
425 gpu_memory_buffers_.erase(it);
428 Send(new GpuCommandBufferMsg_DestroyGpuMemoryBuffer(route_id_, id));
// Returns this proxy's IPC route id (body elided from this listing —
// NOTE(review): presumably returns |route_id_|; confirm).
431 int CommandBufferProxyImpl::GetRouteID() const {
// Sends a round-trip Echo IPC and queues |callback| to run when the ack
// (OnEchoAck) arrives. Returns false on lost context or send failure
// (early-out bodies elided from this listing).
435 bool CommandBufferProxyImpl::Echo(const base::Closure& callback) {
436 if (last_state_.error != gpu::error::kNoError) {
440 if (!Send(new GpuCommandBufferMsg_Echo(route_id_,
441 GpuCommandBufferMsg_EchoAck(route_id_)))) {
// FIFO: acks are matched to callbacks in send order (see OnEchoAck).
445 echo_tasks_.push(callback);
// Notifies the GPU process of surface visibility. Returns the Send result;
// fails fast on a lost context (early-out body elided from this listing).
450 bool CommandBufferProxyImpl::SetSurfaceVisible(bool visible) {
451 if (last_state_.error != gpu::error::kNoError)
454 return Send(new GpuCommandBufferMsg_SetSurfaceVisible(route_id_, visible));
// Asks the GPU process to release the backbuffer's memory. Returns the Send
// result; fails fast on a lost context.
457 bool CommandBufferProxyImpl::DiscardBackbuffer() {
458 if (last_state_.error != gpu::error::kNoError)
461 return Send(new GpuCommandBufferMsg_DiscardBackbuffer(route_id_));
// Asks the GPU process to (re)allocate the backbuffer. Returns the Send
// result; fails fast on a lost context.
464 bool CommandBufferProxyImpl::EnsureBackbuffer() {
465 if (last_state_.error != gpu::error::kNoError)
468 return Send(new GpuCommandBufferMsg_EnsureBackbuffer(route_id_));
// Synchronously inserts a sync point on the GPU side and returns its id
// (the return statement is elided from this listing). On lost context the
// early-out value is also elided.
471 uint32 CommandBufferProxyImpl::InsertSyncPoint() {
472 if (last_state_.error != gpu::error::kNoError)
475 uint32 sync_point = 0;
476 Send(new GpuCommandBufferMsg_InsertSyncPoint(route_id_, &sync_point));
// Asks the GPU process to run |callback| (via OnSignalSyncPointAck) once
// |sync_point| is retired. The callback is keyed by a locally generated
// signal id. No-op on a lost context.
480 void CommandBufferProxyImpl::SignalSyncPoint(uint32 sync_point,
481 const base::Closure& callback) {
482 if (last_state_.error != gpu::error::kNoError)
485 uint32 signal_id = next_signal_id_++;
486 if (!Send(new GpuCommandBufferMsg_SignalSyncPoint(route_id_,
// Only registered once the IPC was sent successfully.
492 signal_tasks_.insert(std::make_pair(signal_id, callback));
// Asks the GPU process to run |callback| once |query| completes, using the
// same signal-id/ack machinery as SignalSyncPoint. No-op on a lost context.
495 void CommandBufferProxyImpl::SignalQuery(uint32 query,
496 const base::Closure& callback) {
497 if (last_state_.error != gpu::error::kNoError)
500 // Signal identifiers are hidden, so nobody outside of this class will see
501 // them. (And thus, they cannot save them.) The IDs themselves only last
502 // until the callback is invoked, which will happen as soon as the GPU
503 // catches up with the command buffer.
504 // A malicious caller trying to create a collision by making next_signal_id
505 // would have to make calls at an astounding rate (300B/s) and even if they
506 // could do that, all they would do is to prevent some callbacks from getting
507 // called, leading to stalled threads and/or memory leaks.
508 uint32 signal_id = next_signal_id_++;
509 if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_,
515 signal_tasks_.insert(std::make_pair(signal_id, callback));
// Reports client-side managed-memory statistics to the GPU process. No-op
// on a lost context.
518 void CommandBufferProxyImpl::SendManagedMemoryStats(
519 const gpu::ManagedMemoryStats& stats) {
520 if (last_state_.error != gpu::error::kNoError)
523 Send(new GpuCommandBufferMsg_SendClientManagedMemoryStats(route_id_,
// Fills |names| with |num| mailbox names, delegating to the channel host.
// Fails fast on a lost context (early-out value elided from this listing).
527 bool CommandBufferProxyImpl::GenerateMailboxNames(
529 std::vector<gpu::Mailbox>* names) {
530 if (last_state_.error != gpu::error::kNoError)
533 return channel_->GenerateMailboxNames(num, names);
// Asks the GPU process to expose the front buffer through |mailbox|.
// Returns the Send result; fails fast on a lost context.
536 bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) {
537 if (last_state_.error != gpu::error::kNoError)
540 return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_, mailbox));
// Creates a hardware video decoder for |profile|: asks the GPU process for a
// decoder route, then wraps it in a GpuVideoDecodeAcceleratorHost owned by
// the returned scoped_ptr. Returns a null scoped_ptr on failure (early
// returns and the final return are elided from this listing).
543 scoped_ptr<media::VideoDecodeAccelerator>
544 CommandBufferProxyImpl::CreateVideoDecoder(
545 media::VideoCodecProfile profile,
546 media::VideoDecodeAccelerator::Client* client) {
547 int decoder_route_id;
548 scoped_ptr<media::VideoDecodeAccelerator> vda;
549 if (!Send(new GpuCommandBufferMsg_CreateVideoDecoder(route_id_, profile,
550 &decoder_route_id))) {
551 LOG(ERROR) << "Send(GpuCommandBufferMsg_CreateVideoDecoder) failed";
// A negative route id is the service's failure signal.
555 if (decoder_route_id < 0) {
556 DLOG(ERROR) << "Failed to Initialize GPU decoder on profile: " << profile;
560 GpuVideoDecodeAcceleratorHost* decoder_host =
561 new GpuVideoDecodeAcceleratorHost(channel_, decoder_route_id, client,
563 vda.reset(decoder_host);
// Returns the last observed error from the cached state; no IPC.
567 gpu::error::Error CommandBufferProxyImpl::GetLastError() {
568 return last_state_.error;
// Sends |msg| over the GPU channel, taking ownership of it. On failure the
// context is flagged lost locally; channel teardown is deferred to
// OnChannelError. NOTE(review): the success/failure return paths are elided
// from this listing.
571 bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
572 // Caller should not intentionally send a message if the context is lost.
573 DCHECK(last_state_.error == gpu::error::kNoError);
576 if (channel_->Send(msg)) {
579 // Flag the command buffer as lost. Defer deleting the channel until
580 // OnChannelError is called after returning to the message loop in case
581 // it is referenced elsewhere.
582 last_state_.error = gpu::error::kLostContext;
587 // Callee takes ownership of message, regardless of whether Send is
588 // successful. See IPC::Sender.
// Accepts |state| into |last_state_| only if its generation is not older
// than the cached one, using unsigned wraparound-safe comparison (the
// assignment line itself is elided from this listing).
593 void CommandBufferProxyImpl::OnUpdateState(
594 const gpu::CommandBuffer::State& state) {
595 // Handle wraparound. It works as long as we don't have more than 2B state
596 // updates in flight across which reordering occurs.
597 if (state.generation - last_state_.generation < 0x80000000U)
// Registers the callback invoked by OnConsoleMessage for GPU-side console
// output.
601 void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
602 const GpuConsoleMessageCallback& callback) {
603 console_message_callback_ = callback;
// Cheap state refresh: reads the shared-memory state written by the GPU
// process instead of issuing an IPC. Skipped once the context is lost.
606 void CommandBufferProxyImpl::TryUpdateState() {
607 if (last_state_.error == gpu::error::kNoError)
608 shared_state()->Read(&last_state_);
611 } // namespace content