1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "content/common/gpu/client/gpu_channel_host.h"
10 #include "base/debug/trace_event.h"
11 #include "base/message_loop/message_loop.h"
12 #include "base/message_loop/message_loop_proxy.h"
13 #include "base/posix/eintr_wrapper.h"
14 #include "base/threading/thread_restrictions.h"
15 #include "content/common/gpu/client/command_buffer_proxy_impl.h"
16 #include "content/common/gpu/gpu_messages.h"
17 #include "ipc/ipc_sync_message_filter.h"
21 #include "content/public/common/sandbox_init.h"
25 using base::MessageLoopProxy;
// Per-route bookkeeping entry stored by GpuChannelHost::MessageFilter
// (see MessageFilter::AddRoute below). Members manage themselves, so
// the special members are empty.
29 GpuListenerInfo::GpuListenerInfo() {}
31 GpuListenerInfo::~GpuListenerInfo() {}
// Factory entry point: constructs a GpuChannelHost and immediately
// connects it to the GPU process over |channel_handle|. Must be called
// on the main thread (enforced by the DCHECK); |shutdown_event| is
// forwarded to Connect() for the sync message filter.
34 scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
35 GpuChannelHostFactory* factory,
36 const gpu::GPUInfo& gpu_info,
37 const IPC::ChannelHandle& channel_handle,
38 base::WaitableEvent* shutdown_event) {
39 DCHECK(factory->IsMainThread());
40 scoped_refptr<GpuChannelHost> host = new GpuChannelHost(factory, gpu_info);
41 host->Connect(channel_handle, shutdown_event);
// Classifies |handle| by its buffer type. The case labels below list
// the handle types this process treats as valid GPU memory buffers:
// shared memory on every platform, IOSurface only on Mac, and
// SurfaceTexture only on Android (each guarded by its platform #if).
46 bool GpuChannelHost::IsValidGpuMemoryBuffer(
47 gfx::GpuMemoryBufferHandle handle) {
48 switch (handle.type) {
49 case gfx::SHARED_MEMORY_BUFFER:
50 #if defined(OS_MACOSX)
51 case gfx::IO_SURFACE_BUFFER:
53 #if defined(OS_ANDROID)
54 case gfx::SURFACE_TEXTURE_BUFFER:
62 GpuChannelHost::GpuChannelHost(GpuChannelHostFactory* factory,
63 const gpu::GPUInfo& gpu_info)
// Consume the first value of each id sequence — presumably so that 0
// can serve as an invalid/sentinel id. TODO(review): confirm against
// the header's id conventions.
66 next_transfer_buffer_id_.GetNext();
67 next_gpu_memory_buffer_id_.GetNext();
68 next_route_id_.GetNext();
// Establishes the IPC channel to the GPU process and installs the two
// message filters: the SyncMessageFilter (so non-main threads can Send,
// see Send() below) and the MessageFilter that routes incoming messages
// to per-route listeners on their own threads.
71 void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle,
72 base::WaitableEvent* shutdown_event) {
73 // Open a channel to the GPU process. We pass NULL as the main listener here
74 // since we need to filter everything to route it to the right thread.
75 scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
76 channel_ = IPC::SyncChannel::Create(channel_handle,
77 IPC::Channel::MODE_CLIENT,
// |shutdown_event| lets pending synchronous sends abort at shutdown.
83 sync_filter_ = new IPC::SyncMessageFilter(shutdown_event);
85 channel_->AddFilter(sync_filter_.get());
87 channel_filter_ = new MessageFilter();
89 // Install the filter last, because we intercept all leftover
91 channel_->AddFilter(channel_filter_.get());
// Sends |msg| to the GPU process, taking ownership of it regardless of
// outcome. On the main thread it uses the channel directly (waiting is
// explicitly allowed for crbug.com/125264); on other threads with a
// message loop it goes through the SyncMessageFilter.
94 bool GpuChannelHost::Send(IPC::Message* msg) {
95 // Callee takes ownership of message, regardless of whether Send is
96 // successful. See IPC::Sender.
97 scoped_ptr<IPC::Message> message(msg);
98 // The GPU process never sends synchronous IPCs so clear the unblock flag to
100 message->set_unblock(false);
102 // Currently we need to choose between two different mechanisms for sending.
103 // On the main thread we use the regular channel Send() method, on another
104 // thread we use SyncMessageFilter. We also have to be careful interpreting
105 // IsMainThread() since it might return false during shutdown,
106 // impl we are actually calling from the main thread (discard message then).
108 // TODO: Can we just always use sync_filter_ since we setup the channel
109 // without a main listener?
110 if (factory_->IsMainThread()) {
111 // http://crbug.com/125264
112 base::ThreadRestrictions::ScopedAllowWait allow_wait;
113 bool result = channel_->Send(message.release());
115 DVLOG(1) << "GpuChannelHost::Send failed: Channel::Send failed";
117 } else if (base::MessageLoop::current()) {
118 bool result = sync_filter_->Send(message.release());
120 DVLOG(1) << "GpuChannelHost::Send failed: SyncMessageFilter::Send failed";
// Creates a command buffer bound to a view surface (|surface_id|, used
// below via the factory call). |share_group| may be NULL; when set, the
// new context shares with it via its route id. On success the new proxy
// is registered as a route and recorded in |proxies_| under
// |context_lock_|; on failure the channel is marked lost (see the
// comment block below).
127 CommandBufferProxyImpl* GpuChannelHost::CreateViewCommandBuffer(
129 CommandBufferProxyImpl* share_group,
130 const std::vector<int32>& attribs,
131 const GURL& active_url,
132 gfx::GpuPreference gpu_preference) {
134 "GpuChannelHost::CreateViewCommandBuffer",
138 GPUCreateCommandBufferConfig init_params;
139 init_params.share_group_id =
140 share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
141 init_params.attribs = attribs;
142 init_params.active_url = active_url;
143 init_params.gpu_preference = gpu_preference;
144 int32 route_id = GenerateRouteID();
145 if (!factory_->CreateViewCommandBuffer(surface_id, init_params, route_id)) {
146 LOG(ERROR) << "GpuChannelHost::CreateViewCommandBuffer failed.";
148 // The most likely reason CreateViewCommandBuffer will fail is
149 // that the GPU process crashed. In this case the GPU channel
150 // needs to be considered lost. The caller will then set up a new
151 // connection, and the GPU channel and any view command buffers
152 // will all be associated with the same GPU process.
153 DCHECK(MessageLoopProxy::current().get());
// OnChannelError must run on the IO loop, where the filter lives.
155 scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
156 io_loop->PostTask(FROM_HERE,
157 base::Bind(&GpuChannelHost::MessageFilter::OnChannelError,
158 channel_filter_.get()));
163 CommandBufferProxyImpl* command_buffer =
164 new CommandBufferProxyImpl(this, route_id);
165 AddRoute(route_id, command_buffer->AsWeakPtr());
167 AutoLock lock(context_lock_);
168 proxies_[route_id] = command_buffer;
169 return command_buffer;
// Creates an offscreen command buffer of the given |size| by sending a
// synchronous request to the GPU process. Mirrors
// CreateViewCommandBuffer: same init params, same route/proxy
// registration under |context_lock_| on success.
172 CommandBufferProxyImpl* GpuChannelHost::CreateOffscreenCommandBuffer(
173 const gfx::Size& size,
174 CommandBufferProxyImpl* share_group,
175 const std::vector<int32>& attribs,
176 const GURL& active_url,
177 gfx::GpuPreference gpu_preference) {
178 TRACE_EVENT0("gpu", "GpuChannelHost::CreateOffscreenCommandBuffer");
180 GPUCreateCommandBufferConfig init_params;
181 init_params.share_group_id =
182 share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
183 init_params.attribs = attribs;
184 init_params.active_url = active_url;
185 init_params.gpu_preference = gpu_preference;
186 int32 route_id = GenerateRouteID();
// |succeeded| is filled in by the reply to the sync message below.
187 bool succeeded = false;
188 if (!Send(new GpuChannelMsg_CreateOffscreenCommandBuffer(size,
192 LOG(ERROR) << "Failed to send GpuChannelMsg_CreateOffscreenCommandBuffer.";
198 << "GpuChannelMsg_CreateOffscreenCommandBuffer returned failure.";
202 CommandBufferProxyImpl* command_buffer =
203 new CommandBufferProxyImpl(this, route_id);
204 AddRoute(route_id, command_buffer->AsWeakPtr());
206 AutoLock lock(context_lock_);
207 proxies_[route_id] = command_buffer;
208 return command_buffer;
// Creates a video decoder by delegating to the command buffer proxy
// registered under |command_buffer_route_id|. The proxy must exist
// (DCHECKed); |context_lock_| guards the |proxies_| lookup.
211 scoped_ptr<media::VideoDecodeAccelerator> GpuChannelHost::CreateVideoDecoder(
212 int command_buffer_route_id) {
213 TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoDecoder");
214 AutoLock lock(context_lock_);
215 ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
216 DCHECK(it != proxies_.end());
217 return it->second->CreateVideoDecoder();
// Encoder counterpart of CreateVideoDecoder: delegates to the proxy for
// |command_buffer_route_id|, which must be registered (DCHECKed), with
// the |proxies_| lookup done under |context_lock_|.
220 scoped_ptr<media::VideoEncodeAccelerator> GpuChannelHost::CreateVideoEncoder(
221 int command_buffer_route_id) {
222 TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoEncoder");
223 AutoLock lock(context_lock_);
224 ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
225 DCHECK(it != proxies_.end());
226 return it->second->CreateVideoEncoder();
// Destroys |command_buffer|: tells the GPU process to drop its end,
// unregisters the IPC route, erases the proxy from |proxies_| under
// |context_lock_|, and deletes the proxy. Takes ownership of
// |command_buffer|.
229 void GpuChannelHost::DestroyCommandBuffer(
230 CommandBufferProxyImpl* command_buffer) {
231 TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");
233 int route_id = command_buffer->GetRouteID();
234 Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
235 RemoveRoute(route_id);
237 AutoLock lock(context_lock_);
238 proxies_.erase(route_id);
239 delete command_buffer;
// Registers |listener| for messages addressed to |route_id|. The
// registration is posted to the IO loop because the MessageFilter runs
// there; the caller's MessageLoopProxy is passed along — presumably so
// the filter can dispatch messages back on this thread (see
// MessageFilter::AddRoute; the loop's use is not visible here).
242 void GpuChannelHost::AddRoute(
243 int route_id, base::WeakPtr<IPC::Listener> listener) {
244 DCHECK(MessageLoopProxy::current().get());
246 scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
247 io_loop->PostTask(FROM_HERE,
248 base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
249 channel_filter_.get(), route_id, listener,
250 MessageLoopProxy::current()));
// Unregisters |route_id|, again by posting to the IO loop where the
// MessageFilter lives. Fire-and-forget: there is no synchronization
// with in-flight dispatches.
253 void GpuChannelHost::RemoveRoute(int route_id) {
254 scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
255 io_loop->PostTask(FROM_HERE,
256 base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute,
257 channel_filter_.get(), route_id));
// Duplicates |source_handle| into a handle the GPU process can use.
// Platform-specific (the #if lines are not visible in this view): on
// Windows the handle is brokered to the GPU process's PID with
// read/write access; on POSIX the fd is dup()'d (retrying on EINTR) and
// returned as an owning FileDescriptor. Returns NULLHandle() on
// failure.
260 base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
261 base::SharedMemoryHandle source_handle) {
263 return base::SharedMemory::NULLHandle();
266 // Windows needs to explicitly duplicate the handle out to another process.
267 base::SharedMemoryHandle target_handle;
268 if (!BrokerDuplicateHandle(source_handle,
269 channel_->GetPeerPID(),
271 FILE_GENERIC_READ | FILE_GENERIC_WRITE,
273 return base::SharedMemory::NULLHandle();
276 return target_handle;
278 int duped_handle = HANDLE_EINTR(dup(source_handle.fd));
279 if (duped_handle < 0)
280 return base::SharedMemory::NULLHandle();
// |true| marks the descriptor as owned, so the receiver closes it.
282 return base::FileDescriptor(duped_handle, true);
// Returns a channel-unique transfer buffer id; thread-safe via
// AtomicSequenceNumber.
286 int32 GpuChannelHost::ReserveTransferBufferId() {
287 return next_transfer_buffer_id_.GetNext();
// Converts |source_handle| into a handle usable by the GPU process.
// Shared-memory handles need their underlying SharedMemoryHandle
// duplicated via ShareToGpuProcess; IOSurface (Mac) and SurfaceTexture
// (Android) handles are already cross-process and pass through
// unchanged. Unknown types fall through to an empty (null) handle.
290 gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
291 gfx::GpuMemoryBufferHandle source_handle) {
292 switch (source_handle.type) {
293 case gfx::SHARED_MEMORY_BUFFER: {
294 gfx::GpuMemoryBufferHandle handle;
295 handle.type = gfx::SHARED_MEMORY_BUFFER;
296 handle.handle = ShareToGpuProcess(source_handle.handle);
299 #if defined(OS_MACOSX)
300 case gfx::IO_SURFACE_BUFFER:
301 return source_handle;
303 #if defined(OS_ANDROID)
304 case gfx::SURFACE_TEXTURE_BUFFER:
305 return source_handle;
309 return gfx::GpuMemoryBufferHandle();
// Returns a channel-unique GPU memory buffer id; thread-safe via
// AtomicSequenceNumber.
313 int32 GpuChannelHost::ReserveGpuMemoryBufferId() {
314 return next_gpu_memory_buffer_id_.GetNext();
// Returns a channel-unique IPC route id; thread-safe via
// AtomicSequenceNumber.
317 int32 GpuChannelHost::GenerateRouteID() {
318 return next_route_id_.GetNext();
// The last reference may be dropped on any thread, but |channel_| is
// only safe to destroy on the main thread — so when the destructor runs
// elsewhere, ownership of the channel is handed to the main loop via
// DeleteSoon.
321 GpuChannelHost::~GpuChannelHost() {
322 // channel_ must be destroyed on the main thread.
323 if (!factory_->IsMainThread())
324 factory_->GetMainLoop()->DeleteSoon(FROM_HERE, channel_.release());
328 GpuChannelHost::MessageFilter::MessageFilter()
332 GpuChannelHost::MessageFilter::~MessageFilter() {}
// Records |listener| (and its target |loop|) for a route id. Runs on
// the IO thread (posted from GpuChannelHost::AddRoute). A route may
// only be registered once — double registration is a programming error
// (DCHECK).
334 void GpuChannelHost::MessageFilter::AddRoute(
336 base::WeakPtr<IPC::Listener> listener,
337 scoped_refptr<MessageLoopProxy> loop) {
338 DCHECK(listeners_.find(route_id) == listeners_.end());
339 GpuListenerInfo info;
340 info.listener = listener;
342 listeners_[route_id] = info;
// Drops the listener entry for |route_id| if one exists; silently a
// no-op for unknown routes (removal can race with channel teardown).
345 void GpuChannelHost::MessageFilter::RemoveRoute(int route_id) {
346 ListenerMap::iterator it = listeners_.find(route_id);
347 if (it != listeners_.end())
348 listeners_.erase(it);
// IO-thread entry point for messages from the GPU process. Sync replies
// are skipped (they must reach the SyncChannel machinery), unknown
// routing ids are left unhandled, and otherwise the message is handed
// to the registered listener — presumably via a task posted to the
// listener's loop (the Bind below targets IPC::Listener::
// OnMessageReceived; the PostTask line is not visible here).
351 bool GpuChannelHost::MessageFilter::OnMessageReceived(
352 const IPC::Message& message) {
353 // Never handle sync message replies or we will deadlock here.
354 if (message.is_reply())
357 ListenerMap::iterator it = listeners_.find(message.routing_id());
358 if (it == listeners_.end())
361 const GpuListenerInfo& info = it->second;
365 base::IgnoreResult(&IPC::Listener::OnMessageReceived),
// Called on the IO thread when the GPU channel dies (or is explicitly
// marked lost, see CreateViewCommandBuffer's failure path). Flips the
// lost flag under |lock_| first so IsLost() observers stop reusing this
// host, then notifies every registered listener of the error.
371 void GpuChannelHost::MessageFilter::OnChannelError() {
372 // Set the lost state before signalling the proxies. That way, if they
373 // themselves post a task to recreate the context, they will not try to re-use
374 // this channel host.
376 AutoLock lock(lock_);
380 // Inform all the proxies that an error has occurred. This will be reported
381 // via OpenGL as a lost context.
382 for (ListenerMap::iterator it = listeners_.begin();
383 it != listeners_.end();
385 const GpuListenerInfo& info = it->second;
388 base::Bind(&IPC::Listener::OnChannelError, info.listener));
// Thread-safe query of the lost flag set by OnChannelError; |lock_|
// makes the read safe from any thread.
394 bool GpuChannelHost::MessageFilter::IsLost() const {
395 AutoLock lock(lock_);
399 } // namespace content