// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/gpu_channel_host.h"

#include <algorithm>

#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/posix/eintr_wrapper.h"
#include "base/threading/thread_restrictions.h"
#include "content/common/gpu/client/command_buffer_proxy_impl.h"
#include "content/common/gpu/gpu_messages.h"
#include "ipc/ipc_sync_message_filter.h"
#include "url/gurl.h"

#if defined(OS_WIN)
#include "content/public/common/sandbox_init.h"
#endif

using base::AutoLock;
using base::MessageLoopProxy;

namespace content {

GpuListenerInfo::GpuListenerInfo() {}

GpuListenerInfo::~GpuListenerInfo() {}

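// Creates a GpuChannelHost and connects it to the GPU process over
// |channel_handle|. Must be called on the main thread.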
// static
scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
    GpuChannelHostFactory* factory,
    const gpu::GPUInfo& gpu_info,
    const IPC::ChannelHandle& channel_handle,
    base::WaitableEvent* shutdown_event) {
  DCHECK(factory->IsMainThread());
  scoped_refptr<GpuChannelHost> host = new GpuChannelHost(factory, gpu_info);
  host->Connect(channel_handle, shutdown_event);
  return host;
}

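// Returns true if |handle| is of a GpuMemoryBuffer type that can be shared
// with the GPU process on the current platform.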
// static
bool GpuChannelHost::IsValidGpuMemoryBuffer(
    gfx::GpuMemoryBufferHandle handle) {
  switch (handle.type) {
    case gfx::SHARED_MEMORY_BUFFER:
#if defined(OS_MACOSX)
    case gfx::IO_SURFACE_BUFFER:
#endif
#if defined(OS_ANDROID)
    case gfx::SURFACE_TEXTURE_BUFFER:
#endif
      return true;
    default:
      return false;
  }
}

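// The constructor advances each id counter once so that generated ids start
// at 1; 0 is presumably reserved as an invalid value.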
GpuChannelHost::GpuChannelHost(GpuChannelHostFactory* factory,
                               const gpu::GPUInfo& gpu_info)
    : factory_(factory),
      gpu_info_(gpu_info) {
  next_transfer_buffer_id_.GetNext();
  next_gpu_memory_buffer_id_.GetNext();
  next_route_id_.GetNext();
}

void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle,
                             base::WaitableEvent* shutdown_event) {
  // Open a channel to the GPU process. We pass NULL as the main listener here
  // since we need to filter everything to route it to the right thread.
  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  channel_ = IPC::SyncChannel::Create(channel_handle,
                                      IPC::Channel::MODE_CLIENT,
                                      NULL,
                                      io_loop.get(),
                                      true,
                                      shutdown_event);

  sync_filter_ = new IPC::SyncMessageFilter(shutdown_event);

  channel_->AddFilter(sync_filter_.get());

  channel_filter_ = new MessageFilter();

  // Install the filter last, because we intercept all leftover
  // messages.
  channel_->AddFilter(channel_filter_.get());
}

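// Sends |msg| to the GPU process, taking ownership of it. Returns false if
// the message cannot be sent from the calling thread (e.g. during shutdown).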
bool GpuChannelHost::Send(IPC::Message* msg) {
  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Sender.
  scoped_ptr<IPC::Message> message(msg);
  // The GPU process never sends synchronous IPCs so clear the unblock flag to
  // preserve order.
  message->set_unblock(false);

  // Currently we need to choose between two different mechanisms for sending.
  // On the main thread we use the regular channel Send() method, on another
  // thread we use SyncMessageFilter. We also have to be careful interpreting
  // IsMainThread(), since it might return false during shutdown even though we
  // are actually calling from the main thread (the message is discarded then).
  //
  // TODO: Can we just always use sync_filter_ since we set up the channel
  //       without a main listener?
  if (factory_->IsMainThread()) {
    // http://crbug.com/125264
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    bool result = channel_->Send(message.release());
    if (!result)
      DVLOG(1) << "GpuChannelHost::Send failed: Channel::Send failed";
    return result;
  } else if (base::MessageLoop::current()) {
    bool result = sync_filter_->Send(message.release());
    if (!result)
      DVLOG(1) << "GpuChannelHost::Send failed: SyncMessageFilter::Send failed";
    return result;
  }

  return false;
}

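// Creates a command buffer that renders to the onscreen surface identified by
// |surface_id|. On failure the channel is marked as lost and NULL is returned.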
CommandBufferProxyImpl* GpuChannelHost::CreateViewCommandBuffer(
    int32 surface_id,
    CommandBufferProxyImpl* share_group,
    const std::vector<int32>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  TRACE_EVENT1("gpu",
               "GpuChannelHost::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;
  int32 route_id = GenerateRouteID();
  if (!factory_->CreateViewCommandBuffer(surface_id, init_params, route_id)) {
    LOG(ERROR) << "GpuChannelHost::CreateViewCommandBuffer failed.";

    // The most likely reason CreateViewCommandBuffer will fail is
    // that the GPU process crashed. In this case the GPU channel
    // needs to be considered lost. The caller will then set up a new
    // connection, and the GPU channel and any view command buffers
    // will all be associated with the same GPU process.
    DCHECK(MessageLoopProxy::current().get());

    scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
    io_loop->PostTask(FROM_HERE,
                      base::Bind(&GpuChannelHost::MessageFilter::OnChannelError,
                                 channel_filter_.get()));

    return NULL;
  }

  CommandBufferProxyImpl* command_buffer =
      new CommandBufferProxyImpl(this, route_id);
  AddRoute(route_id, command_buffer->AsWeakPtr());

  AutoLock lock(context_lock_);
  proxies_[route_id] = command_buffer;
  return command_buffer;
}

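// Creates a command buffer for offscreen rendering of the given |size|.
// Returns NULL if the GPU process fails to create it.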
CommandBufferProxyImpl* GpuChannelHost::CreateOffscreenCommandBuffer(
    const gfx::Size& size,
    CommandBufferProxyImpl* share_group,
    const std::vector<int32>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateOffscreenCommandBuffer");

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;
  int32 route_id = GenerateRouteID();
  bool succeeded = false;
  if (!Send(new GpuChannelMsg_CreateOffscreenCommandBuffer(size,
                                                           init_params,
                                                           route_id,
                                                           &succeeded))) {
    LOG(ERROR) << "Failed to send GpuChannelMsg_CreateOffscreenCommandBuffer.";
    return NULL;
  }

  if (!succeeded) {
    LOG(ERROR)
        << "GpuChannelMsg_CreateOffscreenCommandBuffer returned failure.";
    return NULL;
  }

  CommandBufferProxyImpl* command_buffer =
      new CommandBufferProxyImpl(this, route_id);
  AddRoute(route_id, command_buffer->AsWeakPtr());

  AutoLock lock(context_lock_);
  proxies_[route_id] = command_buffer;
  return command_buffer;
}

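// Creates a video decode accelerator associated with the command buffer
// identified by |command_buffer_route_id|.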
scoped_ptr<media::VideoDecodeAccelerator> GpuChannelHost::CreateVideoDecoder(
    int command_buffer_route_id) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoDecoder");
  AutoLock lock(context_lock_);
  ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
  DCHECK(it != proxies_.end());
  return it->second->CreateVideoDecoder();
}

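// Creates a video encode accelerator associated with the command buffer
// identified by |command_buffer_route_id|.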
scoped_ptr<media::VideoEncodeAccelerator> GpuChannelHost::CreateVideoEncoder(
    int command_buffer_route_id) {
  TRACE_EVENT0("gpu", "GpuChannelHost::CreateVideoEncoder");
  AutoLock lock(context_lock_);
  ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
  DCHECK(it != proxies_.end());
  return it->second->CreateVideoEncoder();
}

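// Tells the GPU process to destroy the command buffer, unregisters its route
// and deletes the proxy.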
void GpuChannelHost::DestroyCommandBuffer(
    CommandBufferProxyImpl* command_buffer) {
  TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");

  int route_id = command_buffer->GetRouteID();
  Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
  RemoveRoute(route_id);

  AutoLock lock(context_lock_);
  proxies_.erase(route_id);
  delete command_buffer;
}

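// Registers |listener| to receive messages addressed to |route_id|. The
// registration itself is posted to the IO thread, where incoming messages
// are dispatched.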
void GpuChannelHost::AddRoute(
    int route_id, base::WeakPtr<IPC::Listener> listener) {
  DCHECK(MessageLoopProxy::current().get());

  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  io_loop->PostTask(FROM_HERE,
                    base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
                               channel_filter_.get(), route_id, listener,
                               MessageLoopProxy::current()));
}

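// Unregisters the listener for |route_id| on the IO thread.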
void GpuChannelHost::RemoveRoute(int route_id) {
  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
  io_loop->PostTask(FROM_HERE,
                    base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute,
                               channel_filter_.get(), route_id));
}

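// Duplicates |source_handle| so it can be transferred to the GPU process.
// Returns a null handle if the channel is lost or duplication fails.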
base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
    base::SharedMemoryHandle source_handle) {
  if (IsLost())
    return base::SharedMemory::NULLHandle();

#if defined(OS_WIN)
  // Windows needs to explicitly duplicate the handle out to another process.
  base::SharedMemoryHandle target_handle;
  if (!BrokerDuplicateHandle(source_handle,
                             channel_->GetPeerPID(),
                             &target_handle,
                             FILE_GENERIC_READ | FILE_GENERIC_WRITE,
                             0)) {
    return base::SharedMemory::NULLHandle();
  }

  return target_handle;
#else
  int duped_handle = HANDLE_EINTR(dup(source_handle.fd));
  if (duped_handle < 0)
    return base::SharedMemory::NULLHandle();

  return base::FileDescriptor(duped_handle, true);
#endif
}

int32 GpuChannelHost::ReserveTransferBufferId() {
  return next_transfer_buffer_id_.GetNext();
}

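// Prepares |source_handle| for transfer to the GPU process: shared memory
// handles are duplicated, while platform-specific buffer types are passed
// through unchanged.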
gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
    gfx::GpuMemoryBufferHandle source_handle) {
  switch (source_handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      gfx::GpuMemoryBufferHandle handle;
      handle.type = gfx::SHARED_MEMORY_BUFFER;
      handle.handle = ShareToGpuProcess(source_handle.handle);
      return handle;
    }
#if defined(OS_MACOSX)
    case gfx::IO_SURFACE_BUFFER:
      return source_handle;
#endif
#if defined(OS_ANDROID)
    case gfx::SURFACE_TEXTURE_BUFFER:
      return source_handle;
#endif
    default:
      NOTREACHED();
      return gfx::GpuMemoryBufferHandle();
  }
}

int32 GpuChannelHost::ReserveGpuMemoryBufferId() {
  return next_gpu_memory_buffer_id_.GetNext();
}

int32 GpuChannelHost::GenerateRouteID() {
  return next_route_id_.GetNext();
}

GpuChannelHost::~GpuChannelHost() {
  // channel_ must be destroyed on the main thread.
  if (!factory_->IsMainThread())
    factory_->GetMainLoop()->DeleteSoon(FROM_HERE, channel_.release());
}

GpuChannelHost::MessageFilter::MessageFilter()
    : lost_(false) {
}

GpuChannelHost::MessageFilter::~MessageFilter() {}

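// Called on the IO thread to start routing messages for |route_id| to
// |listener| on |loop|.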
void GpuChannelHost::MessageFilter::AddRoute(
    int route_id,
    base::WeakPtr<IPC::Listener> listener,
    scoped_refptr<MessageLoopProxy> loop) {
  DCHECK(listeners_.find(route_id) == listeners_.end());
  GpuListenerInfo info;
  info.listener = listener;
  info.loop = loop;
  listeners_[route_id] = info;
}

void GpuChannelHost::MessageFilter::RemoveRoute(int route_id) {
  ListenerMap::iterator it = listeners_.find(route_id);
  if (it != listeners_.end())
    listeners_.erase(it);
}

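// Called on the IO thread. Posts |message| to the listener registered for its
// routing id, on that listener's message loop. Returns false for sync replies
// and for messages with no registered listener.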
bool GpuChannelHost::MessageFilter::OnMessageReceived(
    const IPC::Message& message) {
  // Never handle sync message replies or we will deadlock here.
  if (message.is_reply())
    return false;

  ListenerMap::iterator it = listeners_.find(message.routing_id());
  if (it == listeners_.end())
    return false;

  const GpuListenerInfo& info = it->second;
  info.loop->PostTask(
      FROM_HERE,
      base::Bind(
          base::IgnoreResult(&IPC::Listener::OnMessageReceived),
          info.listener,
          message));
  return true;
}

void GpuChannelHost::MessageFilter::OnChannelError() {
  // Set the lost state before signalling the proxies. That way, if they
  // themselves post a task to recreate the context, they will not try to
  // re-use this channel host.
  {
    AutoLock lock(lock_);
    lost_ = true;
  }

  // Inform all the proxies that an error has occurred. This will be reported
  // via OpenGL as a lost context.
  for (ListenerMap::iterator it = listeners_.begin();
       it != listeners_.end();
       it++) {
    const GpuListenerInfo& info = it->second;
    info.loop->PostTask(
        FROM_HERE,
        base::Bind(&IPC::Listener::OnChannelError, info.listener));
  }

  listeners_.clear();
}

bool GpuChannelHost::MessageFilter::IsLost() const {
  AutoLock lock(lock_);
  return lost_;
}

}  // namespace content