Upstream version 10.39.225.0
[platform/framework/web/crosswalk.git] src/content/common/gpu/client/command_buffer_proxy_impl.h
index b018367..5adbbcf 100644
 #include "base/callback.h"
 #include "base/compiler_specific.h"
 #include "base/containers/hash_tables.h"
+#include "base/containers/scoped_ptr_hash_map.h"
 #include "base/memory/ref_counted.h"
 #include "base/memory/weak_ptr.h"
 #include "base/observer_list.h"
+#include "gpu/command_buffer/client/gpu_control.h"
 #include "gpu/command_buffer/common/command_buffer.h"
 #include "gpu/command_buffer/common/command_buffer_shared.h"
-#include "gpu/command_buffer/common/gpu_control.h"
 #include "gpu/command_buffer/common/gpu_memory_allocation.h"
 #include "ipc/ipc_listener.h"
-#include "media/video/video_decode_accelerator.h"
 #include "ui/events/latency_info.h"
 
 struct GPUCommandBufferConsoleMessage;
@@ -29,14 +29,15 @@ namespace base {
 class SharedMemory;
 }
 
-namespace gfx {
-class GpuMemoryBuffer;
-}
-
 namespace gpu {
 struct Mailbox;
 }
 
+namespace media {
+class VideoDecodeAccelerator;
+class VideoEncodeAccelerator;
+}
+
 namespace content {
 class GpuChannelHost;
 
@@ -67,11 +68,17 @@ class CommandBufferProxyImpl
   // returns it as an owned pointer to a media::VideoDecodeAccelerator.  Returns
   // NULL on failure to create the GpuVideoDecodeAcceleratorHost.
   // Note that the GpuVideoDecodeAccelerator may still fail to be created in
-  // the GPU process, even if this returns non-NULL. In this case the client is
-  // notified of an error later.
-  scoped_ptr<media::VideoDecodeAccelerator> CreateVideoDecoder(
-      media::VideoCodecProfile profile,
-      media::VideoDecodeAccelerator::Client* client);
+  // the GPU process, even if this returns non-NULL. In this case the VDA client
+  // is notified of an error later, after Initialize().
+  scoped_ptr<media::VideoDecodeAccelerator> CreateVideoDecoder();
+
+  // Sends an IPC message to create a GpuVideoEncodeAccelerator. Creates and
+  // returns it as an owned pointer to a media::VideoEncodeAccelerator.  Returns
+  // NULL on failure to create the GpuVideoEncodeAcceleratorHost.
+  // Note that the GpuVideoEncodeAccelerator may still fail to be created in
+  // the GPU process, even if this returns non-NULL. In this case the VEA client
+  // is notified of an error later, after Initialize().
+  scoped_ptr<media::VideoEncodeAccelerator> CreateVideoEncoder();
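For illustration, a minimal caller-side sketch of the new factory shape; it assumes the contemporaneous media::VideoDecodeAccelerator::Initialize(profile, client) signature, and proxy, profile and client are hypothetical caller names:

    // Hypothetical caller; 'proxy' is a CommandBufferProxyImpl*.
    scoped_ptr<media::VideoDecodeAccelerator> vda = proxy->CreateVideoDecoder();
    if (!vda)
      return;  // The GpuVideoDecodeAcceleratorHost could not be created.
    // Creation in the GPU process may still fail; the client is then told
    // through an error notification after this call returns.
    if (!vda->Initialize(profile, client))
      return;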
 
   // IPC::Listener implementation:
   virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;
@@ -79,42 +86,36 @@ class CommandBufferProxyImpl
 
   // CommandBuffer implementation:
   virtual bool Initialize() OVERRIDE;
-  virtual State GetState() OVERRIDE;
   virtual State GetLastState() OVERRIDE;
   virtual int32 GetLastToken() OVERRIDE;
   virtual void Flush(int32 put_offset) OVERRIDE;
-  virtual State FlushSync(int32 put_offset, int32 last_known_get) OVERRIDE;
+  virtual void WaitForTokenInRange(int32 start, int32 end) OVERRIDE;
+  virtual void WaitForGetOffsetInRange(int32 start, int32 end) OVERRIDE;
   virtual void SetGetBuffer(int32 shm_id) OVERRIDE;
-  virtual void SetGetOffset(int32 get_offset) OVERRIDE;
-  virtual gpu::Buffer CreateTransferBuffer(size_t size,
-                                           int32* id) OVERRIDE;
+  virtual scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
+                                                          int32* id) OVERRIDE;
   virtual void DestroyTransferBuffer(int32 id) OVERRIDE;
-  virtual gpu::Buffer GetTransferBuffer(int32 id) OVERRIDE;
-  virtual void SetToken(int32 token) OVERRIDE;
-  virtual void SetParseError(gpu::error::Error error) OVERRIDE;
-  virtual void SetContextLostReason(
-      gpu::error::ContextLostReason reason) OVERRIDE;
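Illustrative use of the wait-based calls that replace FlushSync(); cmd_buffer, put_offset and token stand in for caller state:

    // Issue the buffered commands, then block until the service has
    // processed a token in [token, token] and re-read the shared state.
    cmd_buffer->Flush(put_offset);
    cmd_buffer->WaitForTokenInRange(token, token);
    gpu::CommandBuffer::State state = cmd_buffer->GetLastState();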
 
   // gpu::GpuControl implementation:
-  virtual bool SupportsGpuMemoryBuffer() OVERRIDE;
-  virtual gfx::GpuMemoryBuffer* CreateGpuMemoryBuffer(
-      size_t width,
-      size_t height,
-      unsigned internalformat,
-      int32* id) OVERRIDE;
+  virtual gpu::Capabilities GetCapabilities() OVERRIDE;
+  virtual gfx::GpuMemoryBuffer* CreateGpuMemoryBuffer(size_t width,
+                                                      size_t height,
+                                                      unsigned internalformat,
+                                                      unsigned usage,
+                                                      int32* id) OVERRIDE;
   virtual void DestroyGpuMemoryBuffer(int32 id) OVERRIDE;
-  virtual bool GenerateMailboxNames(unsigned num,
-                                    std::vector<gpu::Mailbox>* names) OVERRIDE;
   virtual uint32 InsertSyncPoint() OVERRIDE;
+  virtual uint32_t InsertFutureSyncPoint() OVERRIDE;
+  virtual void RetireSyncPoint(uint32_t sync_point) OVERRIDE;
   virtual void SignalSyncPoint(uint32 sync_point,
                                const base::Closure& callback) OVERRIDE;
   virtual void SignalQuery(uint32 query,
                            const base::Closure& callback) OVERRIDE;
-  virtual void SendManagedMemoryStats(const gpu::ManagedMemoryStats& stats)
-      OVERRIDE;
+  virtual void SetSurfaceVisible(bool visible) OVERRIDE;
+  virtual void Echo(const base::Closure& callback) OVERRIDE;
+  virtual uint32 CreateStreamTexture(uint32 texture_id) OVERRIDE;
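A sketch of the sync-point additions, using only the methods declared above; gpu_control and OnSyncPointPassed are hypothetical:

    // Ordinary sync point: inserted now, callback runs once it passes.
    uint32 sync_point = gpu_control->InsertSyncPoint();
    gpu_control->SignalSyncPoint(sync_point, base::Bind(&OnSyncPointPassed));

    // Future sync point: reserved up front, retired by the producer once
    // the guarded work has actually been issued.
    uint32_t future_point = gpu_control->InsertFutureSyncPoint();
    // ... issue the producing commands ...
    gpu_control->RetireSyncPoint(future_point);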
 
   int GetRouteID() const;
-  bool Echo(const base::Closure& callback);
   bool ProduceFrontBuffer(const gpu::Mailbox& mailbox);
   void SetChannelErrorCallback(const base::Closure& callback);
 
@@ -125,16 +126,12 @@ class CommandBufferProxyImpl
   void AddDeletionObserver(DeletionObserver* observer);
   void RemoveDeletionObserver(DeletionObserver* observer);
 
-  bool DiscardBackbuffer();
   bool EnsureBackbuffer();
 
-  // Sends an IPC message with the new state of surface visibility.
-  bool SetSurfaceVisible(bool visible);
-
   void SetOnConsoleMessageCallback(
       const GpuConsoleMessageCallback& callback);
 
-  void SetLatencyInfo(const ui::LatencyInfo& latency_info);
+  void SetLatencyInfo(const std::vector<ui::LatencyInfo>& latency_info);
 
   // TODO(apatrick): this is a temporary optimization while skia is calling
   // ContentGLContext::MakeCurrent prior to every GL call. It saves returning 6
@@ -144,10 +141,15 @@ class CommandBufferProxyImpl
 
   GpuChannelHost* channel() const { return channel_; }
 
+  base::SharedMemoryHandle GetSharedStateHandle() const {
+    return shared_state_shm_->handle();
+  }
+
  private:
-  typedef std::map<int32, gpu::Buffer> TransferBufferMap;
+  typedef std::map<int32, scoped_refptr<gpu::Buffer> > TransferBufferMap;
   typedef base::hash_map<uint32, base::Closure> SignalTaskMap;
-  typedef std::map<int32, gfx::GpuMemoryBuffer*> GpuMemoryBufferMap;
+  typedef base::ScopedPtrHashMap<int32, gfx::GpuMemoryBuffer>
+      GpuMemoryBufferMap;
 
   // Send an IPC message over the GPU channel. This is private to fully
   // encapsulate the channel; all callers of this function must explicitly
@@ -161,19 +163,12 @@ class CommandBufferProxyImpl
   void OnConsoleMessage(const GPUCommandBufferConsoleMessage& message);
   void OnSetMemoryAllocation(const gpu::MemoryAllocation& allocation);
   void OnSignalSyncPointAck(uint32 id);
-  void OnGenerateMailboxNamesReply(const std::vector<std::string>& names);
 
   // Try to read an updated copy of the state from shared memory.
   void TryUpdateState();
 
   // The shared memory area used to update state.
-  gpu::CommandBufferSharedState* shared_state() const {
-    return reinterpret_cast<gpu::CommandBufferSharedState*>(
-        shared_state_shm_->memory());
-  }
-
-  // Local cache of id to transfer buffer mapping.
-  TransferBufferMap transfer_buffers_;
+  gpu::CommandBufferSharedState* shared_state() const;
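The out-of-line definition presumably mirrors the inline body removed above; a sketch of what command_buffer_proxy_impl.cc would now contain:

    gpu::CommandBufferSharedState* CommandBufferProxyImpl::shared_state() const {
      return reinterpret_cast<gpu::CommandBufferSharedState*>(
          shared_state_shm_->memory());
    }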
 
   // Unowned list of DeletionObservers.
   ObserverList<DeletionObserver> deletion_observers_;
@@ -207,6 +202,10 @@ class CommandBufferProxyImpl
   // Local cache of id to gpu memory buffer mapping.
   GpuMemoryBufferMap gpu_memory_buffers_;
 
+  gpu::Capabilities capabilities_;
+
+  std::vector<ui::LatencyInfo> latency_info_;
+
   DISALLOW_COPY_AND_ASSIGN(CommandBufferProxyImpl);
 };