#ifndef GPU_COMMAND_BUFFER_SERVICE_IN_PROCESS_COMMAND_BUFFER_H_
#define GPU_COMMAND_BUFFER_SERVICE_IN_PROCESS_COMMAND_BUFFER_H_
+#include <map>
#include <vector>
+#include "base/atomic_sequence_num.h"
#include "base/callback.h"
#include "base/compiler_specific.h"
+#include "base/containers/scoped_ptr_hash_map.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
+#include "gpu/command_buffer/client/gpu_control.h"
#include "gpu/command_buffer/common/command_buffer.h"
-#include "gpu/command_buffer/common/gpu_control.h"
#include "gpu/gpu_export.h"
#include "ui/gfx/gpu_memory_buffer.h"
#include "ui/gfx/native_widget_types.h"
namespace gfx {
class GLContext;
+class GLShareGroup;
class GLSurface;
class Size;
}
namespace gles2 {
class GLES2Decoder;
+class MailboxManager;
+class ShaderTranslatorCache;
}
-class GpuMemoryBufferFactory;
+class CommandBufferServiceBase;
+class GpuMemoryBufferManager;
class GpuScheduler;
+class ImageFactory;
class TransferBufferManagerInterface;
// This class provides a thread-safe interface to the global GPU service (for
class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
public GpuControl {
public:
- InProcessCommandBuffer();
- virtual ~InProcessCommandBuffer();
-
- // Used to override the GPU thread with explicit scheduling.
- // (By default an internal GPU thread will be spawned to handle all GL work
- // and the two functions are unused.)
- // The callback will be called from different client threads. After the
- // callback is issued, the client is expected to eventually call
- // ProcessGpuWorkOnCurrentThread(). The latter cannot be called from different
- // threads.
- // The callback needs to be set before any context is created.
- static void SetScheduleCallback(const base::Closure& callback);
- static void ProcessGpuWorkOnCurrentThread();
-
- static void EnableVirtualizedContext();
- static void SetGpuMemoryBufferFactory(GpuMemoryBufferFactory* factory);
+ class Service;
+ explicit InProcessCommandBuffer(const scoped_refptr<Service>& service);
+ ~InProcessCommandBuffer() override;
// If |surface| is not NULL, use it directly; in this case, the command
// buffer gpu thread must be the same as the client thread. Otherwise create
// a new GLSurface.
bool Initialize(scoped_refptr<gfx::GLSurface> surface,
bool is_offscreen,
- bool share_resources,
gfx::AcceleratedWidget window,
const gfx::Size& size,
const std::vector<int32>& attribs,
gfx::GpuPreference gpu_preference,
const base::Closure& context_lost_callback,
- unsigned int share_group_id);
+ InProcessCommandBuffer* share_group,
+ GpuMemoryBufferManager* gpu_memory_buffer_manager,
+ ImageFactory* image_factory);
void Destroy();
// CommandBuffer implementation:
- virtual bool Initialize() OVERRIDE;
- virtual State GetState() OVERRIDE;
- virtual State GetLastState() OVERRIDE;
- virtual int32 GetLastToken() OVERRIDE;
- virtual void Flush(int32 put_offset) OVERRIDE;
- virtual State FlushSync(int32 put_offset, int32 last_known_get) OVERRIDE;
- virtual void SetGetBuffer(int32 shm_id) OVERRIDE;
- virtual void SetGetOffset(int32 get_offset) OVERRIDE;
- virtual gpu::Buffer CreateTransferBuffer(size_t size, int32* id) OVERRIDE;
- virtual void DestroyTransferBuffer(int32 id) OVERRIDE;
- virtual gpu::Buffer GetTransferBuffer(int32 id) OVERRIDE;
- virtual void SetToken(int32 token) OVERRIDE;
- virtual void SetParseError(gpu::error::Error error) OVERRIDE;
- virtual void SetContextLostReason(
- gpu::error::ContextLostReason reason) OVERRIDE;
- virtual gpu::error::Error GetLastError() OVERRIDE;
+ bool Initialize() override;
+ State GetLastState() override;
+ int32 GetLastToken() override;
+ void Flush(int32 put_offset) override;
+ void WaitForTokenInRange(int32 start, int32 end) override;
+ void WaitForGetOffsetInRange(int32 start, int32 end) override;
+ void SetGetBuffer(int32 shm_id) override;
+ scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
+ int32* id) override;
+ void DestroyTransferBuffer(int32 id) override;
+ gpu::error::Error GetLastError() override;
// GpuControl implementation:
- virtual gpu::Capabilities GetCapabilities() OVERRIDE;
- virtual gfx::GpuMemoryBuffer* CreateGpuMemoryBuffer(
- size_t width,
- size_t height,
- unsigned internalformat,
- int32* id) OVERRIDE;
- virtual void DestroyGpuMemoryBuffer(int32 id) OVERRIDE;
- virtual bool GenerateMailboxNames(unsigned num,
- std::vector<gpu::Mailbox>* names) OVERRIDE;
- virtual uint32 InsertSyncPoint() OVERRIDE;
- virtual void SignalSyncPoint(uint32 sync_point,
- const base::Closure& callback) OVERRIDE;
- virtual void SignalQuery(uint32 query,
- const base::Closure& callback) OVERRIDE;
- virtual void SetSurfaceVisible(bool visible) OVERRIDE;
- virtual void SendManagedMemoryStats(const gpu::ManagedMemoryStats& stats)
- OVERRIDE;
- virtual void Echo(const base::Closure& callback) OVERRIDE;
- virtual uint32 CreateStreamTexture(uint32 texture_id) OVERRIDE;
+ gpu::Capabilities GetCapabilities() override;
+ int32 CreateImage(ClientBuffer buffer,
+ size_t width,
+ size_t height,
+ unsigned internalformat) override;
+ void DestroyImage(int32 id) override;
+ int32 CreateGpuMemoryBufferImage(size_t width,
+ size_t height,
+ unsigned internalformat,
+ unsigned usage) override;
+ uint32 InsertSyncPoint() override;
+ uint32 InsertFutureSyncPoint() override;
+ void RetireSyncPoint(uint32 sync_point) override;
+ void SignalSyncPoint(uint32 sync_point,
+ const base::Closure& callback) override;
+ void SignalQuery(uint32 query_id, const base::Closure& callback) override;
+ void SetSurfaceVisible(bool visible) override;
+ uint32 CreateStreamTexture(uint32 texture_id) override;
// The serializer interface to the GPU service (i.e. thread).
- class SchedulerClient {
+ class Service {
public:
- virtual ~SchedulerClient() {}
+ Service();
+ virtual ~Service();
- // Queues a task to run as soon as possible.
- virtual void QueueTask(const base::Closure& task) = 0;
+ virtual void AddRef() const = 0;
+ virtual void Release() const = 0;
- // Schedules |callback| to run at an appropriate time for performing idle
- // work.
- virtual void ScheduleIdleWork(const base::Closure& task) = 0;
+ // Queues a task to run as soon as possible.
+ virtual void ScheduleTask(const base::Closure& task) = 0;
+
+ // Schedules |callback| to run at an appropriate time for performing idle
+ // work.
+ virtual void ScheduleIdleWork(const base::Closure& task) = 0;
+
+ virtual bool UseVirtualizedGLContexts() = 0;
+ virtual scoped_refptr<gles2::ShaderTranslatorCache>
+ shader_translator_cache() = 0;
+ scoped_refptr<gles2::MailboxManager> mailbox_manager();
+
+ private:
+ scoped_refptr<gles2::MailboxManager> mailbox_manager_;
};
#if defined(OS_ANDROID)
const std::vector<int32>& attribs;
gfx::GpuPreference gpu_preference;
gpu::Capabilities* capabilities; // Output.
+ InProcessCommandBuffer* context_group;
+ ImageFactory* image_factory;
InitializeOnGpuThreadParams(bool is_offscreen,
gfx::AcceleratedWidget window,
const gfx::Size& size,
const std::vector<int32>& attribs,
gfx::GpuPreference gpu_preference,
- gpu::Capabilities* capabilities)
+ gpu::Capabilities* capabilities,
+ InProcessCommandBuffer* share_group,
+ ImageFactory* image_factory)
: is_offscreen(is_offscreen),
window(window),
size(size),
attribs(attribs),
gpu_preference(gpu_preference),
- capabilities(capabilities) {}
+ capabilities(capabilities),
+ context_group(share_group),
+ image_factory(image_factory) {}
};
bool InitializeOnGpuThread(const InitializeOnGpuThreadParams& params);
bool DestroyOnGpuThread();
void FlushOnGpuThread(int32 put_offset);
+ void ScheduleIdleWorkOnGpuThread();
uint32 CreateStreamTextureOnGpuThread(uint32 client_texture_id);
bool MakeCurrent();
- bool IsContextLost();
base::Closure WrapCallback(const base::Closure& callback);
State GetStateFast();
- void QueueTask(const base::Closure& task) { queue_->QueueTask(task); }
+ void QueueTask(const base::Closure& task) { service_->ScheduleTask(task); }
void CheckSequencedThread();
+ void RetireSyncPointOnGpuThread(uint32 sync_point);
+ void SignalSyncPointOnGpuThread(uint32 sync_point,
+ const base::Closure& callback);
+ bool WaitSyncPointOnGpuThread(uint32 sync_point);
+ void SignalQueryOnGpuThread(unsigned query_id, const base::Closure& callback);
+ void DestroyTransferBufferOnGpuThread(int32 id);
+ void CreateImageOnGpuThread(int32 id,
+ const gfx::GpuMemoryBufferHandle& handle,
+ const gfx::Size& size,
+ gfx::GpuMemoryBuffer::Format format,
+ uint32 internalformat);
+ void DestroyImageOnGpuThread(int32 id);
// Callbacks:
void OnContextLost();
void OnResizeView(gfx::Size size, float scale_factor);
bool GetBufferChanged(int32 transfer_buffer_id);
void PumpCommands();
- void ScheduleMoreIdleWork();
+ void PerformIdleWork();
+
+ static scoped_refptr<Service> GetDefaultService();
// Members accessed on the gpu thread (possibly with the exception of
// creation):
bool context_lost_;
- bool share_resources_;
scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
scoped_ptr<GpuScheduler> gpu_scheduler_;
scoped_ptr<gles2::GLES2Decoder> decoder_;
scoped_refptr<gfx::GLContext> context_;
scoped_refptr<gfx::GLSurface> surface_;
base::Closure context_lost_callback_;
- unsigned int share_group_id_;
+ bool idle_work_pending_; // Used to throttle PerformIdleWork.
+ ImageFactory* image_factory_;
// Members accessed on the client thread:
State last_state_;
int32 last_put_offset_;
gpu::Capabilities capabilities_;
+ GpuMemoryBufferManager* gpu_memory_buffer_manager_;
+ base::AtomicSequenceNumber next_image_id_;
// Accessed on both threads:
- scoped_ptr<CommandBuffer> command_buffer_;
+ scoped_ptr<CommandBufferServiceBase> command_buffer_;
base::Lock command_buffer_lock_;
base::WaitableEvent flush_event_;
- scoped_ptr<SchedulerClient> queue_;
+ scoped_refptr<Service> service_;
State state_after_last_flush_;
base::Lock state_after_last_flush_lock_;
- scoped_ptr<GpuControl> gpu_control_;
+ scoped_refptr<gfx::GLShareGroup> gl_share_group_;
#if defined(OS_ANDROID)
scoped_ptr<StreamTextureManagerInProcess> stream_texture_manager_;