#include "content/common/gpu/gpu_watchdog.h"
#include "content/common/gpu/image_transport_surface.h"
#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
+#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_client.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/service/gpu_control_service.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/logger.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "ui/gl/gl_bindings.h"
#endif
namespace content {
+// Bookkeeping for one in-flight WaitForTokenInRange /
+// WaitForGetOffsetInRange request: the [start, end] range being waited on
+// and the delayed IPC reply, which is sent by CheckCompleteWaits() once the
+// wait is satisfied (or unconditionally when the stub is destroyed).
+struct WaitForCommandState {
+ WaitForCommandState(int32 start, int32 end, IPC::Message* reply)
+ : start(start), end(end), reply(reply) {}
+
+ int32 start;
+ int32 end;
+ scoped_ptr<IPC::Message> reply;
+};
+
namespace {
// The GpuCommandBufferMemoryTracker class provides a bridge between the
total_gpu_memory_(0) {
active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
FastSetActiveURL(active_url_, active_url_hash_);
+
+ gpu::gles2::ContextCreationAttribHelper attrib_parser;
+ attrib_parser.Parse(requested_attribs_);
+
if (share_group) {
context_group_ = share_group->context_group_;
+ DCHECK(context_group_->bind_generates_resource() ==
+ attrib_parser.bind_generates_resource_);
} else {
context_group_ = new gpu::gles2::ContextGroup(
mailbox_manager,
image_manager,
new GpuCommandBufferMemoryTracker(channel),
+ channel_->gpu_channel_manager()->shader_translator_cache(),
NULL,
- true);
+ attrib_parser.bind_generates_resource_);
}
use_virtualized_gl_context_ |=
}
bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
- devtools_gpu_instrumentation::ScopedGpuTask task(this);
+ devtools_gpu_instrumentation::ScopedGpuTask task(channel());
FastSetActiveURL(active_url_, active_url_hash_);
+ bool have_context = false;
// Ensure the appropriate GL context is current before handling any IPC
// messages directed at the command buffer. This ensures that the message
// handler can assume that the context is current (not necessary for
// Echo, RetireSyncPoint, or WaitSyncPoint).
- if (decoder_.get() &&
- message.type() != GpuCommandBufferMsg_Echo::ID &&
- message.type() != GpuCommandBufferMsg_GetStateFast::ID &&
+ if (decoder_.get() && message.type() != GpuCommandBufferMsg_Echo::ID &&
+ message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
+ message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
message.type() != GpuCommandBufferMsg_SetLatencyInfo::ID) {
if (!MakeCurrent())
return false;
+ have_context = true;
}
// Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
OnProduceFrontBuffer);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Echo, OnEcho);
- IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetState, OnGetState);
- IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetStateFast,
- OnGetStateFast);
+ IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
+ OnWaitForTokenInRange);
+ IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
+ OnWaitForGetOffsetInRange);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetLatencyInfo, OnSetLatencyInfo);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
OnRegisterTransferBuffer);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
OnDestroyTransferBuffer);
- IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetTransferBuffer,
- OnGetTransferBuffer);
IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
OnCreateVideoDecoder)
+ IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoEncoder,
+ OnCreateVideoEncoder)
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
OnSetSurfaceVisible)
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
- // Ensure that any delayed work that was created will be handled.
- ScheduleDelayedWork(kHandleMoreWorkPeriodMs);
+ CheckCompleteWaits();
+
+ if (have_context) {
+ // Ensure that any delayed work that was created will be handled.
+ ScheduleDelayedWork(kHandleMoreWorkPeriodMs);
+ }
DCHECK(handled);
return handled;
}
void GpuCommandBufferStub::Destroy() {
+ if (wait_for_token_) {
+ Send(wait_for_token_->reply.release());
+ wait_for_token_.reset();
+ }
+ if (wait_for_get_offset_) {
+ Send(wait_for_get_offset_->reply.release());
+ wait_for_get_offset_.reset();
+ }
if (handle_.is_null() && !active_url_.is_empty()) {
GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
bool have_context = false;
if (decoder_ && command_buffer_ &&
- command_buffer_->GetState().error != gpu::error::kLostContext)
+ command_buffer_->GetLastState().error != gpu::error::kLostContext)
have_context = decoder_->MakeCurrent();
FOR_EACH_OBSERVER(DestructionObserver,
destruction_observers_,
command_buffer_.reset(new gpu::CommandBufferService(
context_group_->transfer_buffer_manager()));
- if (!command_buffer_->Initialize()) {
- DLOG(ERROR) << "CommandBufferService failed to initialize.\n";
- OnInitializeFailed(reply_message);
- return;
- }
+ bool result = command_buffer_->Initialize();
+ DCHECK(result);
decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));
if (!handle_.is_null()) {
#if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
if (software_) {
- DLOG(ERROR) << "No software support.\n";
+ LOG(ERROR) << "No software support.";
OnInitializeFailed(reply_message);
return;
}
}
if (!surface_.get()) {
- DLOG(ERROR) << "Failed to create surface.\n";
+ DLOG(ERROR) << "Failed to create surface.";
OnInitializeFailed(reply_message);
return;
}
channel_->share_group(),
channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
gpu_preference_);
+ if (!context.get()) {
+ DLOG(ERROR) << "Failed to create shared context for virtualization.";
+ OnInitializeFailed(reply_message);
+ return;
+ }
channel_->share_group()->SetSharedContext(context.get());
}
// This should be a non-virtual GL context.
channel_->share_group(), surface_.get(), gpu_preference_);
}
if (!context.get()) {
- DLOG(ERROR) << "Failed to create context.\n";
+ DLOG(ERROR) << "Failed to create context.";
OnInitializeFailed(reply_message);
return;
}
return;
}
- gpu_control_.reset(
- new gpu::GpuControlService(context_group_->image_manager(),
- NULL,
- context_group_->mailbox_manager(),
- NULL,
- decoder_->GetCapabilities()));
+ gpu_control_service_.reset(
+ new gpu::GpuControlService(context_group_->image_manager(), NULL));
if (CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableGPUServiceLogging)) {
base::Unretained(this)));
}
- if (!command_buffer_->SetSharedStateBuffer(shared_state_shm.Pass())) {
- DLOG(ERROR) << "Failed to map shared stae buffer.";
+ const size_t kSharedStateSize = sizeof(gpu::CommandBufferSharedState);
+ if (!shared_state_shm->Map(kSharedStateSize)) {
+ DLOG(ERROR) << "Failed to map shared state buffer.";
OnInitializeFailed(reply_message);
return;
}
+ command_buffer_->SetSharedStateBuffer(gpu::MakeBackingFromSharedMemory(
+ shared_state_shm.Pass(), kSharedStateSize));
GpuCommandBufferMsg_Initialize::WriteReplyParams(
- reply_message, true, gpu_control_->GetCapabilities());
+ reply_message, true, decoder_->GetCapabilities());
Send(reply_message);
if (handle_.is_null() && !active_url_.is_empty()) {
latency_info_callback_.Run(latency_info);
}
-void GpuCommandBufferStub::OnCreateStreamTexture(uint32 texture_id,
- int32* stream_id) {
+// Signature change: the stream route ID is now chosen by the caller and
+// passed in; the GPU side only reports success/failure instead of
+// allocating and returning an ID.
+void GpuCommandBufferStub::OnCreateStreamTexture(
+ uint32 texture_id, int32 stream_id, bool* succeeded) {
#if defined(OS_ANDROID)
- *stream_id = StreamTexture::Create(this, texture_id);
+ *succeeded = StreamTexture::Create(this, texture_id, stream_id);
#else
- *stream_id = 0;
+ // Stream textures only exist on Android; fail on every other platform.
+ *succeeded = false;
#endif
}
decoder_->ProduceFrontBuffer(mailbox);
}
-void GpuCommandBufferStub::OnGetState(IPC::Message* reply_message) {
- TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetState");
- if (command_buffer_) {
- gpu::CommandBuffer::State state = command_buffer_->GetState();
- CheckContextLost();
- GpuCommandBufferMsg_GetState::WriteReplyParams(reply_message, state);
- } else {
- DLOG(ERROR) << "no command_buffer.";
- reply_message->set_reply_error();
- }
- Send(reply_message);
-}
-
void GpuCommandBufferStub::OnParseError() {
TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
DCHECK(command_buffer_.get());
- gpu::CommandBuffer::State state = command_buffer_->GetState();
+ gpu::CommandBuffer::State state = command_buffer_->GetLastState();
IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
route_id_, state.context_lost_reason);
msg->set_unblock(true);
CheckContextLost();
}
-void GpuCommandBufferStub::OnGetStateFast(IPC::Message* reply_message) {
- TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetStateFast");
+// Replaces the synchronous GetStateFast poll: the delayed reply is parked
+// in wait_for_token_ and only sent (by CheckCompleteWaits) once the token
+// reaches [start, end] or the command buffer enters an error state.
+void GpuCommandBufferStub::OnWaitForTokenInRange(int32 start,
+ int32 end,
+ IPC::Message* reply_message) {
+ TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForTokenInRange");
DCHECK(command_buffer_.get());
CheckContextLost();
- gpu::CommandBuffer::State state = command_buffer_->GetState();
- GpuCommandBufferMsg_GetStateFast::WriteReplyParams(reply_message, state);
- Send(reply_message);
+ // NOTE(review): a second wait overwrites wait_for_token_, destroying the
+ // previous delayed reply without sending it — that earlier waiter would
+ // hang. Only logged here; presumably clients never double-wait — confirm.
+ if (wait_for_token_)
+ LOG(ERROR) << "Got WaitForToken command while currently waiting for token.";
+ wait_for_token_ =
+ make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
+ // The wait may already be satisfiable; try to answer immediately.
+ CheckCompleteWaits();
+}
+
+// Parks reply_message until the command buffer's get offset reaches
+// [start, end] (or an error is set); CheckCompleteWaits sends the reply.
+void GpuCommandBufferStub::OnWaitForGetOffsetInRange(
+ int32 start,
+ int32 end,
+ IPC::Message* reply_message) {
+ TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForGetOffsetInRange");
+ DCHECK(command_buffer_.get());
+ CheckContextLost();
+ // NOTE(review): as with OnWaitForTokenInRange, a second wait silently
+ // replaces the first one's un-sent reply; only an error is logged.
+ if (wait_for_get_offset_) {
+ LOG(ERROR)
+ << "Got WaitForGetOffset command while currently waiting for offset.";
+ }
+ wait_for_get_offset_ =
+ make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
+ // The wait may already be satisfiable; try to answer immediately.
+ CheckCompleteWaits();
+}
+
+// Completes any pending token/get-offset wait whose condition now holds.
+// A wait finishes when the current value falls inside the requested range,
+// or unconditionally once the command buffer reports an error — so a
+// client blocked on a lost context is never left hanging.
+void GpuCommandBufferStub::CheckCompleteWaits() {
+ if (wait_for_token_ || wait_for_get_offset_) {
+ gpu::CommandBuffer::State state = command_buffer_->GetLastState();
+ if (wait_for_token_ &&
+ (gpu::CommandBuffer::InRange(
+ wait_for_token_->start, wait_for_token_->end, state.token) ||
+ state.error != gpu::error::kNoError)) {
+ // Refresh shared state (ReportState -> UpdateState) before replying.
+ ReportState();
+ GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
+ wait_for_token_->reply.get(), state);
+ Send(wait_for_token_->reply.release());
+ wait_for_token_.reset();
+ }
+ if (wait_for_get_offset_ &&
+ (gpu::CommandBuffer::InRange(wait_for_get_offset_->start,
+ wait_for_get_offset_->end,
+ state.get_offset) ||
+ state.error != gpu::error::kNoError)) {
+ ReportState();
+ GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
+ wait_for_get_offset_->reply.get(), state);
+ Send(wait_for_get_offset_->reply.release());
+ wait_for_get_offset_.reset();
+ }
+ }
}
-void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset,
- uint32 flush_count) {
- TRACE_EVENT1("gpu", "GpuCommandBufferStub::OnAsyncFlush",
- "put_offset", put_offset);
+void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset, uint32 flush_count) {
+ TRACE_EVENT1(
+ "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);
DCHECK(command_buffer_.get());
if (flush_count - last_flush_count_ < 0x8000000U) {
last_flush_count_ = flush_count;
base::SharedMemoryHandle transfer_buffer,
uint32 size) {
TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");
- base::SharedMemory shared_memory(transfer_buffer, false);
- if (command_buffer_)
- command_buffer_->RegisterTransferBuffer(id, &shared_memory, size);
+ // Take ownership of the memory and map it into this process.
+ // This validates the size.
+ scoped_ptr<base::SharedMemory> shared_memory(
+ new base::SharedMemory(transfer_buffer, false));
+ if (!shared_memory->Map(size)) {
+ DVLOG(0) << "Failed to map shared memory.";
+ return;
+ }
+
+ if (command_buffer_) {
+ command_buffer_->RegisterTransferBuffer(
+ id, gpu::MakeBackingFromSharedMemory(shared_memory.Pass(), size));
+ }
}
void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
command_buffer_->DestroyTransferBuffer(id);
}
-void GpuCommandBufferStub::OnGetTransferBuffer(
- int32 id,
- IPC::Message* reply_message) {
- TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetTransferBuffer");
- if (command_buffer_) {
- base::SharedMemoryHandle transfer_buffer = base::SharedMemoryHandle();
- uint32 size = 0;
-
- gpu::Buffer buffer = command_buffer_->GetTransferBuffer(id);
- if (buffer.shared_memory) {
-#if defined(OS_WIN)
- transfer_buffer = NULL;
- BrokerDuplicateHandle(buffer.shared_memory->handle(),
- channel_->renderer_pid(), &transfer_buffer, FILE_MAP_READ |
- FILE_MAP_WRITE, 0);
- DCHECK(transfer_buffer != NULL);
-#else
- buffer.shared_memory->ShareToProcess(channel_->renderer_pid(),
- &transfer_buffer);
-#endif
- size = buffer.size;
- }
-
- GpuCommandBufferMsg_GetTransferBuffer::WriteReplyParams(reply_message,
- transfer_buffer,
- size);
- } else {
- reply_message->set_reply_error();
- }
- Send(reply_message);
-}
-
+// Invoked per processed command; pokes the GPU watchdog so long-running
+// but progressing command streams are not treated as hangs — TODO confirm
+// CheckArmed() semantics against gpu_watchdog.h.
void GpuCommandBufferStub::OnCommandProcessed() {
if (watchdog_)
watchdog_->CheckArmed();
}
-void GpuCommandBufferStub::ReportState() {
- if (!CheckContextLost())
- command_buffer_->UpdateState();
-}
+// ReportState no longer gates on CheckContextLost(); callers are expected
+// to have checked for loss themselves (e.g. CheckCompleteWaits runs only
+// after CheckContextLost in the wait handlers).
+void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }
void GpuCommandBufferStub::PutChanged() {
FastSetActiveURL(active_url_, active_url_hash_);
void GpuCommandBufferStub::OnCreateVideoDecoder(
media::VideoCodecProfile profile,
+ int32 decoder_route_id,
IPC::Message* reply_message) {
TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
- int decoder_route_id = channel_->GenerateRouteID();
GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
decoder_route_id, this, channel_->io_message_loop());
decoder->Initialize(profile, reply_message);
// self-delete during destruction of this stub.
}
+// Creates a GpuVideoEncodeAccelerator on the caller-supplied route ID and
+// starts its initialization; presumably the encoder answers the delayed
+// reply_message when Initialize completes — confirm in
+// GpuVideoEncodeAccelerator.
+void GpuCommandBufferStub::OnCreateVideoEncoder(
+ media::VideoFrame::Format input_format,
+ const gfx::Size& input_visible_size,
+ media::VideoCodecProfile output_profile,
+ uint32 initial_bitrate,
+ int32 encoder_route_id,
+ IPC::Message* reply_message) {
+ TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoEncoder");
+ GpuVideoEncodeAccelerator* encoder =
+ new GpuVideoEncodeAccelerator(encoder_route_id, this);
+ encoder->Initialize(input_format,
+ input_visible_size,
+ output_profile,
+ initial_bitrate,
+ reply_message);
+ // encoder is registered as a DestructionObserver of this stub and will
+ // self-delete during destruction of this stub.
+}
+
void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
if (memory_manager_client_state_)
void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
sync_points_.pop_front();
+ // Push this context's texture updates before the sync point retires, so
+ // they are ordered ahead of anyone waiting on it. NOTE(review): assumes
+ // UsesSync() means cross-context mailbox synchronization — confirm; a
+ // MakeCurrent failure (lost context) silently skips the push.
+ if (context_group_->mailbox_manager()->UsesSync() && MakeCurrent())
+ context_group_->mailbox_manager()->PushTextureUpdates();
GpuChannelManager* manager = channel_->gpu_channel_manager();
manager->sync_point_manager()->RetireSyncPoint(sync_point);
}
bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
+ if (!sync_point)
+ return true;
GpuChannelManager* manager = channel_->gpu_channel_manager();
if (manager->sync_point_manager()->IsSyncPointRetired(sync_point))
return true;
uint32 height,
uint32 internalformat) {
TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterGpuMemoryBuffer");
- if (gpu_control_) {
- gpu_control_->RegisterGpuMemoryBuffer(id,
- gpu_memory_buffer,
- width,
- height,
- internalformat);
+#if defined(OS_ANDROID)
+ // Verify that renderer is not trying to use a surface texture it doesn't own.
+ if (gpu_memory_buffer.type == gfx::SURFACE_TEXTURE_BUFFER &&
+ gpu_memory_buffer.surface_texture_id.secondary_id !=
+ channel()->client_id()) {
+ LOG(ERROR) << "Illegal surface texture ID for renderer.";
+ return;
+ }
+#endif
+ if (gpu_control_service_) {
+ gpu_control_service_->RegisterGpuMemoryBuffer(
+ id, gpu_memory_buffer, width, height, internalformat);
}
}
void GpuCommandBufferStub::OnDestroyGpuMemoryBuffer(int32 id) {
TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyGpuMemoryBuffer");
- if (gpu_control_)
- gpu_control_->DestroyGpuMemoryBuffer(id);
+ // Member renamed gpu_control_ -> gpu_control_service_, and the call is
+ // now Unregister rather than Destroy, matching the slimmed-down
+ // GpuControlService constructed in OnInitialize.
+ if (gpu_control_service_)
+ gpu_control_service_->UnregisterGpuMemoryBuffer(id);
}
void GpuCommandBufferStub::SendConsoleMessage(
bool GpuCommandBufferStub::CheckContextLost() {
DCHECK(command_buffer_);
- gpu::CommandBuffer::State state = command_buffer_->GetState();
+ gpu::CommandBuffer::State state = command_buffer_->GetLastState();
bool was_lost = state.error == gpu::error::kLostContext;
// Lose all other contexts if the reset was triggered by the robustness
// extension instead of being synthetic.
(gfx::GLContext::LosesAllContextsOnContextLost() ||
use_virtualized_gl_context_))
channel_->LoseAllContexts();
+ CheckCompleteWaits();
return was_lost;
}
void GpuCommandBufferStub::MarkContextLost() {
if (!command_buffer_ ||
- command_buffer_->GetState().error == gpu::error::kLostContext)
+ command_buffer_->GetLastState().error == gpu::error::kLostContext)
return;
command_buffer_->SetContextLostReason(gpu::error::kUnknown);