1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "gpu/command_buffer/service/gpu_scheduler.h"
8 #include "base/command_line.h"
9 #include "base/compiler_specific.h"
10 #include "base/debug/trace_event.h"
11 #include "base/message_loop/message_loop.h"
12 #include "base/time/time.h"
13 #include "ui/gl/gl_bindings.h"
14 #include "ui/gl/gl_fence.h"
15 #include "ui/gl/gl_switches.h"
18 #include "base/win/windows_version.h"
21 using ::base::SharedMemory;
// Milliseconds to wait for a GPU fence before forcibly running its deferred
// task anyway (see PollUnscheduleFences).
25 const int64 kUnscheduleFenceTimeOutDelay = 10000;
// Milliseconds before an unscheduled scheduler is forced back into the
// scheduled state on pre-Vista Windows (see SetScheduled/RescheduleTimeOut).
28 const int64 kRescheduleTimeOutDelay = 1000;
// Constructor. Binds the scheduler to the command buffer it drains and the
// decoder whose state it reports. Starts scheduled (unscheduled_count_ == 0),
// with no pending reschedules and not preempted.
// NOTE(review): initializers for handler_ and decoder_ appear elided from this
// view (original lines 35-36) — confirm against the full file.
31 GpuScheduler::GpuScheduler(CommandBuffer* command_buffer,
32 AsyncAPIInterface* handler,
33 gles2::GLES2Decoder* decoder)
34 : command_buffer_(command_buffer),
37 unscheduled_count_(0),
38 rescheduled_count_(0),
39 reschedule_task_factory_(this),
40 was_preempted_(false) {}
// Destructor. Body elided from this view (original line 43).
42 GpuScheduler::~GpuScheduler() {
// Drains newly-submitted commands from the command buffer. Syncs the parser's
// put offset with the command buffer state, waits on any outstanding
// unschedule fences, then processes commands until the parser is empty, an
// error occurs, or the scheduler becomes unscheduled. Reports parse errors and
// context loss back to the command buffer, and accumulates processing time on
// the decoder.
// NOTE(review): several early-return guards are elided from this view
// (original lines 46-69) — the control flow below is incomplete as shown.
45 void GpuScheduler::PutChanged() {
47 "gpu", "GpuScheduler:PutChanged",
48 "decoder", decoder_ ? decoder_->GetLogger()->GetLogPrefix() : "None");
50 CommandBuffer::State state = command_buffer_->GetState();
52 // If there is no parser, exit.
54 DCHECK_EQ(state.get_offset, state.put_offset);
58 parser_->set_put(state.put_offset);
59 if (state.error != error::kNoError)
62 // Check that the GPU has passed all fences.
63 if (!PollUnscheduleFences())
66 // One of the unschedule fence tasks might have unscheduled us.
// Time the command-processing loop so it can be charged to the decoder below.
70 base::TimeTicks begin_time(base::TimeTicks::HighResNow());
71 error::Error error = error::kNoError;
72 while (!parser_->IsEmpty()) {
76 DCHECK(IsScheduled());
77 DCHECK(unschedule_fences_.empty());
79 error = parser_->ProcessCommand();
// kDeferCommandUntilLater means the command unscheduled us; it must only be
// returned while unscheduled_count_ is positive.
81 if (error == error::kDeferCommandUntilLater) {
82 DCHECK_GT(unscheduled_count_, 0);
86 // TODO(piman): various classes duplicate various pieces of state, leading
87 // to needlessly complex update logic. It should be possible to simply
88 // share the state across all of them.
89 command_buffer_->SetGetOffset(static_cast<int32>(parser_->get()));
91 if (error::IsError(error)) {
92 LOG(ERROR) << "[" << decoder_ << "] "
93 << "GPU PARSE ERROR: " << error;
94 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
95 command_buffer_->SetParseError(error);
99 if (!command_processed_callback_.is_null())
100 command_processed_callback_.Run();
102 if (unscheduled_count_ > 0)
// A command may lose the context without reporting a parse error; surface
// that as kLostContext so the client finds out.
107 if (!error::IsError(error) && decoder_->WasContextLost()) {
108 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
109 command_buffer_->SetParseError(error::kLostContext);
111 decoder_->AddProcessingCommandsTime(
112 base::TimeTicks::HighResNow() - begin_time);
// Adjusts the unscheduled count: scheduled == true decrements it, false
// increments it. Fires the scheduling-changed callback on transitions to/from
// zero, and on pre-Vista Windows posts a delayed task to force rescheduling
// after kRescheduleTimeOutDelay ms.
// NOTE(review): the else/early-return structure between the branches is
// partially elided from this view — verify against the full file.
116 void GpuScheduler::SetScheduled(bool scheduled) {
117 TRACE_EVENT2("gpu", "GpuScheduler:SetScheduled", "this", this,
118 "new unscheduled_count_",
119 unscheduled_count_ + (scheduled? -1 : 1));
121 // If the scheduler was rescheduled after a timeout, ignore the subsequent
122 // calls to SetScheduled when they eventually arrive until they are all
// accounted for (each one consumes a rescheduled_count_ credit instead).
124 if (rescheduled_count_ > 0) {
125 --rescheduled_count_;
128 --unscheduled_count_;
131 DCHECK_GE(unscheduled_count_, 0);
133 if (unscheduled_count_ == 0) {
134 TRACE_EVENT_ASYNC_END1("gpu", "ProcessingSwap", this,
135 "GpuScheduler", this);
136 // When the scheduler transitions from the unscheduled to the scheduled
137 // state, cancel the task that would reschedule it after a timeout.
138 reschedule_task_factory_.InvalidateWeakPtrs();
140 if (!scheduling_changed_callback_.is_null())
141 scheduling_changed_callback_.Run(true);
144 ++unscheduled_count_;
145 if (unscheduled_count_ == 1) {
146 TRACE_EVENT_ASYNC_BEGIN1("gpu", "ProcessingSwap", this,
147 "GpuScheduler", this);
149 if (base::win::GetVersion() < base::win::VERSION_VISTA) {
150 // When the scheduler transitions from scheduled to unscheduled, post a
151 // delayed task that will force it back into a scheduled state after
152 // a timeout. This should only be necessary on pre-Vista.
153 base::MessageLoop::current()->PostDelayedTask(
155 base::Bind(&GpuScheduler::RescheduleTimeOut,
156 reschedule_task_factory_.GetWeakPtr()),
157 base::TimeDelta::FromMilliseconds(kRescheduleTimeOutDelay));
160 if (!scheduling_changed_callback_.is_null())
161 scheduling_changed_callback_.Run(false);
// True when no SetScheduled(false) calls are outstanding.
166 bool GpuScheduler::IsScheduled() {
167 return unscheduled_count_ == 0;
// True if the scheduler still has work pending: outstanding unschedule fences,
// decoder queries in flight, or (final disjunct elided from this view —
// original line 173) presumably unparsed commands.
170 bool GpuScheduler::HasMoreWork() {
171 return !unschedule_fences_.empty() ||
172 (decoder_ && decoder_->ProcessPendingQueries()) ||
// Registers the callback invoked (with the new scheduled state) whenever the
// scheduler transitions between scheduled and unscheduled in SetScheduled.
176 void GpuScheduler::SetSchedulingChangedCallback(
177 const SchedulingChangedCallback& callback) {
178 scheduling_changed_callback_ = callback;
// Resolves a shared-memory id to its transfer buffer via the command buffer.
181 Buffer GpuScheduler::GetSharedMemoryBuffer(int32 shm_id) {
182 return command_buffer_->GetTransferBuffer(shm_id);
// Forwards the latest processed token to the command buffer.
185 void GpuScheduler::set_token(int32 token) {
186 command_buffer_->SetToken(token);
// Points the command parser at a new ring buffer identified by
// transfer_buffer_id, lazily creating the parser on first use.
// NOTE(review): the failure return and the parser re-configuration path are
// elided from this view (original lines 192-208).
189 bool GpuScheduler::SetGetBuffer(int32 transfer_buffer_id) {
190 Buffer ring_buffer = command_buffer_->GetTransferBuffer(transfer_buffer_id);
// A null ptr means the id did not resolve to a valid buffer.
191 if (!ring_buffer.ptr) {
195 if (!parser_.get()) {
196 parser_.reset(new CommandParser(handler_));
// Moves the parser's get offset and mirrors it into the command buffer on
// success. Return statements are elided from this view (original lines
// 212-215).
209 bool GpuScheduler::SetGetOffset(int32 offset) {
210 if (parser_->set_get(offset)) {
211 command_buffer_->SetGetOffset(static_cast<int32>(parser_->get()));
// Returns the parser's current get offset.
217 int32 GpuScheduler::GetGetOffset() {
218 return parser_->get();
// Registers the closure run after each command is processed in PutChanged.
221 void GpuScheduler::SetCommandProcessedCallback(
222 const base::Closure& callback) {
223 command_processed_callback_ = callback;
// Queues |task| behind a freshly issued GL fence; PollUnscheduleFences runs
// it once the fence completes (or times out). The tail of this function is
// elided from this view (original lines 229-230).
226 void GpuScheduler::DeferToFence(base::Closure task) {
227 unschedule_fences_.push(make_linked_ptr(
228 new UnscheduleFence(gfx::GLFence::Create(), task)));
// Runs the deferred tasks for fences that have completed or exceeded
// kUnscheduleFenceTimeOutDelay. If the front fence object is null (fence
// creation unsupported), the fallback loop runs every pending task
// unconditionally. Return statements and the else branch joining the two
// loops are elided from this view.
232 bool GpuScheduler::PollUnscheduleFences() {
// Nothing pending: trivially done (return elided, original line 234).
233 if (unschedule_fences_.empty())
236 if (unschedule_fences_.front()->fence.get()) {
237 base::Time now = base::Time::Now();
238 base::TimeDelta timeout =
239 base::TimeDelta::FromMilliseconds(kUnscheduleFenceTimeOutDelay);
// Drain fences in FIFO order; the timeout guards against a fence that
// never signals.
241 while (!unschedule_fences_.empty()) {
242 const UnscheduleFence& fence = *unschedule_fences_.front();
243 if (fence.fence->HasCompleted() ||
244 now - fence.issue_time > timeout) {
245 unschedule_fences_.front()->task.Run();
246 unschedule_fences_.pop();
// Fallback path (no fence support): run all deferred tasks immediately.
255 while (!unschedule_fences_.empty()) {
256 unschedule_fences_.front()->task.Run();
257 unschedule_fences_.pop();
// Reports whether the preemption flag is currently set; when there is no flag
// the scheduler is never preempted (early return elided, original line 267).
// Emits a trace counter only on edges of the preempted state, tracked via
// was_preempted_.
265 bool GpuScheduler::IsPreempted() {
266 if (!preemption_flag_.get())
269 if (!was_preempted_ && preemption_flag_->IsSet()) {
270 TRACE_COUNTER_ID1("gpu", "GpuScheduler::Preempted", this, 1);
271 was_preempted_ = true;
272 } else if (was_preempted_ && !preemption_flag_->IsSet()) {
273 TRACE_COUNTER_ID1("gpu", "GpuScheduler::Preempted", this, 0);
274 was_preempted_ = false;
277 return preemption_flag_->IsSet();
// True if the decoder exists and has idle work to perform.
280 bool GpuScheduler::HasMoreIdleWork() {
281 return (decoder_ && decoder_->HasMoreIdleWork());
// Delegates idle work to the decoder. A guard before the call (original
// lines 285-286) is elided from this view — presumably a null/decoder check;
// confirm against the full file.
284 void GpuScheduler::PerformIdleWork() {
287 decoder_->PerformIdleWork();
// Timeout handler posted by SetScheduled on pre-Vista Windows: forces the
// scheduler back into the scheduled state by draining unscheduled_count_,
// then records the drained total in rescheduled_count_ so that the
// SetScheduled(true) calls arriving later are absorbed instead of
// double-counted. The loop body (original lines 296-297) is elided from this
// view.
290 void GpuScheduler::RescheduleTimeOut() {
291 int new_count = unscheduled_count_ + rescheduled_count_;
// Zero the credit first so SetScheduled calls made inside the loop below
// actually decrement unscheduled_count_.
293 rescheduled_count_ = 0;
295 while (unscheduled_count_)
298 rescheduled_count_ = new_count;
// UnscheduleFence pairs a GL fence with the task to run once it signals, and
// stamps the issue time used for the timeout in PollUnscheduleFences. The
// remaining parameter and member initializers, plus both bodies, are elided
// from this view (original lines 302-309).
301 GpuScheduler::UnscheduleFence::UnscheduleFence(gfx::GLFence* fence_,
304 issue_time(base::Time::Now()),
308 GpuScheduler::UnscheduleFence::~UnscheduleFence() {