// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/gpu_scheduler.h"

#include "base/bind.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop.h"
#include "base/time/time.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_fence.h"
#include "ui/gl/gl_switches.h"

#if defined(OS_WIN)
#include "base/win/windows_version.h"
#endif

using ::base::SharedMemory;

namespace gpu {

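// Maximum time, in milliseconds, to wait for an unschedule fence to complete
// before running its deferred task anyway (see PollUnscheduleFences()).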
const int64 kUnscheduleFenceTimeOutDelay = 10000;

#if defined(OS_WIN)
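// On pre-Vista Windows, the delay, in milliseconds, after which an
// unscheduled scheduler is forced back into the scheduled state
// (see RescheduleTimeOut()).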
const int64 kRescheduleTimeOutDelay = 1000;
#endif

GpuScheduler::GpuScheduler(CommandBuffer* command_buffer,
                           AsyncAPIInterface* handler,
                           gles2::GLES2Decoder* decoder)
    : command_buffer_(command_buffer),
      handler_(handler),
      decoder_(decoder),
      unscheduled_count_(0),
      rescheduled_count_(0),
      reschedule_task_factory_(this),
      was_preempted_(false) {}

GpuScheduler::~GpuScheduler() {
}

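// Processes commands from the ring buffer until it is empty, the scheduler is
// unscheduled or preempted, a command is deferred, or a parse error occurs.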
void GpuScheduler::PutChanged() {
  TRACE_EVENT1(
     "gpu", "GpuScheduler:PutChanged",
     "decoder", decoder_ ? decoder_->GetLogger()->GetLogPrefix() : "None");

  CommandBuffer::State state = command_buffer_->GetState();

  // If there is no parser, exit.
  if (!parser_.get()) {
    DCHECK_EQ(state.get_offset, state.put_offset);
    return;
  }

  parser_->set_put(state.put_offset);
  if (state.error != error::kNoError)
    return;

  // Check that the GPU has passed all fences.
  if (!PollUnscheduleFences())
    return;

  // One of the unschedule fence tasks might have unscheduled us.
  if (!IsScheduled())
    return;

  base::TimeTicks begin_time(base::TimeTicks::HighResNow());
  error::Error error = error::kNoError;
  while (!parser_->IsEmpty()) {
    if (IsPreempted())
      break;

    DCHECK(IsScheduled());
    DCHECK(unschedule_fences_.empty());

    error = parser_->ProcessCommand();

    if (error == error::kDeferCommandUntilLater) {
      DCHECK_GT(unscheduled_count_, 0);
      break;
    }

    // TODO(piman): various classes duplicate various pieces of state, leading
    // to needlessly complex update logic. It should be possible to simply
    // share the state across all of them.
    command_buffer_->SetGetOffset(static_cast<int32>(parser_->get()));

    if (error::IsError(error)) {
      LOG(ERROR) << "[" << decoder_ << "] "
                 << "GPU PARSE ERROR: " << error;
      command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
      command_buffer_->SetParseError(error);
      break;
    }

    if (!command_processed_callback_.is_null())
      command_processed_callback_.Run();

    if (unscheduled_count_ > 0)
      break;
  }

  if (decoder_) {
    if (!error::IsError(error) && decoder_->WasContextLost()) {
      command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
      command_buffer_->SetParseError(error::kLostContext);
    }
    decoder_->AddProcessingCommandsTime(
        base::TimeTicks::HighResNow() - begin_time);
  }
}

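// Adjusts unscheduled_count_: SetScheduled(false) increments it and
// SetScheduled(true) decrements it (or consumes a pending rescheduled_count_).
// Commands are only processed while the count is zero.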
void GpuScheduler::SetScheduled(bool scheduled) {
  TRACE_EVENT2("gpu", "GpuScheduler:SetScheduled", "this", this,
               "new unscheduled_count_",
               unscheduled_count_ + (scheduled ? -1 : 1));
  if (scheduled) {
    // If the scheduler was rescheduled after a timeout, ignore the subsequent
    // calls to SetScheduled when they eventually arrive until they are all
    // accounted for.
    if (rescheduled_count_ > 0) {
      --rescheduled_count_;
      return;
    } else {
      --unscheduled_count_;
    }

    DCHECK_GE(unscheduled_count_, 0);

    if (unscheduled_count_ == 0) {
      TRACE_EVENT_ASYNC_END1("gpu", "ProcessingSwap", this,
                             "GpuScheduler", this);
      // When the scheduler transitions from the unscheduled to the scheduled
      // state, cancel the task that would reschedule it after a timeout.
      reschedule_task_factory_.InvalidateWeakPtrs();

      if (!scheduling_changed_callback_.is_null())
        scheduling_changed_callback_.Run(true);
    }
  } else {
    ++unscheduled_count_;
    if (unscheduled_count_ == 1) {
      TRACE_EVENT_ASYNC_BEGIN1("gpu", "ProcessingSwap", this,
                               "GpuScheduler", this);
#if defined(OS_WIN)
      if (base::win::GetVersion() < base::win::VERSION_VISTA) {
        // When the scheduler transitions from scheduled to unscheduled, post a
        // delayed task that will force it back into a scheduled state after
        // a timeout. This should only be necessary on pre-Vista.
        base::MessageLoop::current()->PostDelayedTask(
            FROM_HERE,
            base::Bind(&GpuScheduler::RescheduleTimeOut,
                       reschedule_task_factory_.GetWeakPtr()),
            base::TimeDelta::FromMilliseconds(kRescheduleTimeOutDelay));
      }
#endif
      if (!scheduling_changed_callback_.is_null())
        scheduling_changed_callback_.Run(false);
    }
  }
}

bool GpuScheduler::IsScheduled() {
  return unscheduled_count_ == 0;
}

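// Returns true if there are pending unschedule fences, pending decoder
// queries, or idle work to perform.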
bool GpuScheduler::HasMoreWork() {
  return !unschedule_fences_.empty() ||
         (decoder_ && decoder_->ProcessPendingQueries()) ||
         HasMoreIdleWork();
}

void GpuScheduler::SetSchedulingChangedCallback(
    const SchedulingChangedCallback& callback) {
  scheduling_changed_callback_ = callback;
}

Buffer GpuScheduler::GetSharedMemoryBuffer(int32 shm_id) {
  return command_buffer_->GetTransferBuffer(shm_id);
}

void GpuScheduler::set_token(int32 token) {
  command_buffer_->SetToken(token);
}

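// Points the command parser at the ring buffer identified by
// |transfer_buffer_id|, creating the parser on first use, and resets the get
// offset to the start of the buffer.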
bool GpuScheduler::SetGetBuffer(int32 transfer_buffer_id) {
  Buffer ring_buffer = command_buffer_->GetTransferBuffer(transfer_buffer_id);
  if (!ring_buffer.ptr) {
    return false;
  }

  if (!parser_.get()) {
    parser_.reset(new CommandParser(handler_));
  }

  parser_->SetBuffer(
      ring_buffer.ptr,
      ring_buffer.size,
      0,
      ring_buffer.size);

  SetGetOffset(0);
  return true;
}

bool GpuScheduler::SetGetOffset(int32 offset) {
  if (parser_->set_get(offset)) {
    command_buffer_->SetGetOffset(static_cast<int32>(parser_->get()));
    return true;
  }
  return false;
}

int32 GpuScheduler::GetGetOffset() {
  return parser_->get();
}

void GpuScheduler::SetCommandProcessedCallback(
    const base::Closure& callback) {
  command_processed_callback_ = callback;
}

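// Queues |task| behind a new GL fence and unschedules the scheduler; the task
// runs once the fence completes (or times out) in PollUnscheduleFences().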
void GpuScheduler::DeferToFence(base::Closure task) {
  unschedule_fences_.push(make_linked_ptr(
       new UnscheduleFence(gfx::GLFence::Create(), task)));
  SetScheduled(false);
}

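// Returns true once every pending unschedule fence has completed (or exceeded
// its timeout) and its deferred task has run; returns false while waiting.
// Falls back to glFinish() when a GL fence could not be created.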
bool GpuScheduler::PollUnscheduleFences() {
  if (unschedule_fences_.empty())
    return true;

  if (unschedule_fences_.front()->fence.get()) {
    base::Time now = base::Time::Now();
    base::TimeDelta timeout =
        base::TimeDelta::FromMilliseconds(kUnscheduleFenceTimeOutDelay);

    while (!unschedule_fences_.empty()) {
      const UnscheduleFence& fence = *unschedule_fences_.front();
      if (fence.fence->HasCompleted() ||
          now - fence.issue_time > timeout) {
        unschedule_fences_.front()->task.Run();
        unschedule_fences_.pop();
        SetScheduled(true);
      } else {
        return false;
      }
    }
  } else {
    glFinish();

    while (!unschedule_fences_.empty()) {
      unschedule_fences_.front()->task.Run();
      unschedule_fences_.pop();
      SetScheduled(true);
    }
  }

  return true;
}

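// Returns whether the preemption flag is currently set, emitting a trace
// counter whenever the preemption state changes.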
bool GpuScheduler::IsPreempted() {
  if (!preemption_flag_.get())
    return false;

  if (!was_preempted_ && preemption_flag_->IsSet()) {
    TRACE_COUNTER_ID1("gpu", "GpuScheduler::Preempted", this, 1);
    was_preempted_ = true;
  } else if (was_preempted_ && !preemption_flag_->IsSet()) {
    TRACE_COUNTER_ID1("gpu", "GpuScheduler::Preempted", this, 0);
    was_preempted_ = false;
  }

  return preemption_flag_->IsSet();
}

bool GpuScheduler::HasMoreIdleWork() {
  return (decoder_ && decoder_->HasMoreIdleWork());
}

void GpuScheduler::PerformIdleWork() {
  if (!decoder_)
    return;
  decoder_->PerformIdleWork();
}

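// Runs when the pre-Vista reschedule timer fires: forces the scheduler back
// into the scheduled state and records, in rescheduled_count_, how many of the
// SetScheduled(true) calls that eventually arrive should be ignored.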
void GpuScheduler::RescheduleTimeOut() {
  int new_count = unscheduled_count_ + rescheduled_count_;

  rescheduled_count_ = 0;

  while (unscheduled_count_)
    SetScheduled(true);

  rescheduled_count_ = new_count;
}

GpuScheduler::UnscheduleFence::UnscheduleFence(gfx::GLFence* fence_,
                                               base::Closure task_)
  : fence(fence_),
    issue_time(base::Time::Now()),
    task(task_) {
}

GpuScheduler::UnscheduleFence::~UnscheduleFence() {
}

}  // namespace gpu