2 * Copyright (C) 2012 ARM Limited. All rights reserved.
4 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
7 * A copy of the licence is included with the program, and can also be obtained from Free Software
8 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
11 #include "mali_pp_scheduler.h"
12 #include "mali_kernel_common.h"
13 #include "mali_kernel_core.h"
15 #include "mali_osk_list.h"
16 #include "mali_scheduler.h"
18 #include "mali_pp_job.h"
19 #include "mali_group.h"
21 #include "mali_timeline.h"
22 #include "mali_osk_profiling.h"
23 #include "mali_kernel_utilization.h"
24 #include "mali_session.h"
25 #include "mali_pm_domain.h"
26 #include "linux/mali/mali_utgard.h"
28 #if defined(CONFIG_DMA_SHARED_BUFFER)
29 #include "mali_memory_dma_buf.h"
31 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
32 #include <linux/sched.h>
33 #include <trace/events/gpu.h>
/* NOTE(review): every line in this chunk carries a stray numeric prefix
 * (the original file's line number) and several interior lines are missing
 * (e.g. the closing "};" of this struct). Code text preserved verbatim. */
36 /* Queue type used for physical and virtual job queues. */
37 struct mali_pp_scheduler_job_queue {
38 _MALI_OSK_LIST_HEAD(normal_pri); /* List of jobs with some unscheduled work. */
39 _MALI_OSK_LIST_HEAD(high_pri); /* List of high priority jobs with some unscheduled work. */
40 u32 depth; /* Depth of combined queues. */
/* When dma-buf map-on-demand is in use, job deletion and job queueing may
 * sleep, so both are deferred to a workqueue if we are in atomic context. */
43 /* If dma_buf with map on demand is used, we defer job deletion and job queue if in atomic context,
44 * since both might sleep. */
45 #if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
46 #define MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE 1
47 #define MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE 1
48 #endif /* defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) */
/* File-scope scheduler state and forward declarations. */
50 static void mali_pp_scheduler_job_queued(void);
51 static void mali_pp_scheduler_job_completed(void);
53 /* Maximum of 8 PP cores (a group can only have maximum of 1 PP core) */
54 #define MALI_MAX_NUMBER_OF_PP_GROUPS 9
56 static mali_bool mali_pp_scheduler_is_suspended(void *data);
/* PP core hardware version, read from the first available core in populate(). */
58 static u32 pp_version = 0;
60 /* Physical job queue */
61 static struct mali_pp_scheduler_job_queue job_queue;
/* Physical group bookkeeping: a group lives on exactly one of these lists. */
64 static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_working); /* List of physical groups with working jobs on the pp core */
65 static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_idle); /* List of physical groups with idle jobs on the pp core */
66 static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_disabled); /* List of disabled physical groups */
68 /* Virtual job queue (Mali-450 only) */
69 static struct mali_pp_scheduler_job_queue virtual_job_queue;
72 * Add job to scheduler queue.
74 * @param job Job to queue.
75 * @return Schedule mask.
77 static mali_scheduler_mask mali_pp_scheduler_queue_job(struct mali_pp_job *job);
79 /* Virtual group (Mali-450 only) */
80 static struct mali_group *virtual_group = NULL; /* Virtual group (if any) */
/* NOTE(review): the opening of this enum (e.g. "static enum {" and the
 * VIRTUAL_GROUP_IDLE enumerator) is missing from this view. */
83 VIRTUAL_GROUP_WORKING,
84 VIRTUAL_GROUP_DISABLED,
86 virtual_group_state = VIRTUAL_GROUP_IDLE; /* Flag which indicates whether the virtual group is working or idle */
88 /* Number of physical cores */
89 static u32 num_cores = 0;
91 /* Number of physical cores which are enabled */
92 static u32 enabled_cores = 0;
94 /* Enable or disable core scaling */
95 static mali_bool core_scaling_enabled = MALI_TRUE;
97 /* Variables to allow safe pausing of the scheduler */
98 static _mali_osk_wait_queue_t *pp_scheduler_working_wait_queue = NULL;
99 static u32 pause_count = 0;
/* Main scheduler lock: IRQ-safe spinlock when upper-half (IRQ context)
 * scheduling is enabled, plain spinlock otherwise.
 * NOTE(review): the "#else" line between the two definitions is missing. */
101 #if defined(MALI_UPPER_HALF_SCHEDULING)
102 static _mali_osk_spinlock_irq_t *pp_scheduler_lock = NULL;
104 static _mali_osk_spinlock_t *pp_scheduler_lock = NULL;
105 #endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
/* Take the global PP scheduler lock (IRQ-safe variant under upper-half
 * scheduling). NOTE(review): the opening "{" and the "#else" between the
 * two lock calls are missing from this view. */
107 MALI_STATIC_INLINE void mali_pp_scheduler_lock(void)
109 #if defined(MALI_UPPER_HALF_SCHEDULING)
110 _mali_osk_spinlock_irq_lock(pp_scheduler_lock);
112 _mali_osk_spinlock_lock(pp_scheduler_lock);
113 #endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
114 MALI_DEBUG_PRINT(5, ("Mali PP scheduler: PP scheduler lock taken.\n"));
/* Release the global PP scheduler lock; mirror of mali_pp_scheduler_lock().
 * NOTE(review): "{", "#else" and closing "}" lines are missing from this view. */
117 MALI_STATIC_INLINE void mali_pp_scheduler_unlock(void)
119 MALI_DEBUG_PRINT(5, ("Mali PP scheduler: Releasing PP scheduler lock.\n"));
120 #if defined(MALI_UPPER_HALF_SCHEDULING)
121 _mali_osk_spinlock_irq_unlock(pp_scheduler_lock);
123 _mali_osk_spinlock_unlock(pp_scheduler_lock);
124 #endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
/* Debug-only assertion that the scheduler lock is held; no-op otherwise.
 * NOTE(review): the "#if defined(DEBUG)" / "#else" lines are missing. */
128 #define MALI_ASSERT_PP_SCHEDULER_LOCKED() MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock)
130 #define MALI_ASSERT_PP_SCHEDULER_LOCKED() do {} while (0)
131 #endif /* defined(DEBUG) */
133 #if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
/* Deferred-deletion machinery: work item, lock, and pending-deletion list. */
135 static _mali_osk_wq_work_t *pp_scheduler_wq_job_delete = NULL;
136 static _mali_osk_spinlock_irq_t *pp_scheduler_job_delete_lock = NULL;
137 static _MALI_OSK_LIST_HEAD_STATIC_INIT(pp_scheduler_job_deletion_queue);
/* Queue @job for deletion on the workqueue. Used because deleting a job may
 * sleep (dma-buf unmap) and the caller may be in atomic context. */
139 static void mali_pp_scheduler_deferred_job_delete(struct mali_pp_job *job)
141 MALI_DEBUG_ASSERT_POINTER(job);
143 _mali_osk_spinlock_irq_lock(pp_scheduler_job_delete_lock);
145 /* This job object should not be on any lists. */
146 MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
147 MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_list));
148 MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
150 _mali_osk_list_addtail(&job->list, &pp_scheduler_job_deletion_queue);
152 _mali_osk_spinlock_irq_unlock(pp_scheduler_job_delete_lock);
154 _mali_osk_wq_schedule_work(pp_scheduler_wq_job_delete);
/* Workqueue handler: drain the deletion queue and delete each job outside
 * any lock (deletion may sleep). @arg is unused. */
157 static void mali_pp_scheduler_do_job_delete(void *arg)
159 _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
160 struct mali_pp_job *job;
161 struct mali_pp_job *tmp;
165 _mali_osk_spinlock_irq_lock(pp_scheduler_job_delete_lock);
168 * Quickly "unhook" the jobs pending to be deleted, so we can release the lock before
169 * we start deleting the job objects (without any locks held)
171 _mali_osk_list_move_list(&pp_scheduler_job_deletion_queue, &list);
173 _mali_osk_spinlock_irq_unlock(pp_scheduler_job_delete_lock);
175 _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list, struct mali_pp_job, list) {
176 mali_pp_job_delete(job); /* delete the job object itself */
180 #endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
182 #if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
/* Deferred-queueing machinery: work item, lock, and pending-queue list. */
184 static _mali_osk_wq_work_t *pp_scheduler_wq_job_queue = NULL;
185 static _mali_osk_spinlock_irq_t *pp_scheduler_job_queue_lock = NULL;
186 static _MALI_OSK_LIST_HEAD_STATIC_INIT(pp_scheduler_job_queue_list);
/* Hand @job to the workqueue for queueing, since queueing may sleep
 * (dma-buf map on demand) and the caller may be in atomic context. */
188 static void mali_pp_scheduler_deferred_job_queue(struct mali_pp_job *job)
190 MALI_DEBUG_ASSERT_POINTER(job);
192 _mali_osk_spinlock_irq_lock(pp_scheduler_job_queue_lock);
193 _mali_osk_list_addtail(&job->list, &pp_scheduler_job_queue_list);
194 _mali_osk_spinlock_irq_unlock(pp_scheduler_job_queue_lock);
196 _mali_osk_wq_schedule_work(pp_scheduler_wq_job_queue);
/* Workqueue handler: drain the pending-queue list, queue each job into the
 * scheduler, accumulate the resulting schedule mask, then kick the scheduler.
 * @arg is unused. */
199 static void mali_pp_scheduler_do_job_queue(void *arg)
201 _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
202 struct mali_pp_job *job;
203 struct mali_pp_job *tmp;
204 mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
208 _mali_osk_spinlock_irq_lock(pp_scheduler_job_queue_lock);
211 * Quickly "unhook" the jobs pending to be queued, so we can release the lock before
212 * we start queueing the job objects (without any locks held)
214 _mali_osk_list_move_list(&pp_scheduler_job_queue_list, &list);
216 _mali_osk_spinlock_irq_unlock(pp_scheduler_job_queue_lock);
218 _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list, struct mali_pp_job, list) {
219 _mali_osk_list_delinit(&job->list);
220 schedule_mask |= mali_pp_scheduler_queue_job(job);
/* MALI_FALSE: not called from an upper-half (IRQ) context here. */
223 mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
226 #endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
/* MALI_TRUE when a Mali-450 virtual group exists.
 * NOTE(review): the "#else" branch (returning MALI_FALSE on non-450
 * configurations) appears to be missing from this view — confirm. */
228 MALI_STATIC_INLINE mali_bool mali_pp_scheduler_has_virtual_group(void)
230 #if defined(CONFIG_MALI450)
231 return NULL != virtual_group;
234 #endif /* defined(CONFIG_MALI450) */
/* One-time scheduler initialization: job queues, main lock, pause wait queue,
 * and (when configured) the deferred delete/queue work items and their locks.
 * Returns _MALI_OSK_ERR_OK, or _MALI_OSK_ERR_NOMEM after unwinding all
 * partially-created resources via the cleanup path below. */
237 _mali_osk_errcode_t mali_pp_scheduler_initialize(void)
239 _MALI_OSK_INIT_LIST_HEAD(&job_queue.normal_pri);
240 _MALI_OSK_INIT_LIST_HEAD(&job_queue.high_pri);
243 _MALI_OSK_INIT_LIST_HEAD(&virtual_job_queue.normal_pri);
244 _MALI_OSK_INIT_LIST_HEAD(&virtual_job_queue.high_pri);
245 virtual_job_queue.depth = 0;
247 #if defined(MALI_UPPER_HALF_SCHEDULING)
248 pp_scheduler_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
250 pp_scheduler_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
251 #endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
252 if (NULL == pp_scheduler_lock) goto cleanup;
254 pp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
255 if (NULL == pp_scheduler_working_wait_queue) goto cleanup;
257 #if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
258 pp_scheduler_wq_job_delete = _mali_osk_wq_create_work(mali_pp_scheduler_do_job_delete, NULL)
259 if (NULL == pp_scheduler_wq_job_delete) goto cleanup;
261 pp_scheduler_job_delete_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
262 if (NULL == pp_scheduler_job_delete_lock) goto cleanup;
263 #endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
265 #if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
266 pp_scheduler_wq_job_queue = _mali_osk_wq_create_work(mali_pp_scheduler_do_job_queue, NULL);
267 if (NULL == pp_scheduler_wq_job_queue) goto cleanup;
269 pp_scheduler_job_queue_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
270 if (NULL == pp_scheduler_job_queue_lock) goto cleanup;
271 #endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
273 return _MALI_OSK_ERR_OK;
/* cleanup: tear down in reverse order of creation.
 * NOTE(review): the "cleanup:" label line itself is missing from this view. */
276 #if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
277 if (NULL != pp_scheduler_job_queue_lock) {
278 _mali_osk_spinlock_irq_term(pp_scheduler_job_queue_lock);
279 pp_scheduler_job_queue_lock = NULL;
282 if (NULL != pp_scheduler_wq_job_queue) {
283 _mali_osk_wq_delete_work(pp_scheduler_wq_job_queue);
284 pp_scheduler_wq_job_queue = NULL;
286 #endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
288 #if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
289 if (NULL != pp_scheduler_job_delete_lock) {
290 _mali_osk_spinlock_irq_term(pp_scheduler_job_delete_lock);
291 pp_scheduler_job_delete_lock = NULL;
294 if (NULL != pp_scheduler_wq_job_delete) {
295 _mali_osk_wq_delete_work(pp_scheduler_wq_job_delete);
296 pp_scheduler_wq_job_delete = NULL;
298 #endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
300 if (NULL != pp_scheduler_working_wait_queue) {
301 _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
302 pp_scheduler_working_wait_queue = NULL;
305 if (NULL != pp_scheduler_lock) {
306 #if defined(MALI_UPPER_HALF_SCHEDULING)
307 _mali_osk_spinlock_irq_term(pp_scheduler_lock);
309 _mali_osk_spinlock_term(pp_scheduler_lock);
310 #endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
311 pp_scheduler_lock = NULL;
314 return _MALI_OSK_ERR_NOMEM;
/* Scheduler teardown: destroy deferred-work resources, the pause wait queue
 * and the main lock. Unconditional (no NULL checks) — assumes initialize()
 * previously succeeded. */
317 void mali_pp_scheduler_terminate(void)
319 #if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
320 _mali_osk_spinlock_irq_term(pp_scheduler_job_queue_lock);
321 _mali_osk_wq_delete_work(pp_scheduler_wq_job_queue);
322 #endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
324 #if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
325 _mali_osk_spinlock_irq_term(pp_scheduler_job_delete_lock);
326 _mali_osk_wq_delete_work(pp_scheduler_wq_job_delete);
327 #endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
329 _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
331 #if defined(MALI_UPPER_HALF_SCHEDULING)
332 _mali_osk_spinlock_irq_term(pp_scheduler_lock);
334 _mali_osk_spinlock_term(pp_scheduler_lock);
335 #endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
/* Discover all groups: record the virtual group (if any), read the PP core
 * version from the first physical core, and either join each physical core
 * into the virtual group or place it on the idle list.
 * NOTE(review): declarations of num_groups/i and the num_cores increment are
 * among the lines missing from this view. */
338 void mali_pp_scheduler_populate(void)
340 struct mali_group *group;
341 struct mali_pp_core *pp_core;
345 num_groups = mali_group_get_glob_num_groups();
347 /* Do we have a virtual group? */
348 for (i = 0; i < num_groups; i++) {
349 group = mali_group_get_glob_group(i);
351 if (mali_group_is_virtual(group)) {
352 MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Found virtual group %p.\n", group));
354 virtual_group = group;
359 /* Find all the available PP cores */
360 for (i = 0; i < num_groups; i++) {
361 group = mali_group_get_glob_group(i);
362 pp_core = mali_group_get_pp_core(group);
364 if (NULL != pp_core && !mali_group_is_virtual(group)) {
365 if (0 == pp_version) {
366 /* Retrieve PP version from the first available PP core */
367 pp_version = mali_pp_core_get_version(pp_core);
370 if (mali_pp_scheduler_has_virtual_group()) {
371 /* Add all physical PP cores to the virtual group */
372 mali_group_lock(virtual_group);
373 group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
374 mali_group_add_group(virtual_group, group, MALI_TRUE);
375 mali_group_unlock(virtual_group);
377 _mali_osk_list_add(&group->pp_scheduler_list, &group_list_idle);
/* All discovered cores start enabled. */
384 enabled_cores = num_cores;
/* Delete every group owned by the scheduler. Must only run when nothing is
 * working (asserted below). */
387 void mali_pp_scheduler_depopulate(void)
389 struct mali_group *group, *temp;
391 MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
392 MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
394 /* Delete all groups owned by scheduler */
395 if (mali_pp_scheduler_has_virtual_group()) {
396 mali_group_delete(virtual_group);
399 _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list) {
400 mali_group_delete(group);
402 _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, pp_scheduler_list) {
403 mali_group_delete(group);
/* Power-management helper: if the virtual group has no member cores left,
 * mark it DISABLED. Caller must hold the virtual group lock. */
407 MALI_STATIC_INLINE void mali_pp_scheduler_disable_empty_virtual(void)
409 MALI_ASSERT_GROUP_LOCKED(virtual_group);
411 if (mali_group_virtual_disable_if_empty(virtual_group)) {
412 MALI_DEBUG_PRINT(4, ("Disabling empty virtual group\n"));
414 MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
416 virtual_group_state = VIRTUAL_GROUP_DISABLED;
/* Inverse of disable_empty_virtual(): re-enable a previously disabled, empty
 * virtual group and mark it IDLE. Caller must hold the virtual group lock. */
420 MALI_STATIC_INLINE void mali_pp_scheduler_enable_empty_virtual(void)
422 MALI_ASSERT_GROUP_LOCKED(virtual_group);
424 if (mali_group_virtual_enable_if_empty(virtual_group)) {
425 MALI_DEBUG_PRINT(4, ("Re-enabling empty virtual group\n"));
427 MALI_DEBUG_ASSERT(VIRTUAL_GROUP_DISABLED == virtual_group_state);
429 virtual_group_state = VIRTUAL_GROUP_IDLE;
/* Pick the next job from @queue without dequeuing it.
 * Policy: a normal-priority job that already has sub jobs started is
 * preferred (finish what we began); otherwise a high-priority job overrides
 * the normal-priority candidate. Returns NULL when both lists are empty.
 * NOTE(review): the early-return for the in-progress case and the final
 * "return job;" are among lines missing from this view. */
433 static struct mali_pp_job *mali_pp_scheduler_get_job(struct mali_pp_scheduler_job_queue *queue)
435 struct mali_pp_job *job = NULL;
437 MALI_ASSERT_PP_SCHEDULER_LOCKED();
438 MALI_DEBUG_ASSERT_POINTER(queue);
440 /* Check if we have a normal priority job. */
441 if (!_mali_osk_list_empty(&queue->normal_pri)) {
442 MALI_DEBUG_ASSERT(queue->depth > 0);
443 job = _MALI_OSK_LIST_ENTRY(queue->normal_pri.next, struct mali_pp_job, list);
446 /* Prefer normal priority job if it is in progress. */
447 if (NULL != job && 0 < job->sub_jobs_started) {
451 /* Check if we have a high priority job. */
452 if (!_mali_osk_list_empty(&queue->high_pri)) {
453 MALI_DEBUG_ASSERT(queue->depth > 0);
454 job = _MALI_OSK_LIST_ENTRY(queue->high_pri.next, struct mali_pp_job, list);
461 * Returns a physical job if a physical job is ready to run
463 MALI_STATIC_INLINE struct mali_pp_job *mali_pp_scheduler_get_physical_job(void)
465 MALI_ASSERT_PP_SCHEDULER_LOCKED();
466 return mali_pp_scheduler_get_job(&job_queue);
/* Drop @job from the physical queue once its last sub job has been started;
 * otherwise leave it queued so remaining sub jobs can be scheduled.
 * NOTE(review): the "--job_queue.depth;" decrement is expected inside the
 * if-branch but is missing from this view — confirm against upstream. */
469 MALI_STATIC_INLINE void mali_pp_scheduler_dequeue_physical_job(struct mali_pp_job *job)
471 MALI_ASSERT_PP_SCHEDULER_LOCKED();
472 MALI_DEBUG_ASSERT(job_queue.depth > 0);
474 /* Remove job from queue */
475 if (!mali_pp_job_has_unstarted_sub_jobs(job)) {
476 /* All sub jobs have been started: remove job from queue */
477 _mali_osk_list_delinit(&job->list);
478 _mali_osk_list_delinit(&job->session_fb_lookup_list);
485 * Returns a virtual job if a virtual job is ready to run
487 MALI_STATIC_INLINE struct mali_pp_job *mali_pp_scheduler_get_virtual_job(void)
489 MALI_ASSERT_PP_SCHEDULER_LOCKED();
490 MALI_DEBUG_ASSERT_POINTER(virtual_group);
491 return mali_pp_scheduler_get_job(&virtual_job_queue);
/* Remove @job from the virtual queue unconditionally — virtual jobs have
 * exactly one sub job, so dequeue happens as soon as it is started. */
494 MALI_STATIC_INLINE void mali_pp_scheduler_dequeue_virtual_job(struct mali_pp_job *job)
496 MALI_ASSERT_PP_SCHEDULER_LOCKED();
497 MALI_DEBUG_ASSERT(virtual_job_queue.depth > 0);
499 /* Remove job from queue */
500 _mali_osk_list_delinit(&job->list);
501 _mali_osk_list_delinit(&job->session_fb_lookup_list);
502 --virtual_job_queue.depth;
506 * Checks if the criteria is met for removing a physical core from virtual group
508 MALI_STATIC_INLINE mali_bool mali_pp_scheduler_can_move_virtual_to_physical(void)
510 MALI_ASSERT_PP_SCHEDULER_LOCKED();
511 MALI_DEBUG_ASSERT(mali_pp_scheduler_has_virtual_group());
512 MALI_ASSERT_GROUP_LOCKED(virtual_group);
514 * The criteria for taking out a physical group from a virtual group are the following:
515 * - The virtual group is idle
516 * - There are currently no physical groups (idle and working)
517 * - There are physical jobs to be scheduled
519 return (VIRTUAL_GROUP_IDLE == virtual_group_state) &&
520 _mali_osk_list_empty(&group_list_idle) &&
521 _mali_osk_list_empty(&group_list_working) &&
522 (NULL != mali_pp_scheduler_get_physical_job());
/* Get a physical group to run a job on: first choice is the idle list;
 * failing that, pull a member core out of the (idle) virtual group. The
 * group returned from the virtual group is in state LEAVING_VIRTUAL and must
 * be set to IDLE by the caller before use. Returns NULL when neither source
 * can supply a group (the "return NULL" path is outside this view). */
525 MALI_STATIC_INLINE struct mali_group *mali_pp_scheduler_acquire_physical_group(void)
527 MALI_ASSERT_PP_SCHEDULER_LOCKED();
529 if (!_mali_osk_list_empty(&group_list_idle)) {
530 MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquiring physical group from idle list.\n"));
531 return _MALI_OSK_LIST_ENTRY(group_list_idle.next, struct mali_group, pp_scheduler_list);
532 } else if (mali_pp_scheduler_has_virtual_group()) {
533 MALI_ASSERT_GROUP_LOCKED(virtual_group);
534 if (mali_pp_scheduler_can_move_virtual_to_physical()) {
535 struct mali_group *group;
536 MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquiring physical group from virtual group.\n"));
537 group = mali_group_acquire_group(virtual_group);
539 if (mali_pp_scheduler_has_virtual_group()) {
540 mali_pp_scheduler_disable_empty_virtual();
/* Send the finished-job notification (status + per-core performance
 * counters) back to the owning session, then delete the job — deferred to
 * the workqueue when @deferred is set and deferred delete is configured,
 * because deletion may sleep. */
550 static void mali_pp_scheduler_return_job_to_user(struct mali_pp_job *job, mali_bool deferred)
552 if (MALI_FALSE == mali_pp_job_use_no_notification(job)) {
554 u32 num_counters_to_copy;
555 mali_bool success = mali_pp_job_was_success(job);
557 _mali_uk_pp_job_finished_s *jobres = job->finished_notification->result_buffer;
558 _mali_osk_memset(jobres, 0, sizeof(_mali_uk_pp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
559 jobres->user_job_ptr = mali_pp_job_get_user_id(job);
560 if (MALI_TRUE == success) {
561 jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
563 jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
566 if (mali_pp_job_is_virtual(job)) {
567 num_counters_to_copy = num_cores; /* Number of physical cores available */
569 num_counters_to_copy = mali_pp_job_get_sub_job_count(job);
572 for (i = 0; i < num_counters_to_copy; i++) {
573 jobres->perf_counter0[i] = mali_pp_job_get_perf_counter_value0(job, i);
574 jobres->perf_counter1[i] = mali_pp_job_get_perf_counter_value1(job, i);
575 jobres->perf_counter_src0 = mali_pp_job_get_pp_counter_global_src0();
576 jobres->perf_counter_src1 = mali_pp_job_get_pp_counter_global_src1();
/* Ownership of the notification passes to the session layer here. */
579 mali_session_send_notification(mali_pp_job_get_session(job), job->finished_notification);
580 job->finished_notification = NULL;
583 #if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
584 if (MALI_TRUE == deferred) {
585 /* The deletion of the job object (releasing sync refs etc) must be done in a different context */
586 mali_pp_scheduler_deferred_job_delete(job);
588 mali_pp_job_delete(job);
591 MALI_DEBUG_ASSERT(MALI_FALSE == deferred); /* no use cases need this in this configuration */
592 mali_pp_job_delete(job);
/* Final bookkeeping for a fully completed job: notify user space (deferred
 * delete when available), update the power-performance window-job counter,
 * and tell the scheduler a job slot has freed up. */
596 static void mali_pp_scheduler_finalize_job(struct mali_pp_job * job)
598 /* This job object should not be on any lists. */
599 MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
600 MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_list));
601 MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
603 /* Send notification back to user space */
604 #if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
605 mali_pp_scheduler_return_job_to_user(job, MALI_TRUE);
607 mali_pp_scheduler_return_job_to_user(job, MALI_FALSE);
610 #if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
611 if (_MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE & job->uargs.flags) {
612 _mali_osk_atomic_inc(&job->session->number_of_window_jobs);
616 mali_pp_scheduler_job_completed();
/* Main scheduling entry point. Under the scheduler lock (and the virtual
 * group lock when a virtual group exists) it:
 *   1. collects as many runnable physical (job, sub_job, group) triples as
 *      groups can be acquired, marking sub jobs started and moving groups
 *      to the working list;
 *   2. starts a virtual job on the virtual group if one is idle and queued;
 *   3. drops all locks, then programs the collected physical jobs onto
 *      their groups — register writes are slow, so they happen unlocked.
 * Does nothing while the scheduler is paused (pause_count > 0).
 * NOTE(review): several structural lines ("{", "}", "while (...)", early
 * "return"s, local declarations of i/sub_job) are missing from this view. */
619 void mali_pp_scheduler_schedule(void)
621 struct mali_group* physical_groups_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS - 1];
622 struct mali_pp_job* physical_jobs_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS - 1];
623 u32 physical_sub_jobs_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS - 1];
624 int num_physical_jobs_to_start = 0;
627 if (mali_pp_scheduler_has_virtual_group()) {
628 /* Lock the virtual group since we might have to grab physical groups. */
629 mali_group_lock(virtual_group);
632 mali_pp_scheduler_lock();
633 if (pause_count > 0) {
634 /* Scheduler is suspended, don't schedule any jobs. */
635 mali_pp_scheduler_unlock();
636 if (mali_pp_scheduler_has_virtual_group()) {
637 mali_group_unlock(virtual_group);
642 /* Find physical job(s) to schedule first. */
644 struct mali_group *group;
645 struct mali_pp_job *job;
648 job = mali_pp_scheduler_get_physical_job();
650 break; /* No job, early out. */
653 if (mali_scheduler_hint_is_enabled(MALI_SCHEDULER_HINT_GP_BOUND) &&
654 mali_pp_job_is_large_and_unstarted(job) && !_mali_osk_list_empty(&group_list_working)) {
655 /* Since not all groups are idle, don't schedule yet. */
659 MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual(job));
660 MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
661 MALI_DEBUG_ASSERT(1 <= mali_pp_job_get_sub_job_count(job));
663 /* Acquire a physical group, either from the idle list or from the virtual group.
664 * In case the group was acquired from the virtual group, it's state will be
665 * LEAVING_VIRTUAL and must be set to IDLE before it can be used. */
666 group = mali_pp_scheduler_acquire_physical_group();
668 /* Could not get a group to run the job on, early out. */
669 MALI_DEBUG_PRINT(4, ("Mali PP scheduler: No more physical groups available.\n"));
673 MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquired physical group %p.\n", group));
675 /* Mark sub job as started. */
676 sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
677 mali_pp_job_mark_sub_job_started(job, sub_job);
679 /* Remove job from queue (if this was the last sub job). */
680 mali_pp_scheduler_dequeue_physical_job(job);
682 /* Move group to working list. */
683 _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_working);
685 /* Keep track of this group, so that we actually can start the job once we are done with the scheduler lock we are now holding. */
686 physical_groups_to_start[num_physical_jobs_to_start] = group;
687 physical_jobs_to_start[num_physical_jobs_to_start] = job;
688 physical_sub_jobs_to_start[num_physical_jobs_to_start] = sub_job;
689 ++num_physical_jobs_to_start;
691 MALI_DEBUG_ASSERT(num_physical_jobs_to_start < MALI_MAX_NUMBER_OF_PP_GROUPS);
694 if (mali_pp_scheduler_has_virtual_group()) {
695 if (VIRTUAL_GROUP_IDLE == virtual_group_state) {
696 /* We have a virtual group and it is idle. */
698 struct mali_pp_job *job;
700 /* Find a virtual job we can start. */
701 job = mali_pp_scheduler_get_virtual_job();
704 MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
705 MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
706 MALI_DEBUG_ASSERT(1 == mali_pp_job_get_sub_job_count(job));
708 /* Mark the one and only sub job as started. */
709 mali_pp_job_mark_sub_job_started(job, 0);
711 /* Remove job from queue. */
712 mali_pp_scheduler_dequeue_virtual_job(job);
714 /* Virtual group is now working. */
715 virtual_group_state = VIRTUAL_GROUP_WORKING;
717 /* We no longer need the scheduler lock, but we still need the virtual lock
718 * in order to start the virtual job. */
719 mali_pp_scheduler_unlock();
722 mali_group_start_pp_job(virtual_group, job, 0);
724 MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Virtual job %u (0x%08X) part %u/%u started (from schedule).\n",
725 mali_pp_job_get_id(job), job, 1,
726 mali_pp_job_get_sub_job_count(job)));
728 mali_group_unlock(virtual_group);
730 /* No virtual job to start. */
731 mali_pp_scheduler_unlock();
732 mali_group_unlock(virtual_group);
735 /* We have a virtual group, but it is busy or disabled. */
736 MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE != virtual_group_state);
738 mali_pp_scheduler_unlock();
739 mali_group_unlock(virtual_group);
742 /* There is no virtual group. */
743 mali_pp_scheduler_unlock();
746 /* We have now released the scheduler lock, and we are ready to start the physical jobs.
747 * The reason we want to wait until we have released the scheduler lock is that job start
748 * may take quite a bit of time (many registers have to be written). This will allow new
749 * jobs from user space to come in, and post-processing of other PP jobs to happen at the
750 * same time as we start jobs. */
751 for (i = 0; i < num_physical_jobs_to_start; i++) {
752 struct mali_group *group = physical_groups_to_start[i];
753 struct mali_pp_job *job = physical_jobs_to_start[i];
754 u32 sub_job = physical_sub_jobs_to_start[i];
756 MALI_DEBUG_ASSERT_POINTER(group);
757 MALI_DEBUG_ASSERT_POINTER(job);
758 MALI_DEBUG_ASSERT(!mali_group_is_virtual(group));
759 MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual(job));
761 mali_group_lock(group);
763 /* Set state to IDLE if group was acquired from the virtual group. */
764 group->state = MALI_GROUP_STATE_IDLE;
766 mali_group_start_pp_job(group, job, sub_job);
768 MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from schedule).\n",
769 mali_pp_job_get_id(job), job, sub_job + 1,
770 mali_pp_job_get_sub_job_count(job)));
772 mali_group_unlock(group);
779 * If @ref group is the virtual group, nothing is done since the virtual group should be idle
782 * If @ref group is a physical group we rejoin the virtual group, if it exists. If not, we move the
783 * physical group to the idle list.
785 * @note The group and the scheduler must both be locked when entering this function. Both will be
786 * unlocked before exiting.
788 * @param group The group to set idle.
790 static void mali_pp_scheduler_set_group_idle_and_unlock(struct mali_group *group)
792 MALI_DEBUG_ASSERT_POINTER(group);
794 MALI_ASSERT_GROUP_LOCKED(group);
795 MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);
797 if (mali_group_is_virtual(group)) {
798 /* The virtual group should have been set to non-working already. */
799 MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
801 mali_pp_scheduler_unlock();
802 mali_group_unlock(group);
/* NOTE(review): an early "return;" is expected after the virtual-group
 * branch above but is missing from this view. */
806 if (mali_pp_scheduler_has_virtual_group()) {
807 /* Rejoin virtual group. */
809 /* We're no longer needed on the scheduler list. */
810 _mali_osk_list_delinit(&(group->pp_scheduler_list));
812 /* Make sure no interrupts are handled for this group during the transition
813 * from physical to virtual. */
814 group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
/* Lock order: the group and scheduler locks are dropped BEFORE taking the
 * virtual group lock, so the group state must be rechecked afterwards. */
816 mali_pp_scheduler_unlock();
817 mali_group_unlock(group);
819 mali_group_lock(virtual_group);
821 if (mali_pp_scheduler_has_virtual_group()) {
822 mali_pp_scheduler_enable_empty_virtual();
825 /* We need to recheck the group state since it is possible that someone has
826 * modified the group before we locked the virtual group. */
827 if (MALI_GROUP_STATE_JOINING_VIRTUAL == group->state) {
828 mali_group_add_group(virtual_group, group, MALI_TRUE);
831 mali_group_unlock(virtual_group);
833 /* Move physical group back to idle list. */
834 _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
836 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
837 trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), 0, 0, 0);
840 mali_pp_scheduler_unlock();
841 mali_group_unlock(group);
847 * Schedule job on locked group.
849 * @note The group and the scheduler must both be locked when entering this function. Both will be
850 * unlocked before exiting.
852 * @param group The group to schedule on.
/* Virtual-group path: possibly carve a physical group out of the virtual
 * group for a waiting physical job, then start the next virtual job (if
 * any), and finally start the carved-out physical job with no scheduler
 * lock held. Physical-group path: start the next physical job on @group, or
 * set the group idle when the queue is empty.
 * NOTE(review): several "{", "}", "else" and early-return lines are missing
 * from this view. */
854 static void mali_pp_scheduler_schedule_on_group_and_unlock(struct mali_group *group)
856 MALI_DEBUG_ASSERT_POINTER(group);
858 MALI_ASSERT_GROUP_LOCKED(group);
859 MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);
861 if (mali_group_is_virtual(group)) {
862 /* Now that the virtual group is idle, check if we should reconfigure. */
864 struct mali_pp_job *virtual_job = NULL;
865 struct mali_pp_job *physical_job = NULL;
866 struct mali_group *physical_group = NULL;
867 u32 physical_sub_job = 0;
869 MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
871 if (mali_pp_scheduler_can_move_virtual_to_physical()) {
872 /* There is a runnable physical job and we can acquire a physical group. */
873 physical_job = mali_pp_scheduler_get_physical_job();
874 MALI_DEBUG_ASSERT_POINTER(physical_job);
875 MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(physical_job));
877 /* Mark sub job as started. */
878 physical_sub_job = mali_pp_job_get_first_unstarted_sub_job(physical_job);
879 mali_pp_job_mark_sub_job_started(physical_job, physical_sub_job);
881 /* Remove job from queue (if this was the last sub job). */
882 mali_pp_scheduler_dequeue_physical_job(physical_job);
884 /* Acquire a physical group from the virtual group. Its state will
885 * be LEAVING_VIRTUAL and must be set to IDLE before it can be
887 physical_group = mali_group_acquire_group(virtual_group);
889 /* Move physical group to the working list, as we will soon start a job on it. */
890 _mali_osk_list_move(&(physical_group->pp_scheduler_list), &group_list_working);
892 mali_pp_scheduler_disable_empty_virtual();
895 /* Get next virtual job. */
896 virtual_job = mali_pp_scheduler_get_virtual_job();
897 if (NULL != virtual_job && VIRTUAL_GROUP_IDLE == virtual_group_state) {
898 /* There is a runnable virtual job. */
900 MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(virtual_job));
901 MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(virtual_job));
902 MALI_DEBUG_ASSERT(1 == mali_pp_job_get_sub_job_count(virtual_job));
904 mali_pp_job_mark_sub_job_started(virtual_job, 0);
906 /* Remove job from queue. */
907 mali_pp_scheduler_dequeue_virtual_job(virtual_job);
909 /* Virtual group is now working. */
910 virtual_group_state = VIRTUAL_GROUP_WORKING;
912 mali_pp_scheduler_unlock();
915 mali_group_start_pp_job(group, virtual_job, 0);
917 MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Virtual job %u (0x%08X) part %u/%u started (from job_done).\n",
918 mali_pp_job_get_id(virtual_job), virtual_job, 1,
919 mali_pp_job_get_sub_job_count(virtual_job)));
921 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
922 trace_gpu_sched_switch("Mali_Virtual_PP", sched_clock(), 0, 0, 0);
925 mali_pp_scheduler_unlock();
928 /* Releasing the virtual group lock that was held when entering the function. */
929 mali_group_unlock(group);
931 /* Start a physical job (if we acquired a physical group earlier). */
932 if (NULL != physical_job && NULL != physical_group) {
933 mali_group_lock(physical_group);
935 /* Change the group state from LEAVING_VIRTUAL to IDLE to complete the transition. */
936 physical_group->state = MALI_GROUP_STATE_IDLE;
939 mali_group_start_pp_job(physical_group, physical_job, physical_sub_job);
941 MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from job_done).\n",
942 mali_pp_job_get_id(physical_job), physical_job, physical_sub_job + 1,
943 mali_pp_job_get_sub_job_count(physical_job)));
945 mali_group_unlock(physical_group);
948 /* Physical group. */
949 struct mali_pp_job *job = NULL;
952 job = mali_pp_scheduler_get_physical_job();
954 /* There is a runnable physical job. */
955 MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
957 /* Mark sub job as started. */
958 sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
959 mali_pp_job_mark_sub_job_started(job, sub_job);
961 /* Remove job from queue (if this was the last sub job). */
962 mali_pp_scheduler_dequeue_physical_job(job);
964 mali_pp_scheduler_unlock();
966 /* Group is already on the working list, so start the new job. */
967 mali_group_start_pp_job(group, job, sub_job);
969 MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from job_done).\n",
970 mali_pp_job_get_id(job), job, sub_job + 1, mali_pp_job_get_sub_job_count(job)));
972 mali_group_unlock(group);
/* Queue empty: hand the group back (rejoins virtual group or idle list). */
974 mali_pp_scheduler_set_group_idle_and_unlock(group);
/**
 * Handle completion of one sub job of a PP job.
 *
 * Called with the group lock held (MALI_ASSERT_GROUP_LOCKED below); may be
 * called from the interrupt upper half, in which case @in_upper_half is set
 * and deferred scheduling paths are used by mali_scheduler_schedule_from_mask().
 *
 * Marks the sub job completed; when the whole job is done, removes it from
 * the session list, releases its Timeline tracker (which may activate
 * dependent trackers) and finally hands the job back to user space via
 * mali_pp_scheduler_finalize_job().  The group that just finished is either
 * immediately rescheduled or moved to the idle state.
 */
void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success, mali_bool in_upper_half)
{
	mali_bool job_is_done = MALI_FALSE;
	mali_bool schedule_on_group = MALI_FALSE;
	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;

	MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) part %u/%u completed (%s).\n",
			     mali_pp_job_is_virtual(job) ? "Virtual" : "Physical",
			     mali_pp_job_get_id(job),
			     job, sub_job + 1,
			     mali_pp_job_get_sub_job_count(job),
			     success ? "success" : "failure"));

	MALI_ASSERT_GROUP_LOCKED(group);
	mali_pp_scheduler_lock();

	mali_pp_job_mark_sub_job_completed(job, success);

	/* A virtual job must come from the virtual group, and vice versa. */
	MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job) == mali_group_is_virtual(group));

	job_is_done = mali_pp_job_is_complete(job);

	if (job_is_done) {
		/* Job is removed from these lists when the last sub job is scheduled. */
		MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
		MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));

		/* Remove job from session list. */
		_mali_osk_list_delinit(&job->session_list);

		MALI_DEBUG_PRINT(4, ("Mali PP scheduler: All parts completed for %s job %u (0x%08X).\n",
				     mali_pp_job_is_virtual(job) ? "virtual" : "physical",
				     mali_pp_job_get_id(job), job));

		/* Tracker release may sleep/schedule, so it must happen outside
		 * the scheduler lock. */
		mali_pp_scheduler_unlock();

		/* Release tracker.  If other trackers are waiting on this tracker, this could
		 * trigger activation.  The returned scheduling mask can be used to determine if we
		 * have to schedule GP, PP or both. */
		schedule_mask = mali_timeline_tracker_release(&job->tracker);

		mali_pp_scheduler_lock();
	}

	if (mali_group_is_virtual(group)) {
		/* Obey the policy. */
		virtual_group_state = VIRTUAL_GROUP_IDLE;
	}

	/* If paused, then this was the last job, so wake up sleeping workers and return. */
	if (pause_count > 0) {
		/* Wake up sleeping workers.  Their wake-up condition is that
		 * num_slots == num_slots_idle, so unless we are done working, no
		 * threads will actually be woken up.
		 */
		if (!mali_group_is_virtual(group)) {
			/* Move physical group to idle list. */
			_mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
		}

#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
		trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), 0, 0, 0);
#endif

		_mali_osk_wait_queue_wake_up(pp_scheduler_working_wait_queue);

		mali_pp_scheduler_unlock();
		mali_group_unlock(group);

		if (job_is_done) {
			/* Return job to user and delete it. */
			mali_pp_scheduler_finalize_job(job);
		}

		/* A GP job might be queued by tracker release above,
		 * make sure GP scheduler gets a chance to schedule this (if possible)
		 */
		mali_scheduler_schedule_from_mask(schedule_mask & ~MALI_SCHEDULER_MASK_PP, in_upper_half);
		return;
	}

	/* Since this group just finished running a job, we can reschedule a new job on it
	 * immediately. */

	/* By default, don't schedule on group. */
	schedule_on_group = MALI_FALSE;

	if (mali_group_is_virtual(group)) {
		/* Always schedule immediately on virtual group. */
		schedule_mask &= ~MALI_SCHEDULER_MASK_PP;
		schedule_on_group = MALI_TRUE;
	} else if (0 < job_queue.depth && (!mali_scheduler_mask_is_set(schedule_mask, MALI_SCHEDULER_MASK_PP) || _mali_osk_list_empty(&group_list_idle))) {
		struct mali_pp_job *next_job = NULL;

		next_job = mali_pp_scheduler_get_physical_job();
		MALI_DEBUG_ASSERT_POINTER(next_job);

		/* If no new jobs have been queued or if this group is the only idle group, we can
		 * schedule immediately on this group, unless we are GP bound and the next job would
		 * benefit from all its sub jobs being started concurrently. */

		if (mali_scheduler_hint_is_enabled(MALI_SCHEDULER_HINT_GP_BOUND) && mali_pp_job_is_large_and_unstarted(next_job)) {
			/* We are GP bound and the job would benefit from all sub jobs being started
			 * concurrently.  Postpone scheduling until after group has been unlocked. */
			schedule_mask |= MALI_SCHEDULER_MASK_PP;
			schedule_on_group = MALI_FALSE;
		} else {
			/* Schedule job immediately since we are not GP bound. */
			schedule_mask &= ~MALI_SCHEDULER_MASK_PP;
			schedule_on_group = MALI_TRUE;
		}
	}

	if (schedule_on_group) {
		/* Schedule a new job on this group. */
		mali_pp_scheduler_schedule_on_group_and_unlock(group);
	} else {
		/* Set group idle.  Will rejoin virtual group, under appropriate conditions. */
		mali_pp_scheduler_set_group_idle_and_unlock(group);
	}

	if (!schedule_on_group || MALI_SCHEDULER_MASK_EMPTY != schedule_mask) {
		if (MALI_SCHEDULER_MASK_PP & schedule_mask) {
			/* Schedule PP directly. */
			mali_pp_scheduler_schedule();
			schedule_mask &= ~MALI_SCHEDULER_MASK_PP;
		}

		/* Schedule other jobs that were activated. */
		mali_scheduler_schedule_from_mask(schedule_mask, in_upper_half);
	}

	if (job_is_done) {
		/* Return job to user and delete it. */
		mali_pp_scheduler_finalize_job(job);
	}
}
1118 void mali_pp_scheduler_suspend(void)
1120 mali_pp_scheduler_lock();
1121 pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
1122 mali_pp_scheduler_unlock();
1124 /* Go to sleep. When woken up again (in mali_pp_scheduler_job_done), the
1125 * mali_pp_scheduler_suspended() function will be called. This will return true
1126 * if state is idle and pause_count > 0, so if the core is active this
1127 * will not do anything.
1129 _mali_osk_wait_queue_wait_event(pp_scheduler_working_wait_queue, mali_pp_scheduler_is_suspended, NULL);
1132 void mali_pp_scheduler_resume(void)
1134 mali_pp_scheduler_lock();
1135 pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
1136 mali_pp_scheduler_unlock();
1137 if (0 == pause_count) {
1138 mali_pp_scheduler_schedule();
/**
 * Register a newly created PP job with the scheduler and the Timeline system.
 *
 * Adds the job to the per-session frame-builder lookup list (used by
 * _mali_ukk_pp_job_disable_wb() to find queued jobs quickly), takes a PM
 * device reference that is held for the lifetime of the job, and hands the
 * job's tracker to the Timeline system, which will later activate the job
 * (see mali_pp_scheduler_activate_job()).
 *
 * @return The Timeline point for the job, reported back to user space.
 */
mali_timeline_point mali_pp_scheduler_submit_job(struct mali_session_data *session, struct mali_pp_job *job)
{
	mali_timeline_point point;
	u32 fb_lookup_id = 0;

	MALI_DEBUG_ASSERT_POINTER(session);
	MALI_DEBUG_ASSERT_POINTER(job);

	mali_pp_scheduler_lock();

	fb_lookup_id = mali_pp_job_get_fb_lookup_id(job);
	MALI_DEBUG_ASSERT(MALI_PP_JOB_FB_LOOKUP_LIST_SIZE > fb_lookup_id);

	/* Adding job to the lookup list used to quickly discard writeback units of queued jobs. */
	_mali_osk_list_addtail(&job->session_fb_lookup_list, &session->pp_job_fb_lookup_list[fb_lookup_id]);

	mali_pp_scheduler_unlock();

	/* We hold a PM reference for every job we hold queued (and running) */
	_mali_osk_pm_dev_ref_add();

	/* Add job to Timeline system. */
	point = mali_timeline_system_add_tracker(session->timeline_system, &job->tracker, MALI_TIMELINE_PP);

	return point;
}
/**
 * User-space entry point (ioctl) for starting a PP job.
 *
 * Creates the kernel-side job object from @uargs, submits it to the
 * scheduler/Timeline, and writes the resulting Timeline point back to the
 * user-space pointer supplied in the job arguments.
 *
 * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_NOMEM if the job could
 *         not be created; _MALI_OSK_ERR_ITEM_NOT_FOUND if the job started but
 *         the Timeline point could not be copied back to user space.
 */
_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, _mali_uk_pp_start_job_s *uargs)
{
	struct mali_session_data *session;
	struct mali_pp_job *job;
	mali_timeline_point point;
	u32 __user *timeline_point_ptr = NULL;

	MALI_DEBUG_ASSERT_POINTER(uargs);
	MALI_DEBUG_ASSERT_POINTER(ctx);

	session = (struct mali_session_data*)ctx;

	job = mali_pp_job_create(session, uargs, mali_scheduler_get_new_id());
	if (NULL == job) {
		MALI_PRINT_ERROR(("Failed to create PP job.\n"));
		return _MALI_OSK_ERR_NOMEM;
	}

	/* Read the destination pointer before submitting: the scheduler owns
	 * the job after submission and may delete it at any time. */
	timeline_point_ptr = (u32 __user *) job->uargs.timeline_point_ptr;

	point = mali_pp_scheduler_submit_job(session, job);
	/* The job must not be referenced after this point. */

	if (0 != _mali_osk_put_user(((u32) point), timeline_point_ptr)) {
		/* Let user space know that something failed after the job was started. */
		return _MALI_OSK_ERR_ITEM_NOT_FOUND;
	}

	return _MALI_OSK_ERR_OK;
}
/**
 * User-space entry point (ioctl) for starting a linked GP + PP job pair.
 *
 * The GP job is created with the PP job's tracker as a dependency, so the PP
 * job will not run before the GP job has completed.  Both jobs are submitted
 * before the PP job's Timeline point is copied back to user space.
 *
 * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_NOMEM on copy or
 *         allocation failure; _MALI_OSK_ERR_ITEM_NOT_FOUND if the jobs
 *         started but the Timeline point could not be written back.
 */
_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, _mali_uk_pp_and_gp_start_job_s *uargs)
{
	struct mali_session_data *session;
	_mali_uk_pp_and_gp_start_job_s kargs;
	struct mali_pp_job *pp_job;
	struct mali_gp_job *gp_job;
	u32 __user *timeline_point_ptr = NULL;
	mali_timeline_point point;

	MALI_DEBUG_ASSERT_POINTER(ctx);
	MALI_DEBUG_ASSERT_POINTER(uargs);

	session = (struct mali_session_data *) ctx;

	if (0 != _mali_osk_copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_and_gp_start_job_s))) {
		return _MALI_OSK_ERR_NOMEM;
	}

	pp_job = mali_pp_job_create(session, kargs.pp_args, mali_scheduler_get_new_id());
	if (NULL == pp_job) {
		MALI_PRINT_ERROR(("Failed to create PP job.\n"));
		return _MALI_OSK_ERR_NOMEM;
	}

	/* The GP job depends on the PP job's tracker. */
	gp_job = mali_gp_job_create(session, kargs.gp_args, mali_scheduler_get_new_id(), mali_pp_job_get_tracker(pp_job));
	if (NULL == gp_job) {
		MALI_PRINT_ERROR(("Failed to create GP job.\n"));
		mali_pp_job_delete(pp_job);
		return _MALI_OSK_ERR_NOMEM;
	}

	/* Read before submission; the schedulers own the jobs afterwards. */
	timeline_point_ptr = (u32 __user *) pp_job->uargs.timeline_point_ptr;

	/* Submit GP job. */
	mali_gp_scheduler_submit_job(session, gp_job);
	gp_job = NULL;

	/* Submit PP job. */
	point = mali_pp_scheduler_submit_job(session, pp_job);
	pp_job = NULL;

	if (0 != _mali_osk_put_user(((u32) point), timeline_point_ptr)) {
		/* Let user space know that something failed after the jobs were started. */
		return _MALI_OSK_ERR_ITEM_NOT_FOUND;
	}

	return _MALI_OSK_ERR_OK;
}
/**
 * User-space query: report total and currently enabled number of PP cores.
 */
_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
{
	MALI_DEBUG_ASSERT_POINTER(args);
	MALI_DEBUG_ASSERT_POINTER(args->ctx);
	args->number_of_total_cores = num_cores;
	args->number_of_enabled_cores = enabled_cores;
	return _MALI_OSK_ERR_OK;
}
1258 u32 mali_pp_scheduler_get_num_cores_total(void)
1263 u32 mali_pp_scheduler_get_num_cores_enabled(void)
1265 return enabled_cores;
1268 _mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
1270 MALI_DEBUG_ASSERT_POINTER(args);
1271 MALI_DEBUG_ASSERT_POINTER(args->ctx);
1272 args->version = pp_version;
1273 return _MALI_OSK_ERR_OK;
/**
 * User-space request to disable matching writeback units on queued jobs.
 *
 * Iterates all queued jobs for the given frame builder id (via the
 * session's fb lookup list) and disables WB0/WB1/WB2 on jobs whose WB
 * source address matches the one supplied by user space.
 */
void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
{
	struct mali_session_data *session;
	struct mali_pp_job *job;
	struct mali_pp_job *tmp;
	u32 fb_lookup_id;

	MALI_DEBUG_ASSERT_POINTER(args);
	MALI_DEBUG_ASSERT_POINTER(args->ctx);

	session = (struct mali_session_data*)args->ctx;

	fb_lookup_id = args->fb_id & MALI_PP_JOB_FB_LOOKUP_LIST_MASK;

	mali_pp_scheduler_lock();

	/* Iterate over all jobs for given frame builder_id. */
	_MALI_OSK_LIST_FOREACHENTRY(job, tmp, &session->pp_job_fb_lookup_list[fb_lookup_id], struct mali_pp_job, session_fb_lookup_list) {
		MALI_DEBUG_CODE(u32 disable_mask = 0);

		if (mali_pp_job_get_frame_builder_id(job) == (u32) args->fb_id) {
			MALI_DEBUG_CODE(disable_mask |= 0xD<<(4*3));
			if (args->wb0_memory == job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR/sizeof(u32)]) {
				MALI_DEBUG_CODE(disable_mask |= 0x1<<(4*1));
				mali_pp_job_disable_wb0(job);
			}
			if (args->wb1_memory == job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR/sizeof(u32)]) {
				MALI_DEBUG_CODE(disable_mask |= 0x2<<(4*2));
				mali_pp_job_disable_wb1(job);
			}
			if (args->wb2_memory == job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR/sizeof(u32)]) {
				/* NOTE(review): this bit (nibble 3) collides with the 0xD
				 * frame-builder marker set above, so the debug-only mask is
				 * ambiguous when WB2 is disabled — verify intended encoding. */
				MALI_DEBUG_CODE(disable_mask |= 0x3<<(4*3));
				mali_pp_job_disable_wb2(job);
			}
			MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disable WB: 0x%X.\n", disable_mask));
		} else {
			MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Disable WB mismatching FB.\n"));
		}
	}

	mali_pp_scheduler_unlock();
}
/**
 * Abort all PP jobs belonging to an exiting/aborting session.
 *
 * Fully unstarted jobs are dequeued, failed and deleted here; partially
 * started jobs stay in the session until their running sub jobs complete.
 * Running groups are snapshotted under the scheduler lock and then aborted
 * without the lock held (mali_group_abort_session() takes group locks).
 */
void mali_pp_scheduler_abort_session(struct mali_session_data *session)
{
	u32 i = 0;
	struct mali_pp_job *job, *tmp_job;
	struct mali_group *group, *tmp_group;
	struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
	_MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs);

	MALI_DEBUG_ASSERT_POINTER(session);
	MALI_DEBUG_ASSERT(session->is_aborting);

	MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Aborting all jobs from session 0x%08X.\n", session));

	mali_pp_scheduler_lock();

	/* Find all jobs from the aborting session. */
	_MALI_OSK_LIST_FOREACHENTRY(job, tmp_job, &session->pp_job_list, struct mali_pp_job, session_list) {
		/* Remove job from queue. */
		if (mali_pp_job_is_virtual(job)) {
			MALI_DEBUG_ASSERT(1 == mali_pp_job_get_sub_job_count(job));
			if (0 == mali_pp_job_get_first_unstarted_sub_job(job)) {
				--virtual_job_queue.depth;
			}
		} else {
			/* Account only for the sub jobs that were still queued. */
			job_queue.depth -= mali_pp_job_get_sub_job_count(job) - mali_pp_job_get_first_unstarted_sub_job(job);
		}

		_mali_osk_list_delinit(&job->list);
		_mali_osk_list_delinit(&job->session_fb_lookup_list);

		mali_pp_job_mark_unstarted_failed(job);

		if (mali_pp_job_is_complete(job)) {
			/* Job is complete, remove from session list. */
			_mali_osk_list_delinit(&job->session_list);

			/* Move job to local list for release and deletion. */
			_mali_osk_list_add(&job->list, &removed_jobs);

			MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Aborted PP job %u (0x%08X).\n", mali_pp_job_get_id(job), job));
		} else {
			MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Keeping partially started PP job %u (0x%08X) in session.\n", mali_pp_job_get_id(job), job));
		}
	}

	/* Snapshot all working and idle groups so they can be aborted without
	 * holding the scheduler lock. */
	_MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working, struct mali_group, pp_scheduler_list) {
		groups[i++] = group;
	}

	_MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_idle, struct mali_group, pp_scheduler_list) {
		groups[i++] = group;
	}

	mali_pp_scheduler_unlock();

	/* Release and delete all found jobs from the aborting session. */
	_MALI_OSK_LIST_FOREACHENTRY(job, tmp_job, &removed_jobs, struct mali_pp_job, list) {
		mali_timeline_tracker_release(&job->tracker);
		mali_pp_job_delete(job);
		mali_pp_scheduler_job_completed();
	}

	/* Abort any running jobs from the session. */
	while (i > 0) {
		mali_group_abort_session(groups[--i], session);
	}

	if (mali_pp_scheduler_has_virtual_group()) {
		mali_group_abort_session(virtual_group, session);
	}
}
1391 static mali_bool mali_pp_scheduler_is_suspended(void *data)
1395 /* This callback does not use the data pointer. */
1398 mali_pp_scheduler_lock();
1400 ret = pause_count > 0
1401 && _mali_osk_list_empty(&group_list_working)
1402 && VIRTUAL_GROUP_WORKING != virtual_group_state;
1404 mali_pp_scheduler_unlock();
1409 struct mali_pp_core *mali_pp_scheduler_get_virtual_pp(void)
1411 if (mali_pp_scheduler_has_virtual_group()) {
1412 return mali_group_get_pp_core(virtual_group);
1418 #if MALI_STATE_TRACKING
1419 u32 mali_pp_scheduler_dump_state(char *buf, u32 size)
1422 struct mali_group *group;
1423 struct mali_group *temp;
1425 n += _mali_osk_snprintf(buf + n, size - n, "PP:\n");
1426 n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue.normal_pri) ? "empty" : "not empty");
1427 n += _mali_osk_snprintf(buf + n, size - n, "\tHigh priority queue is %s\n", _mali_osk_list_empty(&job_queue.high_pri) ? "empty" : "not empty");
1428 n += _mali_osk_snprintf(buf + n, size - n, "\n");
1430 _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, pp_scheduler_list) {
1431 n += mali_group_dump_state(group, buf + n, size - n);
1434 _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list) {
1435 n += mali_group_dump_state(group, buf + n, size - n);
1438 _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, pp_scheduler_list) {
1439 n += mali_group_dump_state(group, buf + n, size - n);
1442 if (mali_pp_scheduler_has_virtual_group()) {
1443 n += mali_group_dump_state(virtual_group, buf + n, size -n);
1446 n += _mali_osk_snprintf(buf + n, size - n, "\n");
/* This function is intended for power on reset of all cores.
 * No locking is done for the list iteration, which can only be safe if the
 * scheduler is paused and all cores idle.  That is always the case on init and
 * power on. */
void mali_pp_scheduler_reset_all_groups(void)
{
	struct mali_group *group, *temp;
	struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
	s32 i = 0;

	/* The virtual group does its own locking. */
	if (mali_pp_scheduler_has_virtual_group()) {
		mali_group_lock(virtual_group);
		mali_group_reset(virtual_group);
		mali_group_unlock(virtual_group);
	}

	MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
	MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
	mali_pp_scheduler_lock();
	/* Snapshot the idle groups; the reset itself must happen without the
	 * scheduler lock held (mali_group_lock() may sleep). */
	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list) {
		groups[i++] = group;
	}
	mali_pp_scheduler_unlock();

	while (i > 0) {
		group = groups[--i];

		mali_group_lock(group);
		mali_group_reset(group);
		mali_group_unlock(group);
	}
}
1484 void mali_pp_scheduler_zap_all_active(struct mali_session_data *session)
1486 struct mali_group *group, *temp;
1487 struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
1490 if (mali_pp_scheduler_has_virtual_group()) {
1491 mali_group_zap_session(virtual_group, session);
1494 mali_pp_scheduler_lock();
1495 _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, pp_scheduler_list) {
1496 groups[i++] = group;
1498 mali_pp_scheduler_unlock();
1501 mali_group_zap_session(groups[--i], session);
/* A pm reference must be taken with _mali_osk_pm_dev_ref_add_no_power_on
 * before calling this function to avoid Mali powering down as HW is accessed.
 */
static void mali_pp_scheduler_enable_group_internal(struct mali_group *group)
{
	MALI_DEBUG_ASSERT_POINTER(group);

	mali_group_lock(group);

	if (MALI_GROUP_STATE_DISABLED != group->state) {
		mali_group_unlock(group);
		MALI_DEBUG_PRINT(4, ("Mali PP scheduler: PP group %p already enabled.\n", group));
		return;
	}

	MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Enabling PP group %p.\n", group));

	mali_pp_scheduler_lock();

	MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
	++enabled_cores;

	if (mali_pp_scheduler_has_virtual_group()) {
		mali_bool update_hw;

		/* Add group to virtual group.  Note the lock order: both the
		 * scheduler lock and the group lock are dropped before the
		 * virtual group lock is taken. */
		_mali_osk_list_delinit(&(group->pp_scheduler_list));
		group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;

		mali_pp_scheduler_unlock();
		mali_group_unlock(group);

		mali_group_lock(virtual_group);

		update_hw = mali_pm_is_power_on();
		/* Get ref of group domain */
		mali_group_get_pm_domain_ref(group);

		MALI_DEBUG_ASSERT(NULL == group->pm_domain ||
				  MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(group->pm_domain));

		if (update_hw) {
			/* Power is on: bring the core up and reset it before joining. */
			mali_group_lock(group);
			mali_group_power_on_group(group);
			mali_group_reset(group);
			mali_group_unlock(group);
		}

		mali_pp_scheduler_enable_empty_virtual();
		mali_group_add_group(virtual_group, group, update_hw);
		MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Done enabling group %p. Added to virtual group.\n", group));

		mali_group_unlock(virtual_group);
	} else {
		/* Get ref of group domain */
		mali_group_get_pm_domain_ref(group);

		MALI_DEBUG_ASSERT(NULL == group->pm_domain ||
				  MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(group->pm_domain));

		/* Put group on idle list. */
		if (mali_pm_is_power_on()) {
			mali_group_power_on_group(group);
			mali_group_reset(group);
		}

		_mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
		group->state = MALI_GROUP_STATE_IDLE;

		MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Done enabling group %p. Now on idle list.\n", group));
		mali_pp_scheduler_unlock();
		mali_group_unlock(group);
	}
}
/**
 * Enable a disabled PP group and kick the scheduler.
 * Takes the required no-power-on PM reference around the HW access.
 */
void mali_pp_scheduler_enable_group(struct mali_group *group)
{
	MALI_DEBUG_ASSERT_POINTER(group);

	_mali_osk_pm_dev_ref_add_no_power_on();
	mali_pp_scheduler_enable_group_internal(group);
	_mali_osk_pm_dev_ref_dec_no_power_on();

	/* Pick up any jobs that might have been queued if all PP groups were disabled. */
	mali_pp_scheduler_schedule();
}
/* Disable a PP group.  Caller must hold a no-power-on PM reference and have
 * the scheduler suspended (no group may be working). */
static void mali_pp_scheduler_disable_group_internal(struct mali_group *group)
{
	if (mali_pp_scheduler_has_virtual_group()) {
		mali_group_lock(virtual_group);

		MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
		if (MALI_GROUP_STATE_JOINING_VIRTUAL == group->state) {
			/* The group was in the process of being added to the virtual group.  We
			 * only need to change the state to reverse this. */
			group->state = MALI_GROUP_STATE_LEAVING_VIRTUAL;
		} else if (MALI_GROUP_STATE_IN_VIRTUAL == group->state) {
			/* Remove group from virtual group.  The state of the group will be
			 * LEAVING_VIRTUAL and the group will not be on any scheduler list. */
			mali_group_remove_group(virtual_group, group);

			mali_pp_scheduler_disable_empty_virtual();
		}

		mali_group_unlock(virtual_group);
	}

	mali_group_lock(group);
	mali_pp_scheduler_lock();

	MALI_DEBUG_ASSERT( MALI_GROUP_STATE_IDLE == group->state
	                   || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
	                   || MALI_GROUP_STATE_DISABLED == group->state);

	if (MALI_GROUP_STATE_DISABLED == group->state) {
		MALI_DEBUG_PRINT(4, ("Mali PP scheduler: PP group %p already disabled.\n", group));
	} else {
		MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disabling PP group %p.\n", group));

		--enabled_cores;
		_mali_osk_list_move(&(group->pp_scheduler_list), &group_list_disabled);
		group->state = MALI_GROUP_STATE_DISABLED;

		/* Power the core down and drop the PM domain reference taken
		 * when the group was enabled. */
		mali_group_power_off_group(group, MALI_TRUE);
		mali_group_put_pm_domain_ref(group);
	}

	mali_pp_scheduler_unlock();
	mali_group_unlock(group);
}
/**
 * Disable a PP group.  The scheduler is drained first, since groups can only
 * be disabled while idle; scheduling resumes afterwards.
 */
void mali_pp_scheduler_disable_group(struct mali_group *group)
{
	MALI_DEBUG_ASSERT_POINTER(group);

	mali_pp_scheduler_suspend();

	_mali_osk_pm_dev_ref_add_no_power_on();
	mali_pp_scheduler_disable_group_internal(group);
	_mali_osk_pm_dev_ref_dec_no_power_on();

	mali_pp_scheduler_resume();
}
/**
 * Notify all user-space sessions that the number of enabled PP cores changed.
 *
 * Notification objects cannot be allocated while the session lock is held, so
 * the function pre-allocates for the current session count, then retries the
 * whole round if new sessions appeared before the lock was taken.
 */
static void mali_pp_scheduler_notify_core_change(u32 num_cores)
{
	mali_bool done = MALI_FALSE;

	if (mali_is_mali450()) {
		/* Not signalled on Mali-450 platforms. */
		return;
	}

	/*
	 * This function gets a bit complicated because we can't hold the session lock while
	 * allocating notification objects.
	 */
	while (!done) {
		u32 i;
		u32 num_sessions_alloc;
		u32 num_sessions_with_lock;
		u32 used_notification_objects = 0;
		_mali_osk_notification_t **notobjs;

		/* Pre allocate the number of notifications objects we need right now (might change after lock has been taken) */
		num_sessions_alloc = mali_session_get_count();
		if (0 == num_sessions_alloc) {
			/* No sessions to report to */
			return;
		}

		notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
		if (NULL == notobjs) {
			MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n"));
			/* there is probably no point in trying again, system must be really low on memory and probably unusable now anyway */
			return;
		}

		for (i = 0; i < num_sessions_alloc; i++) {
			notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_NUM_CORE_CHANGE, sizeof(_mali_uk_pp_num_cores_changed_s));
			if (NULL != notobjs[i]) {
				_mali_uk_pp_num_cores_changed_s *data = notobjs[i]->result_buffer;
				data->number_of_enabled_cores = num_cores;
			} else {
				/* Best effort: sessions matched with a NULL slot simply
				 * miss this notification. */
				MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure %u)\n", i));
			}
		}

		mali_session_lock();

		/* number of sessions will not change while we hold the lock */
		num_sessions_with_lock = mali_session_get_count();

		if (num_sessions_alloc >= num_sessions_with_lock) {
			/* We have allocated enough notification objects for all the sessions atm */
			struct mali_session_data *session, *tmp;
			MALI_SESSION_FOREACH(session, tmp, link) {
				MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
				if (NULL != notobjs[used_notification_objects]) {
					mali_session_send_notification(session, notobjs[used_notification_objects]);
					notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
				}
				used_notification_objects++;
			}
			done = MALI_TRUE;
		}

		mali_session_unlock();

		/* Delete any remaining/unused notification objects */
		for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
			if (NULL != notobjs[used_notification_objects]) {
				_mali_osk_notification_delete(notobjs[used_notification_objects]);
			}
		}

		_mali_osk_free(notobjs);
	}
}
/**
 * Enable disabled PP groups until @target_core_nr cores are enabled, or the
 * disabled list runs out.  Kicks the scheduler at the end so queued jobs can
 * start on the newly enabled cores.
 */
static void mali_pp_scheduler_core_scale_up(unsigned int target_core_nr)
{
	MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - enabled_cores));

	/* Hold a no-power-on PM reference across the HW accesses done by
	 * mali_pp_scheduler_enable_group_internal(). */
	_mali_osk_pm_dev_ref_add_no_power_on();
	_mali_osk_pm_dev_barrier();

	while (target_core_nr > enabled_cores) {
		/*
		 * If there are any cores which do not belong to any domain,
		 * then these will always be found at the head of the list and
		 * we'll thus enabled these first.
		 */

		mali_pp_scheduler_lock();

		if (!_mali_osk_list_empty(&group_list_disabled)) {
			struct mali_group *group;

			group = _MALI_OSK_LIST_ENTRY(group_list_disabled.next, struct mali_group, pp_scheduler_list);

			MALI_DEBUG_ASSERT_POINTER(group);
			MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);

			/* Drop the scheduler lock before enabling; the enable
			 * path takes group/virtual-group locks. */
			mali_pp_scheduler_unlock();

			mali_pp_scheduler_enable_group_internal(group);
		} else {
			mali_pp_scheduler_unlock();
			break; /* no more groups on disabled list */
		}
	}

	_mali_osk_pm_dev_ref_dec_no_power_on();

	mali_pp_scheduler_schedule();
}
/**
 * Disable PP groups until only @target_core_nr cores remain enabled.
 *
 * The scheduler is suspended for the duration (groups can only be disabled
 * while idle).  Preference order: cores in PMU power domains first (those can
 * actually be powered off), then idle physical groups, and finally physical
 * groups pulled out of the virtual group.
 */
static void mali_pp_scheduler_core_scale_down(unsigned int target_core_nr)
{
	MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, enabled_cores - target_core_nr));

	mali_pp_scheduler_suspend();

	MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));

	_mali_osk_pm_dev_ref_add_no_power_on();

	if (NULL != mali_pmu_get_global_pmu_core()) {
		int i;

		/* Walk domains from highest index down, disabling PP cores
		 * until the target is reached. */
		for (i = MALI_MAX_NUMBER_OF_DOMAINS - 1; i >= 0; i--) {
			if (target_core_nr < enabled_cores) {
				struct mali_pm_domain *domain;

				domain = mali_pm_domain_get_from_index(i);

				/* Domain is valid and has pp cores */
				if ((NULL != domain) && (NULL != domain->group_list)) {
					struct mali_group *group;

					MALI_PM_DOMAIN_FOR_EACH_GROUP(group, domain) {
						/* If group is pp core */
						if (NULL != mali_group_get_pp_core(group)) {
							mali_pp_scheduler_disable_group_internal(group);
							if (target_core_nr >= enabled_cores) {
								break;
							}
						}
					}
				}
			} else {
				break;
			}
		}
	}

	/*
	 * Didn't find enough cores associated with a power domain,
	 * so we need to disable cores which we can't power off with the PMU.
	 * Start with physical groups used by the scheduler,
	 * then remove physical from virtual if even more groups are needed.
	 */
	while (target_core_nr < enabled_cores) {
		mali_pp_scheduler_lock();
		if (!_mali_osk_list_empty(&group_list_idle)) {
			struct mali_group *group;

			group = _MALI_OSK_LIST_ENTRY(group_list_idle.next, struct mali_group, pp_scheduler_list);
			MALI_DEBUG_ASSERT_POINTER(group);

			mali_pp_scheduler_unlock();

			mali_pp_scheduler_disable_group_internal(group);
		} else {
			mali_pp_scheduler_unlock();
			break; /* No more physical groups */
		}
	}

	if (mali_pp_scheduler_has_virtual_group()) {
		while (target_core_nr < enabled_cores) {
			mali_group_lock(virtual_group);
			if (!_mali_osk_list_empty(&virtual_group->group_list)) {
				struct mali_group *group;

				group = _MALI_OSK_LIST_ENTRY(virtual_group->group_list.next, struct mali_group, group_list);
				MALI_DEBUG_ASSERT_POINTER(group);

				mali_group_unlock(virtual_group);

				mali_pp_scheduler_disable_group_internal(group);
			} else {
				mali_group_unlock(virtual_group);
				break; /* No more physical groups in virtual group */
			}
		}
	}

	_mali_osk_pm_dev_ref_dec_no_power_on();

	mali_pp_scheduler_resume();
}
/**
 * Set the number of enabled PP cores (DVFS/core-scaling entry point).
 *
 * @param target_core_nr  Requested number of enabled cores (1..num_cores).
 * @param override        Bypass the core_scaling_enabled setting.
 * @return 0 on success (or no change needed); -EPERM when core scaling is
 *         disabled and not overridden; -EINVAL for out-of-range targets.
 */
int mali_pp_scheduler_set_perf_level(unsigned int target_core_nr, mali_bool override)
{
	if (target_core_nr == enabled_cores) return 0;
	if (MALI_FALSE == core_scaling_enabled && MALI_FALSE == override) return -EPERM;
	if (target_core_nr > num_cores) return -EINVAL;
	if (0 == target_core_nr) return -EINVAL;

	if (target_core_nr > enabled_cores) {
		mali_pp_scheduler_core_scale_up(target_core_nr);
	} else if (target_core_nr < enabled_cores) {
		mali_pp_scheduler_core_scale_down(target_core_nr);
	}

	if (target_core_nr != enabled_cores) {
		MALI_DEBUG_PRINT(2, ("Core scaling failed, target number: %d, actual number: %d\n", target_core_nr, enabled_cores));
	}

	/* Report the number actually achieved, not the number requested. */
	mali_pp_scheduler_notify_core_change(enabled_cores);

	return 0;
}
1877 void mali_pp_scheduler_core_scaling_enable(void)
1879 /* PS: Core scaling is by default enabled */
1880 core_scaling_enabled = MALI_TRUE;
1883 void mali_pp_scheduler_core_scaling_disable(void)
1885 core_scaling_enabled = MALI_FALSE;
1888 mali_bool mali_pp_scheduler_core_scaling_is_enabled(void)
1890 return core_scaling_enabled;
/** Bookkeeping hook called when a PP job enters the queue. */
static void mali_pp_scheduler_job_queued(void)
{
	if (!mali_utilization_enabled()) {
		return;
	}

	/*
	 * We cheat a little bit by counting the PP as busy from the time a PP job is queued.
	 * This will be fine because we only loose the tiny idle gap between jobs, but
	 * we will instead get less utilization work to do (less locks taken)
	 */
	mali_utilization_pp_start();
}
/** Bookkeeping hook called when a PP job leaves the system. */
static void mali_pp_scheduler_job_completed(void)
{
	/* Release the per-job PM reference.  Note: the reference is taken in
	 * mali_pp_scheduler_submit_job() via _mali_osk_pm_dev_ref_add() —
	 * mali_pp_scheduler_job_queued() (contrary to the old comment here)
	 * only updates utilization accounting. */
	_mali_osk_pm_dev_ref_dec();

	if (mali_utilization_enabled()) {
		mali_utilization_pp_end();
	}
}
/**
 * Abort a job that was activated while its session is aborting.
 * Called with the scheduler lock held; the lock is released before the
 * Timeline tracker is released (tracker release must not run under it).
 */
static void mali_pp_scheduler_abort_job_and_unlock_scheduler(struct mali_pp_job *job)
{
	MALI_DEBUG_ASSERT_POINTER(job);
	MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);

	/* This job should not be on any lists. */
	MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
	MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_list));

	/* Unhook from the frame-builder lookup list while still locked. */
	_mali_osk_list_delinit(&job->session_fb_lookup_list);

	mali_pp_scheduler_unlock();

	/* Release tracker. */
	mali_timeline_tracker_release(&job->tracker);
}
/**
 * Insert an activated PP job into the physical or virtual job queue.
 *
 * Chooses high/normal priority sub-queue per session setting, keeps the
 * queue ordered via mali_pp_job_should_start_after(), and returns a
 * scheduling mask indicating whether a PP scheduling pass is worthwhile
 * (i.e. an idle group exists to run the job).
 */
static mali_scheduler_mask mali_pp_scheduler_queue_job(struct mali_pp_job *job)
{
	_mali_osk_list_t *queue = NULL;
	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
	struct mali_pp_job *iter, *tmp;

	MALI_DEBUG_ASSERT_POINTER(job);
	MALI_DEBUG_ASSERT_POINTER(job->session);

#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
	/* Map-on-demand dma-bufs must be mapped before queuing (may sleep). */
	if (mali_pp_job_needs_dma_buf_mapping(job)) {
		mali_dma_buf_map_job(job);
	}
#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */

	mali_pp_scheduler_lock();

	if (unlikely(job->session->is_aborting)) {
		/* Before checking if the session is aborting, the scheduler must be locked. */
		MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);

		MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while session is aborting.\n", mali_pp_job_get_id(job), job));

		/* Releases the scheduler lock and the job's Timeline tracker. */
		mali_pp_scheduler_abort_job_and_unlock_scheduler(job);

		/* Delete job. */
#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
		mali_pp_scheduler_deferred_job_delete(job);
#else
		mali_pp_job_delete(job);
#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */

		/* NOTE(review): the PM reference taken for this job in
		 * mali_pp_scheduler_submit_job() does not appear to be released
		 * on this abort path — verify against _mali_osk_pm_dev_ref_dec()
		 * usage elsewhere. */

		/* Since we are aborting we ignore the scheduler mask. */
		return MALI_SCHEDULER_MASK_EMPTY;
	}

	mali_pp_scheduler_job_queued();

#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
	trace_gpu_job_enqueue(mali_pp_job_get_tid(job), mali_pp_job_get_id(job), "PP");
#endif

	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE, job->pid, job->tid, job->uargs.frame_builder_id, job->uargs.flush_id, 0);

	job->cache_order = mali_scheduler_get_new_cache_order();

	/* Determine which queue the job should be added to. */
	if (mali_pp_job_is_virtual(job)) {
		if (job->session->use_high_priority_job_queue) {
			queue = &virtual_job_queue.high_pri;
		} else {
			queue = &virtual_job_queue.normal_pri;
		}

		virtual_job_queue.depth += 1;

		/* Set schedule bitmask if the virtual group is idle. */
		if (VIRTUAL_GROUP_IDLE == virtual_group_state) {
			schedule_mask |= MALI_SCHEDULER_MASK_PP;
		}
	} else {
		if (job->session->use_high_priority_job_queue) {
			queue = &job_queue.high_pri;
		} else {
			queue = &job_queue.normal_pri;
		}

		job_queue.depth += mali_pp_job_get_sub_job_count(job);

		/* Set schedule bitmask if there are physical PP cores available, or if there is an
		 * idle virtual group. */
		if (!_mali_osk_list_empty(&group_list_idle)
		    || (mali_pp_scheduler_has_virtual_group()
			&& (VIRTUAL_GROUP_IDLE == virtual_group_state))) {
			schedule_mask |= MALI_SCHEDULER_MASK_PP;
		}
	}

	/* Find position in queue where job should be added. */
	_MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, queue, struct mali_pp_job, list) {
		if (mali_pp_job_should_start_after(job, iter)) {
			break;
		}
	}

	/* Add job to queue. */
	_mali_osk_list_add(&job->list, &iter->list);

	/* Add job to session list. */
	_mali_osk_list_addtail(&job->session_list, &(job->session->pp_job_list));

	MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) with %u parts queued.\n",
			     mali_pp_job_is_virtual(job) ? "Virtual" : "Physical",
			     mali_pp_job_get_id(job), job, mali_pp_job_get_sub_job_count(job)));

	mali_pp_scheduler_unlock();

	return schedule_mask;
}
2032 mali_scheduler_mask mali_pp_scheduler_activate_job(struct mali_pp_job *job)
2034 mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
2036 MALI_DEBUG_ASSERT_POINTER(job);
2037 MALI_DEBUG_ASSERT_POINTER(job->session);
2039 MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Timeline activation for job %u (0x%08X).\n", mali_pp_job_get_id(job), job));
2041 if (MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT & job->tracker.activation_error) {
2042 MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) activated with error, aborting.\n", mali_pp_job_get_id(job), job));
2044 mali_pp_scheduler_lock();
2045 mali_pp_scheduler_abort_job_and_unlock_scheduler(job);
2047 mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
2048 mali_pp_scheduler_finalize_job(job);
2050 return MALI_SCHEDULER_MASK_EMPTY;
2053 /* PP job is ready to run, queue it. */
2055 #if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
2056 if (mali_pp_job_needs_dma_buf_mapping(job)) {
2057 mali_pp_scheduler_deferred_job_queue(job);
2059 return MALI_SCHEDULER_MASK_EMPTY;
2061 #endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
2063 schedule_mask = mali_pp_scheduler_queue_job(job);
2065 return schedule_mask;