3 * (C) COPYRIGHT 2011-2012 ARM Limited. All rights reserved.
5 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
8 * A copy of the licence is included with the program, and can also be obtained from Free Software
9 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
16 * Job Scheduler: Completely Fair Policy Implementation
19 #include <kbase/src/common/mali_kbase.h>
20 #include <kbase/src/common/mali_kbase_jm.h>
21 #include <kbase/src/common/mali_kbase_js.h>
22 #include <kbase/src/common/mali_kbase_js_policy_cfs.h>
/*
 * Define for when dumping is enabled.
 * This should not be based on the instrumentation level as whether dumping is
 * enabled for a particular level is down to the integrator. However this is
 * being used for now as otherwise the cinstr headers would be needed.
 */
#define CINSTR_DUMPING_ENABLED ( 2 == MALI_INSTRUMENTATION_LEVEL )

/** Fixed point constants used for runtime weight calculations */
#define WEIGHT_FIXEDPOINT_SHIFT 10
#define WEIGHT_TABLE_SIZE       40
#define WEIGHT_0_NICE           (WEIGHT_TABLE_SIZE/2)
#define WEIGHT_0_VAL            (1 << WEIGHT_FIXEDPOINT_SHIFT)

/** Mask covering every valid variant index bit in a slot-to-variant lookup entry */
#define LOOKUP_VARIANT_MASK ((1u<<KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS) - 1u)
/** Core requirements that all the variants support */
#define JS_CORE_REQ_ALL_OTHERS \
	( BASE_JD_REQ_CF | BASE_JD_REQ_V | BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_PERMON | BASE_JD_REQ_EXTERNAL_RESOURCES )

/** Context requirements that all the variants support */
#if BASE_HW_ISSUE_8987 != 0
/* In this HW workaround, restrict Compute-only contexts and Compute jobs onto job slot[2],
 * which will ensure their affinity does not intersect GLES jobs */
#define JS_CTX_REQ_ALL_OTHERS \
	( KBASE_CTX_FLAG_CREATE_FLAGS_SET | KBASE_CTX_FLAG_PRIVILEGED )
/* NOTE(review): value of JS_CORE_REQ_COMPUTE_SLOT reconstructed as plain CS - confirm */
#define JS_CORE_REQ_COMPUTE_SLOT \
	( BASE_JD_REQ_CS )
#define JS_CORE_REQ_ONLY_COMPUTE_SLOT \
	( BASE_JD_REQ_ONLY_COMPUTE )
#else /* BASE_HW_ISSUE_8987 != 0 */
/* Otherwise, compute-only contexts/compute jobs can use any job slot */
#define JS_CTX_REQ_ALL_OTHERS \
	( KBASE_CTX_FLAG_CREATE_FLAGS_SET | KBASE_CTX_FLAG_PRIVILEGED | KBASE_CTX_FLAG_HINT_ONLY_COMPUTE )
#define JS_CORE_REQ_COMPUTE_SLOT \
	( BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE )
#define JS_CORE_REQ_ONLY_COMPUTE_SLOT \
	( BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE )
#endif /* BASE_HW_ISSUE_8987 != 0 */
65 /* core_req variants are ordered by least restrictive first, so that our
66 * algorithm in cached_variant_idx_init picks the least restrictive variant for
67 * each job . Note that coherent_group requirement is added to all CS variants as the
68 * selection of job-slot does not depend on the coherency requirement. */
69 static const kbasep_atom_req core_req_variants[] ={
71 (JS_CORE_REQ_ALL_OTHERS | BASE_JD_REQ_FS),
72 (JS_CTX_REQ_ALL_OTHERS)
75 (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT),
76 (JS_CTX_REQ_ALL_OTHERS)
79 (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT | BASE_JD_REQ_T),
80 (JS_CTX_REQ_ALL_OTHERS)
83 /* The last variant is one guarenteed to support Compute contexts/job, or
84 * NSS jobs. In the case of a context that's specified as 'Only Compute', it'll not allow
85 * Tiler or Fragment jobs, and so those get rejected */
87 (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_ONLY_COMPUTE_SLOT | BASE_JD_REQ_NSS ),
88 (JS_CTX_REQ_ALL_OTHERS | KBASE_CTX_FLAG_HINT_ONLY_COMPUTE)
92 #define NUM_CORE_REQ_VARIANTS NELEMS(core_req_variants)
94 static const u32 variants_supported_ss_state[] =
96 (1u << 0), /* js[0] uses variant 0 (FS list)*/
97 (1u << 2) | (1u << 1), /* js[1] uses variants 1 and 2 (CS and CS+T lists)*/
98 (1u << 3) /* js[2] uses variant 3 (Compute list) */
101 static const u32 variants_supported_nss_state[] =
103 (1u << 0), /* js[0] uses variant 0 (FS list)*/
104 (1u << 2) | (1u << 1), /* js[1] uses variants 1 and 2 (CS and CS+T lists)*/
105 (1u << 3) /* js[2] uses variant 3 (Compute/NSS list) */
108 /* Defines for easy asserts 'is scheduled'/'is queued'/'is neither queued norscheduled' */
109 #define KBASEP_JS_CHECKFLAG_QUEUED (1u << 0) /**< Check the queued state */
110 #define KBASEP_JS_CHECKFLAG_SCHEDULED (1u << 1) /**< Check the scheduled state */
111 #define KBASEP_JS_CHECKFLAG_IS_QUEUED (1u << 2) /**< Expect queued state to be set */
112 #define KBASEP_JS_CHECKFLAG_IS_SCHEDULED (1u << 3) /**< Expect scheduled state to be set */
116 KBASEP_JS_CHECK_NOTQUEUED = KBASEP_JS_CHECKFLAG_QUEUED,
117 KBASEP_JS_CHECK_NOTSCHEDULED = KBASEP_JS_CHECKFLAG_SCHEDULED,
118 KBASEP_JS_CHECK_QUEUED = KBASEP_JS_CHECKFLAG_QUEUED | KBASEP_JS_CHECKFLAG_IS_QUEUED,
119 KBASEP_JS_CHECK_SCHEDULED = KBASEP_JS_CHECKFLAG_SCHEDULED | KBASEP_JS_CHECKFLAG_IS_SCHEDULED
122 typedef u32 kbasep_js_check;
/* Table autogenerated using util built from: kbase/scripts/gen_cfs_weight_of_prio.c */

/* Fixed-point runtime weight per nice level: index 0 corresponds to nice -20,
 * index WEIGHT_0_NICE (20) to nice 0 (weight WEIGHT_0_VAL == 1024),
 * index 39 to nice +19. Lower nice => smaller weight => slower-growing
 * virtual runtime => more GPU time. */
static const int weight_of_priority[] =
{
	/* -20 */     11,     14,     18,     23,
	/* -16 */     29,     36,     45,     56,
	/* -12 */     70,     88,    110,    137,
	/*  -8 */    171,    214,    268,    335,
	/*  -4 */    419,    524,    655,    819,
	/*   0 */   1024,   1280,   1600,   2000,
	/*   4 */   2500,   3125,   3906,   4883,
	/*   8 */   6104,   7630,   9538,  11923,
	/*  12 */  14904,  18630,  23288,  29110,
	/*  16 */  36388,  45485,  56856,  71070
};
146 * @note There is nothing to stop the priority of the ctx containing \a
147 * ctx_info changing during or immediately after this function is called
148 * (because its jsctx_mutex cannot be held during IRQ). Therefore, this
149 * function should only be seen as a heuristic guide as to the priority weight
152 STATIC u64 priority_weight(kbasep_js_policy_cfs_ctx *ctx_info, u32 time_us)
156 priority = ctx_info->process_priority + ctx_info->bag_priority;
158 /* Adjust runtime_us using priority weight if required */
159 if(priority != 0 && time_us != 0)
161 int clamped_priority;
163 /* Clamp values to min..max weights */
164 if(priority > OSK_PROCESS_PRIORITY_MAX)
166 clamped_priority = OSK_PROCESS_PRIORITY_MAX;
168 else if(priority < OSK_PROCESS_PRIORITY_MIN)
170 clamped_priority = OSK_PROCESS_PRIORITY_MIN;
174 clamped_priority = priority;
177 /* Fixed point multiplication */
178 time_delta_us = ((u64)time_us * weight_of_priority[WEIGHT_0_NICE + clamped_priority]);
179 /* Remove fraction */
180 time_delta_us = time_delta_us >> WEIGHT_FIXEDPOINT_SHIFT;
181 /* Make sure the time always increases */
182 if(0 == time_delta_us)
189 time_delta_us = time_us;
192 return time_delta_us;
195 #if KBASE_TRACE_ENABLE != 0
196 STATIC int kbasep_js_policy_trace_get_refcnt_nolock( kbase_device *kbdev, kbase_context *kctx )
198 kbasep_js_device_data *js_devdata;
202 js_devdata = &kbdev->js_data;
205 if ( as_nr != KBASEP_AS_NR_INVALID )
207 kbasep_js_per_as_data *js_per_as_data;
208 js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
210 refcnt = js_per_as_data->as_busy_refcount;
216 STATIC INLINE int kbasep_js_policy_trace_get_refcnt( kbase_device *kbdev, kbase_context *kctx )
218 kbasep_js_device_data *js_devdata;
221 js_devdata = &kbdev->js_data;
223 osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );
224 refcnt = kbasep_js_policy_trace_get_refcnt_nolock( kbdev, kctx );
225 osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );
229 #else /* KBASE_TRACE_ENABLE != 0 */
230 STATIC int kbasep_js_policy_trace_get_refcnt_nolock( kbase_device *kbdev, kbase_context *kctx )
232 CSTD_UNUSED( kbdev );
237 STATIC INLINE int kbasep_js_policy_trace_get_refcnt( kbase_device *kbdev, kbase_context *kctx )
239 CSTD_UNUSED( kbdev );
243 #endif /* KBASE_TRACE_ENABLE != 0 */
247 STATIC void kbasep_js_debug_check( kbasep_js_policy_cfs *policy_info, kbase_context *kctx, kbasep_js_check check_flag )
249 /* This function uses the ternary operator and non-explicit comparisons,
250 * because it makes for much shorter, easier to read code */
252 if ( check_flag & KBASEP_JS_CHECKFLAG_QUEUED )
255 mali_bool expect_queued;
256 is_queued = ( OSK_DLIST_MEMBER_OF( &policy_info->ctx_queue_head,
258 jctx.sched_info.runpool.policy_ctx.cfs.list ) )? MALI_TRUE: MALI_FALSE;
262 is_queued = ( OSK_DLIST_MEMBER_OF( &policy_info->ctx_rt_queue_head,
264 jctx.sched_info.runpool.policy_ctx.cfs.list ) )? MALI_TRUE: MALI_FALSE;
267 expect_queued = ( check_flag & KBASEP_JS_CHECKFLAG_IS_QUEUED ) ? MALI_TRUE : MALI_FALSE;
269 OSK_ASSERT_MSG( expect_queued == is_queued,
270 "Expected context %p to be %s but it was %s\n",
272 (expect_queued) ?"queued":"not queued",
273 (is_queued) ?"queued":"not queued" );
277 if ( check_flag & KBASEP_JS_CHECKFLAG_SCHEDULED )
279 mali_bool is_scheduled;
280 mali_bool expect_scheduled;
281 is_scheduled = ( OSK_DLIST_MEMBER_OF( &policy_info->scheduled_ctxs_head,
283 jctx.sched_info.runpool.policy_ctx.cfs.list ) )? MALI_TRUE: MALI_FALSE;
285 expect_scheduled = ( check_flag & KBASEP_JS_CHECKFLAG_IS_SCHEDULED ) ? MALI_TRUE : MALI_FALSE;
286 OSK_ASSERT_MSG( expect_scheduled == is_scheduled,
287 "Expected context %p to be %s but it was %s\n",
289 (expect_scheduled)?"scheduled":"not scheduled",
290 (is_scheduled) ?"scheduled":"not scheduled" );
295 #else /* MALI_DEBUG != 0 */
296 STATIC void kbasep_js_debug_check( kbasep_js_policy_cfs *policy_info, kbase_context *kctx, kbasep_js_check check_flag )
298 CSTD_UNUSED( policy_info );
300 CSTD_UNUSED( check_flag );
303 #endif /* MALI_DEBUG != 0 */
305 STATIC INLINE void set_slot_to_variant_lookup( u32 *bit_array, u32 slot_idx, u32 variants_supported )
307 u32 overall_bit_idx = slot_idx * KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS;
308 u32 word_idx = overall_bit_idx / 32;
309 u32 bit_idx = overall_bit_idx % 32;
311 OSK_ASSERT( slot_idx < BASE_JM_MAX_NR_SLOTS );
312 OSK_ASSERT( (variants_supported & ~LOOKUP_VARIANT_MASK) == 0 );
314 bit_array[word_idx] |= variants_supported << bit_idx;
318 STATIC INLINE u32 get_slot_to_variant_lookup( u32 *bit_array, u32 slot_idx )
320 u32 overall_bit_idx = slot_idx * KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS;
321 u32 word_idx = overall_bit_idx / 32;
322 u32 bit_idx = overall_bit_idx % 32;
326 OSK_ASSERT( slot_idx < BASE_JM_MAX_NR_SLOTS );
328 res = bit_array[word_idx] >> bit_idx;
329 res &= LOOKUP_VARIANT_MASK;
334 /* Check the core_req_variants: make sure that every job slot is satisifed by
335 * one of the variants. This checks that cached_variant_idx_init will produce a
336 * valid result for jobs that make maximum use of the job slots.
338 * @note The checks are limited to the job slots - this does not check that
339 * every context requirement is covered (because some are intentionally not
340 * supported, such as KBASE_CTX_FLAG_SUBMIT_DISABLED) */
342 STATIC void debug_check_core_req_variants( kbase_device *kbdev, kbasep_js_policy_cfs *policy_info )
344 kbasep_js_device_data *js_devdata;
348 js_devdata = &kbdev->js_data;
350 for ( j = 0 ; j < kbdev->nr_job_slots ; ++j )
352 base_jd_core_req job_core_req;
353 mali_bool found = MALI_FALSE;
355 job_core_req = js_devdata->js_reqs[j];
356 for ( i = 0; i < policy_info->num_core_req_variants ; ++i )
358 base_jd_core_req var_core_req;
359 var_core_req = policy_info->core_req_variants[i].core_req;
361 if ( (var_core_req & job_core_req) == job_core_req )
368 /* Early-out on any failure */
369 OSK_ASSERT_MSG( found != MALI_FALSE,
370 "Job slot %d features 0x%x not matched by core_req_variants. "
371 "Rework core_req_variants and vairants_supported_<...>_state[] to match\n",
378 STATIC void build_core_req_variants( kbase_device *kbdev, kbasep_js_policy_cfs *policy_info )
380 OSK_ASSERT( kbdev != NULL );
381 OSK_ASSERT( policy_info != NULL );
382 CSTD_UNUSED( kbdev );
384 OSK_ASSERT( NUM_CORE_REQ_VARIANTS <= KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS );
386 /* Assume a static set of variants */
387 OSK_MEMCPY( policy_info->core_req_variants, core_req_variants, sizeof(core_req_variants) );
389 policy_info->num_core_req_variants = NUM_CORE_REQ_VARIANTS;
391 OSK_DEBUG_CODE( debug_check_core_req_variants( kbdev, policy_info ) );
395 STATIC void build_slot_lookups( kbase_device *kbdev, kbasep_js_policy_cfs *policy_info )
399 OSK_ASSERT( kbdev != NULL );
400 OSK_ASSERT( policy_info != NULL );
402 OSK_ASSERT( kbdev->nr_job_slots <= NELEMS(variants_supported_ss_state) );
403 OSK_ASSERT( kbdev->nr_job_slots <= NELEMS(variants_supported_nss_state) );
405 /* Given the static set of variants, provide a static set of lookups */
406 for ( i = 0; i < kbdev->nr_job_slots; ++i )
408 set_slot_to_variant_lookup( policy_info->slot_to_variant_lookup_ss_state,
410 variants_supported_ss_state[i] );
412 set_slot_to_variant_lookup( policy_info->slot_to_variant_lookup_nss_state,
414 variants_supported_nss_state[i] );
419 STATIC mali_error cached_variant_idx_init( kbasep_js_policy_cfs *policy_info, kbase_context *kctx, kbase_jd_atom *atom )
421 kbasep_js_policy_cfs_job *job_info;
423 base_jd_core_req job_core_req;
424 kbase_context_flags ctx_flags;
425 kbasep_js_kctx_info *js_kctx_info;
427 OSK_ASSERT( policy_info != NULL );
428 OSK_ASSERT( kctx != NULL );
429 OSK_ASSERT( atom != NULL );
431 job_info = &atom->sched_info.cfs;
432 job_core_req = atom->core_req;
433 js_kctx_info = &kctx->jctx.sched_info;
434 ctx_flags = js_kctx_info->ctx.flags;
436 /* Pick a core_req variant that matches us. Since they're ordered by least
437 * restrictive first, it picks the least restrictive variant */
438 for ( i = 0; i < policy_info->num_core_req_variants ; ++i )
440 base_jd_core_req var_core_req;
441 kbase_context_flags var_ctx_req;
442 var_core_req = policy_info->core_req_variants[i].core_req;
443 var_ctx_req = policy_info->core_req_variants[i].ctx_req;
445 if ( (var_core_req & job_core_req) == job_core_req
446 && (var_ctx_req & ctx_flags) == ctx_flags )
448 job_info->cached_variant_idx = i;
449 return MALI_ERROR_NONE;
453 /* Could not find a matching requirement, this should only be caused by an
454 * attempt to attack the driver. */
455 return MALI_ERROR_FUNCTION_FAILED;
458 STATIC mali_bool dequeue_job( kbase_device *kbdev,
460 u32 variants_supported,
461 kbase_jd_atom **katom_ptr,
464 kbasep_js_device_data *js_devdata;
465 kbasep_js_policy_cfs *policy_info;
466 kbasep_js_policy_cfs_ctx *ctx_info;
468 OSK_ASSERT( kbdev != NULL );
469 OSK_ASSERT( katom_ptr != NULL );
470 OSK_ASSERT( kctx != NULL );
472 js_devdata = &kbdev->js_data;
473 policy_info = &js_devdata->policy.cfs;
474 ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
476 /* Only submit jobs from contexts that are allowed */
477 if ( kbasep_js_is_submit_allowed( js_devdata, kctx ) != MALI_FALSE )
479 /* Check each variant in turn */
480 while ( variants_supported != 0 )
484 variant_idx = osk_find_first_set_bit( variants_supported );
485 job_list = &ctx_info->job_list_head[variant_idx];
487 if ( OSK_DLIST_IS_EMPTY( job_list ) == MALI_FALSE )
489 /* Found a context with a matching job */
491 kbase_jd_atom *front_atom = OSK_DLIST_FRONT( job_list, kbase_jd_atom, sched_info.cfs.list );
492 KBASE_TRACE_ADD_SLOT( kbdev, JS_POLICY_DEQUEUE_JOB, front_atom->kctx, front_atom->user_atom,
493 front_atom->jc, job_slot_idx );
495 *katom_ptr = OSK_DLIST_POP_FRONT( job_list, kbase_jd_atom, sched_info.cfs.list );
497 (*katom_ptr)->sched_info.cfs.ticks = 0;
499 /* Put this context at the back of the Run Pool */
500 OSK_DLIST_REMOVE( &policy_info->scheduled_ctxs_head,
502 jctx.sched_info.runpool.policy_ctx.cfs.list );
503 OSK_DLIST_PUSH_BACK( &policy_info->scheduled_ctxs_head,
506 jctx.sched_info.runpool.policy_ctx.cfs.list );
511 variants_supported &= ~(1u << variant_idx);
513 /* All variants checked by here */
516 /* The context does not have a matching job */
522 * Hold the runpool_irq spinlock for this
524 OSK_STATIC_INLINE mali_bool timer_callback_should_run( kbase_device *kbdev )
526 kbasep_js_device_data *js_devdata;
529 OSK_ASSERT(kbdev != NULL);
530 js_devdata = &kbdev->js_data;
532 /* nr_user_contexts_running is updated with the runpool_mutex. However, the
533 * locking in the caller gives us a barrier that ensures nr_user_contexts is
534 * up-to-date for reading */
535 nr_running_ctxs = js_devdata->nr_user_contexts_running;
537 #if BASE_HW_ISSUE_9435 != 0
538 /* Timeouts would have to be 4x longer (due to micro-architectural design)
539 * to support OpenCL conformance tests, so only run the timer when there's:
540 * - 2 or more CL contexts
541 * - 1 or more GLES contexts
543 * NOTE: We will treat a context that has both Compute and Non-Compute jobs
544 * will be treated as an OpenCL context (hence, we don't check
545 * KBASEP_JS_CTX_ATTR_NON_COMPUTE).
548 s8 nr_compute_ctxs = kbasep_js_ctx_attr_count_on_runpool( kbdev, KBASEP_JS_CTX_ATTR_COMPUTE );
549 s8 nr_noncompute_ctxs = nr_running_ctxs - nr_compute_ctxs;
551 return (mali_bool)( nr_compute_ctxs >= 2 || nr_noncompute_ctxs > 0 );
553 #else /* BASE_HW_ISSUE_9435 != 0 */
554 /* Run the timer callback whenever you have at least 1 context */
555 return (mali_bool)(nr_running_ctxs > 0);
556 #endif /* BASE_HW_ISSUE_9435 != 0 */
559 static void timer_callback(void *data)
561 kbase_device *kbdev = (kbase_device*)data;
562 kbasep_js_device_data *js_devdata;
563 kbasep_js_policy_cfs *policy_info;
566 mali_bool reset_needed = MALI_FALSE;
568 OSK_ASSERT(kbdev != NULL);
570 js_devdata = &kbdev->js_data;
571 policy_info = &js_devdata->policy.cfs;
573 /* Loop through the slots */
574 for(s=0; s<kbdev->nr_job_slots; s++)
576 kbase_jm_slot *slot = kbase_job_slot_lock(kbdev, s);
577 kbase_jd_atom *atom = NULL;
579 if (kbasep_jm_nr_jobs_submitted(slot) > 0)
581 atom = kbasep_jm_peek_idx_submit_slot(slot, 0);
582 OSK_ASSERT( atom != NULL );
584 if ( kbasep_jm_is_dummy_workaround_job( atom ) != MALI_FALSE )
586 /* Prevent further use of the atom - never cause a soft-stop, hard-stop, or a GPU reset due to it. */
593 /* The current version of the model doesn't support Soft-Stop */
594 #if (BASE_HW_ISSUE_5736 == 0) || MALI_BACKEND_KERNEL
595 u32 ticks = atom->sched_info.cfs.ticks ++;
597 #if !CINSTR_DUMPING_ENABLED
598 if ( (atom->core_req & BASE_JD_REQ_NSS) == 0 )
600 /* Job is Soft-Stoppable */
601 if (ticks == js_devdata->soft_stop_ticks)
603 /* Job has been scheduled for at least js_devdata->soft_stop_ticks ticks.
604 * Soft stop the slot so we can run other jobs.
606 OSK_PRINT_INFO( OSK_BASE_JM, "Soft-stop" );
608 #if KBASE_DISABLE_SCHEDULING_SOFT_STOPS == 0
609 kbase_job_slot_softstop(kbdev, s, atom);
612 else if (ticks == js_devdata->hard_stop_ticks_ss)
614 /* Job has been scheduled for at least js_devdata->hard_stop_ticks_ss ticks.
615 * It should have been soft-stopped by now. Hard stop the slot.
617 #if KBASE_DISABLE_SCHEDULING_HARD_STOPS == 0
618 OSK_PRINT_WARN(OSK_BASE_JM, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)", ticks, js_devdata->scheduling_tick_ns/1000000u );
619 kbase_job_slot_hardstop(atom->kctx, s, atom);
622 else if (ticks == js_devdata->gpu_reset_ticks_ss)
624 /* Job has been scheduled for at least js_devdata->gpu_reset_ticks_ss ticks.
625 * It should have left the GPU by now. Signal that the GPU needs to be reset.
627 reset_needed = MALI_TRUE;
631 #endif /* !CINSTR_DUMPING_ENABLED */
633 /* Job is Non Soft-Stoppable */
634 if (ticks == js_devdata->soft_stop_ticks)
636 /* Job has been scheduled for at least js_devdata->soft_stop_ticks.
637 * Let's try to soft-stop it even if it's supposed to be NSS.
639 OSK_PRINT_INFO( OSK_BASE_JM, "Soft-stop" );
641 #if KBASE_DISABLE_SCHEDULING_SOFT_STOPS == 0
642 kbase_job_slot_softstop(kbdev, s, atom);
645 else if (ticks == js_devdata->hard_stop_ticks_nss)
647 /* Job has been scheduled for at least js_devdata->hard_stop_ticks_nss ticks.
648 * Hard stop the slot.
650 #if KBASE_DISABLE_SCHEDULING_HARD_STOPS == 0
651 OSK_PRINT_WARN(OSK_BASE_JM, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)", ticks, js_devdata->scheduling_tick_ns/1000000u );
652 kbase_job_slot_hardstop(atom->kctx, s, atom);
655 else if (ticks == js_devdata->gpu_reset_ticks_nss)
657 /* Job has been scheduled for at least js_devdata->gpu_reset_ticks_nss ticks.
658 * It should have left the GPU by now. Signal that the GPU needs to be reset.
660 reset_needed = MALI_TRUE;
663 #endif /* (BASE_HW_ISSUE_5736 == 0) || MALI_BACKEND_KERNEL */
666 kbase_job_slot_unlock(kbdev, s);
671 OSK_PRINT_WARN(OSK_BASE_JM, "JS: Job has been on the GPU for too long");
672 if (kbase_prepare_to_reset_gpu(kbdev))
674 kbase_reset_gpu(kbdev);
678 /* the timer is re-issued if there is contexts in the run-pool */
679 osk_spinlock_irq_lock(&js_devdata->runpool_irq.lock);
681 if (timer_callback_should_run(kbdev) != MALI_FALSE)
683 osk_err = osk_timer_start_ns(&policy_info->timer, js_devdata->scheduling_tick_ns);
684 if (OSK_ERR_NONE != osk_err)
686 policy_info->timer_running = MALI_FALSE;
691 KBASE_TRACE_ADD( kbdev, JS_POLICY_TIMER_END, NULL, NULL, 0u, 0u );
692 policy_info->timer_running = MALI_FALSE;
695 osk_spinlock_irq_unlock(&js_devdata->runpool_irq.lock);
699 * Non-private functions
702 mali_error kbasep_js_policy_init( kbase_device *kbdev )
704 kbasep_js_device_data *js_devdata;
705 kbasep_js_policy_cfs *policy_info;
707 OSK_ASSERT( kbdev != NULL );
708 js_devdata = &kbdev->js_data;
709 policy_info = &js_devdata->policy.cfs;
711 OSK_DLIST_INIT( &policy_info->ctx_queue_head );
712 OSK_DLIST_INIT( &policy_info->scheduled_ctxs_head );
713 OSK_DLIST_INIT( &policy_info->ctx_rt_queue_head );
715 if (osk_timer_init(&policy_info->timer) != OSK_ERR_NONE)
717 return MALI_ERROR_FUNCTION_FAILED;
720 osk_timer_callback_set( &policy_info->timer, timer_callback, kbdev );
722 policy_info->timer_running = MALI_FALSE;
724 policy_info->head_runtime_us = 0;
726 /* Build up the core_req variants */
727 build_core_req_variants( kbdev, policy_info );
728 /* Build the slot to variant lookups */
729 build_slot_lookups(kbdev, policy_info );
731 return MALI_ERROR_NONE;
734 void kbasep_js_policy_term( kbasep_js_policy *js_policy )
736 kbasep_js_policy_cfs *policy_info;
738 OSK_ASSERT( js_policy != NULL );
739 policy_info = &js_policy->cfs;
741 /* ASSERT that there are no contexts queued */
742 OSK_ASSERT( OSK_DLIST_IS_EMPTY( &policy_info->ctx_queue_head ) != MALI_FALSE );
743 /* ASSERT that there are no contexts scheduled */
744 OSK_ASSERT( OSK_DLIST_IS_EMPTY( &policy_info->scheduled_ctxs_head ) != MALI_FALSE );
746 /* ASSERT that there are no contexts queued */
747 OSK_ASSERT( OSK_DLIST_IS_EMPTY( &policy_info->ctx_rt_queue_head ) != MALI_FALSE );
749 osk_timer_stop(&policy_info->timer);
750 osk_timer_term(&policy_info->timer);
753 mali_error kbasep_js_policy_init_ctx( kbase_device *kbdev, kbase_context *kctx )
755 kbasep_js_device_data *js_devdata;
756 kbasep_js_policy_cfs_ctx *ctx_info;
757 kbasep_js_policy_cfs *policy_info;
758 osk_process_priority prio;
761 OSK_ASSERT( kbdev != NULL );
762 OSK_ASSERT( kctx != NULL );
764 js_devdata = &kbdev->js_data;
765 policy_info = &kbdev->js_data.policy.cfs;
766 ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
768 KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_POLICY_INIT_CTX, kctx, NULL, 0u,
769 kbasep_js_policy_trace_get_refcnt( kbdev, kctx ));
771 for ( i = 0 ; i < policy_info->num_core_req_variants ; ++i )
773 OSK_DLIST_INIT( &ctx_info->job_list_head[i] );
776 osk_get_process_priority(&prio);
777 ctx_info->process_rt_policy = prio.is_realtime;
778 ctx_info->process_priority = prio.priority;
779 ctx_info->bag_total_priority = 0;
780 ctx_info->bag_total_nr_atoms = 0;
782 /* Initial runtime (relative to least-run context runtime)
784 * This uses the Policy Queue's most up-to-date head_runtime_us by using the
785 * queue mutex to issue memory barriers - also ensure future updates to
786 * head_runtime_us occur strictly after this context is initialized */
787 osk_mutex_lock( &js_devdata->queue_mutex );
789 /* No need to hold the the runpool_irq.lock here, because we're initializing
790 * the value, and the context is definitely not being updated in the
791 * runpool at this point. The queue_mutex ensures the memory barrier. */
792 ctx_info->runtime_us = policy_info->head_runtime_us +
793 priority_weight(ctx_info,
794 (u64)js_devdata->cfs_ctx_runtime_init_slices * (u64)(js_devdata->ctx_timeslice_ns/1000u));
796 osk_mutex_unlock( &js_devdata->queue_mutex );
798 return MALI_ERROR_NONE;
801 void kbasep_js_policy_term_ctx( kbasep_js_policy *js_policy, kbase_context *kctx )
803 kbasep_js_policy_cfs_ctx *ctx_info;
804 kbasep_js_policy_cfs *policy_info;
807 OSK_ASSERT( js_policy != NULL );
808 OSK_ASSERT( kctx != NULL );
810 policy_info = &js_policy->cfs;
811 ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
814 kbase_device *kbdev = CONTAINER_OF( js_policy, kbase_device, js_data.policy );
815 KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_POLICY_TERM_CTX, kctx, NULL, 0u,
816 kbasep_js_policy_trace_get_refcnt( kbdev, kctx ));
819 /* ASSERT that no jobs are present */
820 for ( i = 0 ; i < policy_info->num_core_req_variants ; ++i )
822 OSK_ASSERT( OSK_DLIST_IS_EMPTY( &ctx_info->job_list_head[i] ) != MALI_FALSE );
833 void kbasep_js_policy_enqueue_ctx( kbasep_js_policy *js_policy, kbase_context *kctx )
835 kbasep_js_policy_cfs *policy_info;
836 kbasep_js_policy_cfs_ctx *ctx_info;
837 kbase_context *list_kctx = NULL;
838 kbasep_js_device_data *js_devdata;
839 osk_dlist *queue_head;
841 OSK_ASSERT( js_policy != NULL );
842 OSK_ASSERT( kctx != NULL );
844 policy_info = &js_policy->cfs;
845 ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
846 js_devdata = CONTAINER_OF( js_policy, kbasep_js_device_data, policy );
849 kbase_device *kbdev = CONTAINER_OF( js_policy, kbase_device, js_data.policy );
850 KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_POLICY_ENQUEUE_CTX, kctx, NULL, 0u,
851 kbasep_js_policy_trace_get_refcnt( kbdev, kctx ));
854 /* ASSERT about scheduled-ness/queued-ness */
855 kbasep_js_debug_check( policy_info, kctx, KBASEP_JS_CHECK_NOTQUEUED );
857 /* Clamp the runtime to prevent DoS attacks through "stored-up" runtime */
858 if (policy_info->head_runtime_us > ctx_info->runtime_us
859 + (u64)js_devdata->cfs_ctx_runtime_min_slices * (u64)(js_devdata->ctx_timeslice_ns/1000u))
861 /* No need to hold the the runpool_irq.lock here, because we're essentially
862 * initializing the value, and the context is definitely not being updated in the
863 * runpool at this point. The queue_mutex held by the caller ensures the memory
865 ctx_info->runtime_us = policy_info->head_runtime_us
866 - (u64)js_devdata->cfs_ctx_runtime_min_slices * (u64)(js_devdata->ctx_timeslice_ns/1000u);
869 /* Find the position where the context should be enqueued */
870 if(ctx_info->process_rt_policy)
872 queue_head = &policy_info->ctx_rt_queue_head;
876 queue_head = &policy_info->ctx_queue_head;
879 OSK_DLIST_FOREACH( queue_head,
881 jctx.sched_info.runpool.policy_ctx.cfs.list,
884 kbasep_js_policy_cfs_ctx *list_ctx_info;
885 list_ctx_info = &list_kctx->jctx.sched_info.runpool.policy_ctx.cfs;
887 if ( (kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_PRIVILEGED) != 0 )
892 if ( (list_ctx_info->runtime_us > ctx_info->runtime_us) &&
893 ((list_kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_PRIVILEGED) == 0) )
899 /* Add the context to the queue */
900 if (OSK_DLIST_IS_VALID( list_kctx, jctx.sched_info.runpool.policy_ctx.cfs.list ) == MALI_TRUE)
902 OSK_DLIST_INSERT_BEFORE( queue_head,
906 jctx.sched_info.runpool.policy_ctx.cfs.list );
910 OSK_DLIST_PUSH_BACK( queue_head,
913 jctx.sched_info.runpool.policy_ctx.cfs.list );
917 mali_bool kbasep_js_policy_dequeue_head_ctx( kbasep_js_policy *js_policy, kbase_context **kctx_ptr )
919 kbasep_js_policy_cfs *policy_info;
920 kbase_context *head_ctx;
921 osk_dlist *queue_head;
923 OSK_ASSERT( js_policy != NULL );
924 OSK_ASSERT( kctx_ptr != NULL );
926 policy_info = &js_policy->cfs;
928 /* attempt to dequeue from the 'realttime' queue first */
929 if ( OSK_DLIST_IS_EMPTY( &policy_info->ctx_rt_queue_head ) != MALI_FALSE )
931 if ( OSK_DLIST_IS_EMPTY( &policy_info->ctx_queue_head ) != MALI_FALSE )
933 /* Nothing to dequeue */
938 queue_head = &policy_info->ctx_queue_head;
943 queue_head = &policy_info->ctx_rt_queue_head;
946 /* Contexts are dequeued from the front of the queue */
947 *kctx_ptr = OSK_DLIST_POP_FRONT( queue_head,
949 jctx.sched_info.runpool.policy_ctx.cfs.list );
952 kbase_device *kbdev = CONTAINER_OF( js_policy, kbase_device, js_data.policy );
953 kbase_context *kctx = *kctx_ptr;
954 KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_POLICY_DEQUEUE_HEAD_CTX, kctx, NULL, 0u,
955 kbasep_js_policy_trace_get_refcnt( kbdev, kctx ));
959 /* Update the head runtime */
960 head_ctx = OSK_DLIST_FRONT( queue_head,
962 jctx.sched_info.runpool.policy_ctx.cfs.list );
963 if (OSK_DLIST_IS_VALID( head_ctx, jctx.sched_info.runpool.policy_ctx.cfs.list ) == MALI_TRUE)
965 /* No need to hold the the runpool_irq.lock here for reading - the
966 * context is definitely not being updated in the runpool at this
967 * point. The queue_mutex held by the caller ensures the memory barrier. */
968 u64 head_runtime = head_ctx->jctx.sched_info.runpool.policy_ctx.cfs.runtime_us;
970 if (head_runtime > policy_info->head_runtime_us)
972 policy_info->head_runtime_us = head_runtime;
979 mali_bool kbasep_js_policy_try_evict_ctx( kbasep_js_policy *js_policy, kbase_context *kctx )
981 kbasep_js_policy_cfs_ctx *ctx_info;
982 kbasep_js_policy_cfs *policy_info;
983 mali_bool is_present;
984 osk_dlist *queue_head;
987 OSK_ASSERT( js_policy != NULL );
988 OSK_ASSERT( kctx != NULL );
990 policy_info = &js_policy->cfs;
991 ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
993 if(ctx_info->process_rt_policy)
995 queue_head = &policy_info->ctx_rt_queue_head;
999 queue_head = &policy_info->ctx_queue_head;
1003 is_present = OSK_DLIST_MEMBER_OF( qhead,
1005 jctx.sched_info.runpool.policy_ctx.cfs.list );
1008 kbase_device *kbdev = CONTAINER_OF( js_policy, kbase_device, js_data.policy );
1009 KBASE_TRACE_ADD_REFCOUNT_INFO( kbdev, JS_POLICY_TRY_EVICT_CTX, kctx, NULL, 0u,
1010 kbasep_js_policy_trace_get_refcnt( kbdev, kctx ), is_present);
1013 if ( is_present != MALI_FALSE )
1015 kbase_context *head_ctx;
1017 /* Remove the context */
1018 OSK_DLIST_REMOVE( qhead,
1020 jctx.sched_info.runpool.policy_ctx.cfs.list );
1023 /* Update the head runtime */
1024 head_ctx = OSK_DLIST_FRONT( qhead,
1026 jctx.sched_info.runpool.policy_ctx.cfs.list );
1027 if (OSK_DLIST_IS_VALID( head_ctx, jctx.sched_info.runpool.policy_ctx.cfs.list ) == MALI_TRUE)
1029 /* No need to hold the the runpool_irq.lock here for reading - the
1030 * context is definitely not being updated in the runpool at this
1031 * point. The queue_mutex held by the caller ensures the memory barrier. */
1032 u64 head_runtime = head_ctx->jctx.sched_info.runpool.policy_ctx.cfs.runtime_us;
1034 if (head_runtime > policy_info->head_runtime_us)
1036 policy_info->head_runtime_us = head_runtime;
1044 void kbasep_js_policy_kill_all_ctx_jobs( kbasep_js_policy *js_policy, kbase_context *kctx )
1046 kbasep_js_policy_cfs *policy_info;
1047 kbasep_js_policy_cfs_ctx *ctx_info;
1050 OSK_ASSERT( js_policy != NULL );
1051 OSK_ASSERT( kctx != NULL );
1053 policy_info = &js_policy->cfs;
1054 ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
1057 kbase_device *kbdev = CONTAINER_OF( js_policy, kbase_device, js_data.policy );
1058 KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_POLICY_KILL_ALL_CTX_JOBS, kctx, NULL, 0u,
1059 kbasep_js_policy_trace_get_refcnt( kbdev, kctx ));
1062 /* Kill jobs on each variant in turn */
1063 for ( i = 0; i < policy_info->num_core_req_variants; ++i )
1065 osk_dlist *job_list;
1066 job_list = &ctx_info->job_list_head[i];
1068 /* Call kbase_jd_cancel() on all kbase_jd_atoms in this list, whilst removing them from the list */
1069 OSK_DLIST_EMPTY_LIST( job_list, kbase_jd_atom, sched_info.cfs.list, kbase_jd_cancel );
1074 void kbasep_js_policy_runpool_add_ctx( kbasep_js_policy *js_policy, kbase_context *kctx )
1076 kbasep_js_policy_cfs *policy_info;
1077 kbasep_js_device_data *js_devdata;
1078 kbase_device *kbdev;
1081 OSK_ASSERT( js_policy != NULL );
1082 OSK_ASSERT( kctx != NULL );
1084 policy_info = &js_policy->cfs;
1085 js_devdata = CONTAINER_OF( js_policy, kbasep_js_device_data, policy );
1086 kbdev = CONTAINER_OF( js_policy, kbase_device, js_data.policy );
1089 KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_POLICY_RUNPOOL_ADD_CTX, kctx, NULL, 0u,
1090 kbasep_js_policy_trace_get_refcnt_nolock( kbdev, kctx ));
1093 /* ASSERT about scheduled-ness/queued-ness */
1094 kbasep_js_debug_check( policy_info, kctx, KBASEP_JS_CHECK_NOTSCHEDULED );
1096 /* All enqueued contexts go to the back of the runpool */
1097 OSK_DLIST_PUSH_BACK( &policy_info->scheduled_ctxs_head,
1100 jctx.sched_info.runpool.policy_ctx.cfs.list );
1102 if ( timer_callback_should_run(kbdev) != MALI_FALSE
1103 && policy_info->timer_running == MALI_FALSE )
1105 osk_err = osk_timer_start_ns(&policy_info->timer, js_devdata->scheduling_tick_ns);
1106 if (OSK_ERR_NONE == osk_err)
1108 kbase_device *kbdev = CONTAINER_OF( js_policy, kbase_device, js_data.policy );
1109 KBASE_TRACE_ADD( kbdev, JS_POLICY_TIMER_START, NULL, NULL, 0u, 0u );
1110 policy_info->timer_running = MALI_TRUE;
1115 void kbasep_js_policy_runpool_remove_ctx( kbasep_js_policy *js_policy, kbase_context *kctx )
1117 kbasep_js_policy_cfs *policy_info;
1119 OSK_ASSERT( js_policy != NULL );
1120 OSK_ASSERT( kctx != NULL );
1122 policy_info = &js_policy->cfs;
1125 kbase_device *kbdev = CONTAINER_OF( js_policy, kbase_device, js_data.policy );
1126 KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_POLICY_RUNPOOL_REMOVE_CTX, kctx, NULL, 0u,
1127 kbasep_js_policy_trace_get_refcnt_nolock( kbdev, kctx ));
1130 /* ASSERT about scheduled-ness/queued-ness */
1131 kbasep_js_debug_check( policy_info, kctx, KBASEP_JS_CHECK_SCHEDULED );
1133 /* No searching or significant list maintenance required to remove this context */
1134 OSK_DLIST_REMOVE( &policy_info->scheduled_ctxs_head,
1136 jctx.sched_info.runpool.policy_ctx.cfs.list );
1139 mali_bool kbasep_js_policy_should_remove_ctx( kbasep_js_policy *js_policy, kbase_context *kctx )
1141 kbasep_js_policy_cfs_ctx *ctx_info;
1142 kbasep_js_policy_cfs *policy_info;
1143 kbase_context *head_ctx;
1144 kbasep_js_device_data *js_devdata;
1145 osk_dlist *queue_head;
1147 OSK_ASSERT( js_policy != NULL );
1148 OSK_ASSERT( kctx != NULL );
1150 policy_info = &js_policy->cfs;
1151 ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
1152 js_devdata = CONTAINER_OF( js_policy, kbasep_js_device_data, policy );
1154 if(ctx_info->process_rt_policy)
1156 queue_head = &policy_info->ctx_rt_queue_head;
1160 queue_head = &policy_info->ctx_queue_head;
1163 head_ctx = OSK_DLIST_FRONT( queue_head,
1165 jctx.sched_info.runpool.policy_ctx.cfs.list );
1166 if (OSK_DLIST_IS_VALID( head_ctx, jctx.sched_info.runpool.policy_ctx.cfs.list ) == MALI_TRUE)
1168 u64 head_runtime_us = head_ctx->jctx.sched_info.runpool.policy_ctx.cfs.runtime_us;
1170 if ((head_runtime_us + priority_weight(ctx_info, (u64)(js_devdata->ctx_timeslice_ns/1000u)))
1171 < ctx_info->runtime_us)
1173 /* The context is scheduled out if it's not the least-run context anymore.
1174 * The "real" head runtime is used instead of the cached runtime so the current
1175 * context is not scheduled out when there is less contexts than address spaces.
1185 * Job Chain Management
1188 mali_error kbasep_js_policy_init_job( kbasep_js_policy *js_policy, kbase_jd_atom *atom )
1190 kbasep_js_policy_cfs_ctx *ctx_info;
1191 kbasep_js_policy_cfs *policy_info;
1192 kbase_context *parent_ctx;
1194 OSK_ASSERT( js_policy != NULL );
1195 OSK_ASSERT( atom != NULL );
1196 parent_ctx = atom->kctx;
1197 OSK_ASSERT( parent_ctx != NULL );
1199 policy_info = &js_policy->cfs;
1200 ctx_info = &parent_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
1202 /* Adjust context priority to include the new job */
1203 ctx_info->bag_total_nr_atoms++;
1204 ctx_info->bag_total_priority += atom->nice_prio;
1206 /* Get average priority and convert to NICE range -20..19 */
1207 if(ctx_info->bag_total_nr_atoms)
1209 ctx_info->bag_priority = (ctx_info->bag_total_priority / ctx_info->bag_total_nr_atoms) - 20;
1212 /* Determine the job's index into the job list head, will return error if the
1213 * atom is malformed and so is reported. */
1214 return cached_variant_idx_init( policy_info, parent_ctx, atom );
1217 void kbasep_js_policy_term_job( kbasep_js_policy *js_policy, kbase_jd_atom *atom )
1219 kbasep_js_policy_cfs_job *job_info;
1220 kbasep_js_policy_cfs_ctx *ctx_info;
1221 kbase_context *parent_ctx;
1223 OSK_ASSERT( js_policy != NULL );
1224 CSTD_UNUSED(js_policy);
1225 OSK_ASSERT( atom != NULL );
1226 parent_ctx = atom->kctx;
1227 OSK_ASSERT( parent_ctx != NULL );
1229 job_info = &atom->sched_info.cfs;
1230 ctx_info = &parent_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
1232 /* Adjust context priority to no longer include removed job */
1233 OSK_ASSERT(ctx_info->bag_total_nr_atoms > 0);
1234 ctx_info->bag_total_nr_atoms--;
1235 ctx_info->bag_total_priority -= atom->nice_prio;
1236 OSK_ASSERT(ctx_info->bag_total_priority >= 0);
1238 /* Get average priority and convert to NICE range -20..19 */
1239 if(ctx_info->bag_total_nr_atoms)
1241 ctx_info->bag_priority = (ctx_info->bag_total_priority / ctx_info->bag_total_nr_atoms) - 20;
1244 /* In any case, we'll ASSERT that this job was correctly removed from the relevant lists */
1245 OSK_ASSERT( OSK_DLIST_MEMBER_OF( &ctx_info->job_list_head[job_info->cached_variant_idx],
1247 sched_info.cfs.list ) == MALI_FALSE );
1250 mali_bool kbasep_js_policy_dequeue_job( kbase_device *kbdev,
1252 kbase_jd_atom **katom_ptr )
1254 kbasep_js_device_data *js_devdata;
1255 kbasep_js_policy_cfs *policy_info;
1256 kbase_context *kctx;
1257 u32 variants_supported;
1259 OSK_ASSERT( kbdev != NULL );
1260 OSK_ASSERT( katom_ptr != NULL );
1261 OSK_ASSERT( job_slot_idx < BASE_JM_MAX_NR_SLOTS );
1263 js_devdata = &kbdev->js_data;
1264 policy_info = &js_devdata->policy.cfs;
1266 /* Get the variants for this slot */
1267 if ( kbasep_js_ctx_attr_count_on_runpool( kbdev, KBASEP_JS_CTX_ATTR_NSS ) == 0 )
1270 variants_supported = get_slot_to_variant_lookup( policy_info->slot_to_variant_lookup_ss_state, job_slot_idx );
1275 variants_supported = get_slot_to_variant_lookup( policy_info->slot_to_variant_lookup_nss_state, job_slot_idx );
1278 /* First pass through the runpool we consider the realtime priority jobs */
1279 OSK_DLIST_FOREACH( &policy_info->scheduled_ctxs_head,
1281 jctx.sched_info.runpool.policy_ctx.cfs.list,
1284 if(kctx->jctx.sched_info.runpool.policy_ctx.cfs.process_rt_policy)
1286 if(dequeue_job(kbdev, kctx, variants_supported, katom_ptr, job_slot_idx))
1288 /* Realtime policy job matched */
1294 /* Second pass through the runpool we consider the non-realtime priority jobs */
1295 OSK_DLIST_FOREACH( &policy_info->scheduled_ctxs_head,
1297 jctx.sched_info.runpool.policy_ctx.cfs.list,
1300 if(kctx->jctx.sched_info.runpool.policy_ctx.cfs.process_rt_policy == MALI_FALSE)
1302 if(dequeue_job(kbdev, kctx, variants_supported, katom_ptr, job_slot_idx))
1304 /* Non-realtime policy job matched */
1310 /* By this point, no contexts had a matching job */
1314 mali_bool kbasep_js_policy_dequeue_job_irq( kbase_device *kbdev,
1316 kbase_jd_atom **katom_ptr )
1318 /* IRQ and non-IRQ variants of this are the same (though, the IRQ variant could be made faster) */
1320 /* KBASE_TRACE_ADD_SLOT( kbdev, JS_POLICY_DEQUEUE_JOB_IRQ, NULL, NULL, 0u,
1322 return kbasep_js_policy_dequeue_job( kbdev, job_slot_idx, katom_ptr );
1326 void kbasep_js_policy_enqueue_job( kbasep_js_policy *js_policy, kbase_jd_atom *katom )
1328 kbasep_js_policy_cfs_job *job_info;
1329 kbasep_js_policy_cfs_ctx *ctx_info;
1330 kbase_context *parent_ctx;
1332 OSK_ASSERT( js_policy != NULL );
1333 OSK_ASSERT( katom != NULL );
1334 parent_ctx = katom->kctx;
1335 OSK_ASSERT( parent_ctx != NULL );
1337 job_info = &katom->sched_info.cfs;
1338 ctx_info = &parent_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
1341 kbase_device *kbdev = CONTAINER_OF( js_policy, kbase_device, js_data.policy );
1342 KBASE_TRACE_ADD( kbdev, JS_POLICY_ENQUEUE_JOB, katom->kctx, katom->user_atom, katom->jc,
1346 OSK_DLIST_PUSH_BACK( &ctx_info->job_list_head[job_info->cached_variant_idx],
1349 sched_info.cfs.list );
1352 void kbasep_js_policy_log_job_result( kbasep_js_policy *js_policy, kbase_jd_atom *katom, u32 time_spent_us )
1354 kbasep_js_policy_cfs_ctx *ctx_info;
1355 kbase_context *parent_ctx;
1356 OSK_ASSERT( js_policy != NULL );
1357 OSK_ASSERT( katom != NULL );
1358 CSTD_UNUSED( js_policy );
1360 parent_ctx = katom->kctx;
1361 OSK_ASSERT( parent_ctx != NULL );
1363 ctx_info = &parent_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
1365 ctx_info->runtime_us += priority_weight(ctx_info, time_spent_us);
1368 mali_bool kbasep_js_policy_ctx_has_priority( kbasep_js_policy *js_policy, kbase_context *current_ctx, kbase_context *new_ctx )
1370 kbasep_js_policy_cfs_ctx *current_ctx_info;
1371 kbasep_js_policy_cfs_ctx *new_ctx_info;
1373 OSK_ASSERT( current_ctx != NULL );
1374 OSK_ASSERT( new_ctx != NULL );
1375 CSTD_UNUSED(js_policy);
1377 current_ctx_info = ¤t_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
1378 new_ctx_info = &new_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
1380 if((current_ctx_info->process_rt_policy == MALI_FALSE) &&
1381 (new_ctx_info->process_rt_policy == MALI_TRUE))
1386 if((current_ctx_info->process_rt_policy == new_ctx_info->process_rt_policy) &&
1387 (current_ctx_info->bag_priority > new_ctx_info->bag_priority))