Initial commit
[kernel/linux-3.0.git] / drivers / gpu / vithar / kbase / src / common / mali_kbase_js_policy_cfs.c
1 /*
2  *
3  * (C) COPYRIGHT 2011-2012 ARM Limited. All rights reserved.
4  *
5  * This program is free software and is provided to you under the terms of the GNU General Public License version 2
6  * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
7  * 
8  * A copy of the licence is included with the program, and can also be obtained from Free Software
9  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
10  * 
11  */
12
13
14
15 /*
16  * Job Scheduler: Completely Fair Policy Implementation
17  */
18
19 #include <kbase/src/common/mali_kbase.h>
20 #include <kbase/src/common/mali_kbase_jm.h>
21 #include <kbase/src/common/mali_kbase_js.h>
22 #include <kbase/src/common/mali_kbase_js_policy_cfs.h>
23
24 /**
25  * Define for when dumping is enabled.
26  * This should not be based on the instrumentation level as whether dumping is enabled for a particular level is down to the integrator.
27  * However this is being used for now as otherwise the cinstr headers would be needed.
28  */
29 #define CINSTR_DUMPING_ENABLED ( 2 == MALI_INSTRUMENTATION_LEVEL )
30
31 /** Fixed point constants used for runtime weight calculations */
32 #define WEIGHT_FIXEDPOINT_SHIFT 10
33 #define WEIGHT_TABLE_SIZE       40
34 #define WEIGHT_0_NICE           (WEIGHT_TABLE_SIZE/2)
35 #define WEIGHT_0_VAL            (1 << WEIGHT_FIXEDPOINT_SHIFT)
36
37 #define LOOKUP_VARIANT_MASK ((1u<<KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS) - 1u)
38
39 /** Core requirements that all the variants support */
40 #define JS_CORE_REQ_ALL_OTHERS \
41         ( BASE_JD_REQ_CF | BASE_JD_REQ_V | BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_PERMON | BASE_JD_REQ_EXTERNAL_RESOURCES )
42
/** Context requirements that all the variants support */
44 #if BASE_HW_ISSUE_8987 != 0
45 /* In this HW workaround, restrict Compute-only contexts and Compute jobs onto job slot[2],
46  * which will ensure their affinity does not intersect GLES jobs */
47 #define JS_CTX_REQ_ALL_OTHERS \
48         ( KBASE_CTX_FLAG_CREATE_FLAGS_SET | KBASE_CTX_FLAG_PRIVILEGED )
49 #define JS_CORE_REQ_COMPUTE_SLOT \
50         ( BASE_JD_REQ_CS )
51 #define JS_CORE_REQ_ONLY_COMPUTE_SLOT \
52         ( BASE_JD_REQ_ONLY_COMPUTE )
53
54 #else /* BASE_HW_ISSUE_8987 != 0 */
55 /* Otherwise, compute-only contexts/compute jobs can use any job slot */
56 #define JS_CTX_REQ_ALL_OTHERS \
57         ( KBASE_CTX_FLAG_CREATE_FLAGS_SET | KBASE_CTX_FLAG_PRIVILEGED | KBASE_CTX_FLAG_HINT_ONLY_COMPUTE)
58 #define JS_CORE_REQ_COMPUTE_SLOT \
59         ( BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE )
60 #define JS_CORE_REQ_ONLY_COMPUTE_SLOT \
61         ( BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE )
62
63 #endif /* BASE_HW_ISSUE_8987 != 0 */
64
/* core_req variants are ordered by least restrictive first, so that the
 * matching loop in cached_variant_idx_init() picks the least restrictive
 * variant for each job. Note that the coherent_group requirement is added to
 * all CS variants as the selection of job-slot does not depend on the
 * coherency requirement. */
static const kbasep_atom_req core_req_variants[] ={
        {
                /* Variant 0: Fragment Shader jobs */
                (JS_CORE_REQ_ALL_OTHERS | BASE_JD_REQ_FS),
                (JS_CTX_REQ_ALL_OTHERS)
        },
        {
                /* Variant 1: Compute Shader jobs */
                (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT),
                (JS_CTX_REQ_ALL_OTHERS)
        },
        {
                /* Variant 2: Compute Shader + Tiler jobs */
                (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT | BASE_JD_REQ_T),
                (JS_CTX_REQ_ALL_OTHERS)
        },

        /* The last variant is one guaranteed to support Compute contexts/jobs, or
         * NSS jobs. In the case of a context that's specified as 'Only Compute', it'll not allow
         * Tiler or Fragment jobs, and so those get rejected */
        {
                (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_ONLY_COMPUTE_SLOT | BASE_JD_REQ_NSS ),
                (JS_CTX_REQ_ALL_OTHERS | KBASE_CTX_FLAG_HINT_ONLY_COMPUTE)
        }
};

/* Number of entries in core_req_variants; must not exceed
 * KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS (asserted in build_core_req_variants) */
#define NUM_CORE_REQ_VARIANTS NELEMS(core_req_variants)
93
/* Per-job-slot bitmask of which core_req variants (bit i == variant i) that
 * slot services, in the soft-stoppable (SS) state. Indexed by slot number. */
static const u32 variants_supported_ss_state[] =
{
        (1u << 0),             /* js[0] uses variant 0 (FS list)*/
        (1u << 2) | (1u << 1), /* js[1] uses variants 1 and 2 (CS and CS+T lists)*/
        (1u << 3)              /* js[2] uses variant 3 (Compute list) */
};

/* As above, but for the NSS (non soft-stoppable) state */
static const u32 variants_supported_nss_state[] =
{
        (1u << 0),             /* js[0] uses variant 0 (FS list)*/
        (1u << 2) | (1u << 1), /* js[1] uses variants 1 and 2 (CS and CS+T lists)*/
        (1u << 3)              /* js[2] uses variant 3 (Compute/NSS list) */
};
107
/* Defines for easy asserts 'is scheduled'/'is queued'/'is neither queued nor scheduled' */
#define KBASEP_JS_CHECKFLAG_QUEUED       (1u << 0) /**< Check the queued state */
#define KBASEP_JS_CHECKFLAG_SCHEDULED    (1u << 1) /**< Check the scheduled state */
#define KBASEP_JS_CHECKFLAG_IS_QUEUED    (1u << 2) /**< Expect queued state to be set */
#define KBASEP_JS_CHECKFLAG_IS_SCHEDULED (1u << 3) /**< Expect scheduled state to be set */

/* Convenience combinations for kbasep_js_debug_check(): each value selects
 * which state to check, and (via the IS_ flags) whether that state is
 * expected to be set or clear */
enum
{
        KBASEP_JS_CHECK_NOTQUEUED     = KBASEP_JS_CHECKFLAG_QUEUED,
        KBASEP_JS_CHECK_NOTSCHEDULED  = KBASEP_JS_CHECKFLAG_SCHEDULED,
        KBASEP_JS_CHECK_QUEUED        = KBASEP_JS_CHECKFLAG_QUEUED | KBASEP_JS_CHECKFLAG_IS_QUEUED,
        KBASEP_JS_CHECK_SCHEDULED     = KBASEP_JS_CHECKFLAG_SCHEDULED | KBASEP_JS_CHECKFLAG_IS_SCHEDULED
};

/* Bitmask of KBASEP_JS_CHECKFLAG_* values */
typedef u32 kbasep_js_check;
123
124 /*
125  * Private Functions
126  */
127
/* Table autogenerated using util built from: kbase/scripts/gen_cfs_weight_of_prio.c */

/* weight = 1.25
 *
 * Fixed-point (WEIGHT_FIXEDPOINT_SHIFT) priority weights, indexed by
 * nice + WEIGHT_0_NICE. nice 0 maps to WEIGHT_0_VAL (1024 == weight 1.0),
 * and each +1 step in nice scales the weight by ~1.25 (each -1 divides). */
static const int weight_of_priority[] =
{
        /*  -20 */     11,      14,      18,      23,
        /*  -16 */     29,      36,      45,      56,
        /*  -12 */     70,      88,     110,     137,
        /*   -8 */    171,     214,     268,     335,
        /*   -4 */    419,     524,     655,     819,
        /*    0 */   1024,    1280,    1600,    2000,
        /*    4 */   2500,    3125,    3906,    4883,
        /*    8 */   6104,    7630,    9538,   11923,
        /*   12 */  14904,   18630,   23288,   29110,
        /*   16 */  36388,   45485,   56856,   71070
};
144
145 /**
146  * @note There is nothing to stop the priority of the ctx containing \a
147  * ctx_info changing during or immediately after this function is called
148  * (because its jsctx_mutex cannot be held during IRQ). Therefore, this
149  * function should only be seen as a heuristic guide as to the priority weight
150  * of the context.
151  */
152 STATIC u64 priority_weight(kbasep_js_policy_cfs_ctx *ctx_info, u32 time_us)
153 {
154         u64 time_delta_us;
155         int priority;
156         priority = ctx_info->process_priority + ctx_info->bag_priority;
157
158         /* Adjust runtime_us using priority weight if required */
159         if(priority != 0 && time_us != 0)
160         {
161                 int clamped_priority;
162
163                 /* Clamp values to min..max weights */
164                 if(priority > OSK_PROCESS_PRIORITY_MAX)
165                 {
166                         clamped_priority = OSK_PROCESS_PRIORITY_MAX;
167                 }
168                 else if(priority < OSK_PROCESS_PRIORITY_MIN)
169                 {
170                         clamped_priority = OSK_PROCESS_PRIORITY_MIN;
171                 }
172                 else
173                 {
174                         clamped_priority = priority;
175                 }
176
177                 /* Fixed point multiplication */
178                 time_delta_us = ((u64)time_us * weight_of_priority[WEIGHT_0_NICE + clamped_priority]);
179                 /* Remove fraction */
180                 time_delta_us = time_delta_us >> WEIGHT_FIXEDPOINT_SHIFT;
181                 /* Make sure the time always increases */
182                 if(0 == time_delta_us)
183                 {
184                         time_delta_us++;
185                 }
186         }
187         else
188         {
189                 time_delta_us = time_us;
190         }
191
192         return time_delta_us;
193 }
194
#if KBASE_TRACE_ENABLE != 0
/* Return the busy refcount of the address space \a kctx currently occupies,
 * or 0 if the context has no address space (as_nr invalid). The caller must
 * hold the runpool_irq lock. Used only to supply trace arguments. */
STATIC int kbasep_js_policy_trace_get_refcnt_nolock( kbase_device *kbdev, kbase_context *kctx )
{
        kbasep_js_device_data *js_devdata;
        int as_nr;
        int refcnt = 0;

        js_devdata = &kbdev->js_data;

        as_nr = kctx->as_nr;
        if ( as_nr != KBASEP_AS_NR_INVALID )
        {
                kbasep_js_per_as_data *js_per_as_data;
                js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];

                refcnt = js_per_as_data->as_busy_refcount;
        }

        return refcnt;
}

/* Locking variant of the above: takes and releases the runpool_irq lock
 * around the read itself */
STATIC INLINE int kbasep_js_policy_trace_get_refcnt( kbase_device *kbdev, kbase_context *kctx )
{
        kbasep_js_device_data *js_devdata;
        int refcnt = 0;

        js_devdata = &kbdev->js_data;

        osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );
        refcnt = kbasep_js_policy_trace_get_refcnt_nolock( kbdev, kctx );
        osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );

        return refcnt;
}
#else /* KBASE_TRACE_ENABLE != 0 */
/* Tracing disabled: stubs that always report a refcount of 0 */
STATIC int kbasep_js_policy_trace_get_refcnt_nolock( kbase_device *kbdev, kbase_context *kctx )
{
        CSTD_UNUSED( kbdev );
        CSTD_UNUSED( kctx );
        return 0;
}

STATIC INLINE int kbasep_js_policy_trace_get_refcnt( kbase_device *kbdev, kbase_context *kctx )
{
        CSTD_UNUSED( kbdev );
        CSTD_UNUSED( kctx );
        return 0;
}
#endif /* KBASE_TRACE_ENABLE != 0 */
244
245
#if MALI_DEBUG != 0
/* Debug-only consistency check: assert that \a kctx's membership of the
 * policy's queued lists (normal or RT queue) and/or scheduled list matches
 * the expectation encoded in \a check_flag (see the KBASEP_JS_CHECK_* enum).
 * A context counts as 'queued' if it is on either the normal or the RT
 * context queue. */
STATIC void kbasep_js_debug_check( kbasep_js_policy_cfs *policy_info, kbase_context *kctx, kbasep_js_check check_flag )
{
        /* This function uses the ternary operator and non-explicit comparisons,
         * because it makes for much shorter, easier to read code */

        if ( check_flag & KBASEP_JS_CHECKFLAG_QUEUED )
        {
                mali_bool is_queued;
                mali_bool expect_queued;
                is_queued = ( OSK_DLIST_MEMBER_OF( &policy_info->ctx_queue_head,
                                                   kctx,
                                                   jctx.sched_info.runpool.policy_ctx.cfs.list ) )? MALI_TRUE: MALI_FALSE;

                if(!is_queued)
                {
                        /* Not on the normal queue - it may still be on the RT queue */
                        is_queued = ( OSK_DLIST_MEMBER_OF( &policy_info->ctx_rt_queue_head,
                                                           kctx,
                                                           jctx.sched_info.runpool.policy_ctx.cfs.list ) )? MALI_TRUE: MALI_FALSE;
                }

                expect_queued = ( check_flag & KBASEP_JS_CHECKFLAG_IS_QUEUED ) ? MALI_TRUE : MALI_FALSE;

                OSK_ASSERT_MSG( expect_queued == is_queued,
                                "Expected context %p to be %s but it was %s\n",
                                kctx,
                                (expect_queued)   ?"queued":"not queued",
                                (is_queued)       ?"queued":"not queued" );

        }

        if ( check_flag & KBASEP_JS_CHECKFLAG_SCHEDULED )
        {
                mali_bool is_scheduled;
                mali_bool expect_scheduled;
                is_scheduled = ( OSK_DLIST_MEMBER_OF( &policy_info->scheduled_ctxs_head,
                                                      kctx,
                                                      jctx.sched_info.runpool.policy_ctx.cfs.list ) )? MALI_TRUE: MALI_FALSE;

                expect_scheduled = ( check_flag & KBASEP_JS_CHECKFLAG_IS_SCHEDULED ) ? MALI_TRUE : MALI_FALSE;
                OSK_ASSERT_MSG( expect_scheduled == is_scheduled,
                                "Expected context %p to be %s but it was %s\n",
                                kctx,
                                (expect_scheduled)?"scheduled":"not scheduled",
                                (is_scheduled)    ?"scheduled":"not scheduled" );

        }

}
#else /* MALI_DEBUG != 0 */
/* Non-debug builds: the check is compiled out */
STATIC void kbasep_js_debug_check( kbasep_js_policy_cfs *policy_info, kbase_context *kctx, kbasep_js_check check_flag )
{
        CSTD_UNUSED( policy_info );
        CSTD_UNUSED( kctx );
        CSTD_UNUSED( check_flag );
        return;
}
#endif /* MALI_DEBUG != 0 */
304
305 STATIC INLINE void set_slot_to_variant_lookup( u32 *bit_array, u32 slot_idx, u32 variants_supported )
306 {
307         u32 overall_bit_idx = slot_idx * KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS;
308         u32 word_idx = overall_bit_idx / 32;
309         u32 bit_idx = overall_bit_idx % 32;
310
311         OSK_ASSERT( slot_idx < BASE_JM_MAX_NR_SLOTS );
312         OSK_ASSERT( (variants_supported & ~LOOKUP_VARIANT_MASK) == 0 );
313
314         bit_array[word_idx] |= variants_supported << bit_idx;
315 }
316
317
318 STATIC INLINE u32 get_slot_to_variant_lookup( u32 *bit_array, u32 slot_idx )
319 {
320         u32 overall_bit_idx = slot_idx * KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS;
321         u32 word_idx = overall_bit_idx / 32;
322         u32 bit_idx = overall_bit_idx % 32;
323
324         u32 res;
325
326         OSK_ASSERT( slot_idx < BASE_JM_MAX_NR_SLOTS );
327
328         res = bit_array[word_idx] >> bit_idx;
329         res &= LOOKUP_VARIANT_MASK;
330
331         return res;
332 }
333
/* Check the core_req_variants: make sure that every job slot is satisfied by
 * one of the variants. This checks that cached_variant_idx_init will produce a
 * valid result for jobs that make maximum use of the job slots.
 *
 * @note The checks are limited to the job slots - this does not check that
 * every context requirement is covered (because some are intentionally not
 * supported, such as KBASE_CTX_FLAG_SUBMIT_DISABLED) */
#if MALI_DEBUG
STATIC void debug_check_core_req_variants( kbase_device *kbdev, kbasep_js_policy_cfs *policy_info )
{
        kbasep_js_device_data *js_devdata;
        u32 i;
        int j;

        js_devdata = &kbdev->js_data;

        /* For each job slot, ensure at least one variant's core requirements
         * are a superset of that slot's features */
        for ( j = 0 ; j < kbdev->nr_job_slots ; ++j )
        {
                base_jd_core_req job_core_req;
                mali_bool found = MALI_FALSE;

                job_core_req = js_devdata->js_reqs[j];
                for ( i = 0; i < policy_info->num_core_req_variants ; ++i )
                {
                        base_jd_core_req var_core_req;
                        var_core_req = policy_info->core_req_variants[i].core_req;

                        if ( (var_core_req & job_core_req) == job_core_req )
                        {
                                found = MALI_TRUE;
                                break;
                        }
                }

                /* Early-out on any failure */
                OSK_ASSERT_MSG( found != MALI_FALSE,
                                "Job slot %d features 0x%x not matched by core_req_variants. "
                                "Rework core_req_variants and variants_supported_<...>_state[] to match\n",
                                j,
                                job_core_req );
        }
}
#endif
377
378 STATIC void build_core_req_variants( kbase_device *kbdev, kbasep_js_policy_cfs *policy_info )
379 {
380         OSK_ASSERT( kbdev != NULL );
381         OSK_ASSERT( policy_info != NULL );
382         CSTD_UNUSED( kbdev );
383
384         OSK_ASSERT( NUM_CORE_REQ_VARIANTS <= KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS );
385
386         /* Assume a static set of variants */
387         OSK_MEMCPY( policy_info->core_req_variants, core_req_variants, sizeof(core_req_variants) );
388
389         policy_info->num_core_req_variants = NUM_CORE_REQ_VARIANTS;
390
391         OSK_DEBUG_CODE( debug_check_core_req_variants( kbdev, policy_info ) );
392 }
393
394
395 STATIC void build_slot_lookups( kbase_device *kbdev, kbasep_js_policy_cfs *policy_info )
396 {
397         s8 i;
398
399         OSK_ASSERT( kbdev != NULL );
400         OSK_ASSERT( policy_info != NULL );
401
402         OSK_ASSERT( kbdev->nr_job_slots <= NELEMS(variants_supported_ss_state) );
403         OSK_ASSERT( kbdev->nr_job_slots <= NELEMS(variants_supported_nss_state) );
404
405         /* Given the static set of variants, provide a static set of lookups */
406         for ( i = 0; i < kbdev->nr_job_slots; ++i )
407         {
408                 set_slot_to_variant_lookup( policy_info->slot_to_variant_lookup_ss_state,
409                                             i,
410                                             variants_supported_ss_state[i] );
411
412                 set_slot_to_variant_lookup( policy_info->slot_to_variant_lookup_nss_state,
413                                             i,
414                                             variants_supported_nss_state[i] );
415         }
416
417 }
418
419 STATIC mali_error cached_variant_idx_init( kbasep_js_policy_cfs *policy_info, kbase_context *kctx, kbase_jd_atom *atom )
420 {
421         kbasep_js_policy_cfs_job *job_info;
422         u32 i;
423         base_jd_core_req job_core_req;
424         kbase_context_flags ctx_flags;
425         kbasep_js_kctx_info *js_kctx_info;
426
427         OSK_ASSERT( policy_info != NULL );
428         OSK_ASSERT( kctx != NULL );
429         OSK_ASSERT( atom != NULL );
430
431         job_info = &atom->sched_info.cfs;
432         job_core_req = atom->core_req;
433         js_kctx_info = &kctx->jctx.sched_info;
434         ctx_flags = js_kctx_info->ctx.flags;
435
436         /* Pick a core_req variant that matches us. Since they're ordered by least
437          * restrictive first, it picks the least restrictive variant */
438         for ( i = 0; i < policy_info->num_core_req_variants ; ++i )
439         {
440                 base_jd_core_req var_core_req;
441                 kbase_context_flags var_ctx_req;
442                 var_core_req = policy_info->core_req_variants[i].core_req;
443                 var_ctx_req = policy_info->core_req_variants[i].ctx_req;
444                 
445                 if ( (var_core_req & job_core_req) == job_core_req
446                          && (var_ctx_req & ctx_flags) == ctx_flags )
447                 {
448                         job_info->cached_variant_idx = i;
449                         return MALI_ERROR_NONE;
450                 }
451         }
452
453         /* Could not find a matching requirement, this should only be caused by an
454          * attempt to attack the driver. */
455         return MALI_ERROR_FUNCTION_FAILED;
456 }
457
/**
 * Attempt to dequeue a job from \a kctx matching one of the variants in the
 * \a variants_supported bitmask (bit i == variant i).
 *
 * Variants are tried in ascending bit order. On success, *katom_ptr is set
 * to the dequeued atom (with its tick count reset to 0), the context is
 * moved to the back of the policy's scheduled-contexts list, and MALI_TRUE
 * is returned. MALI_FALSE is returned when submission is disallowed for the
 * context or no matching job is queued. \a job_slot_idx is used only for
 * tracing.
 */
STATIC mali_bool dequeue_job( kbase_device *kbdev,
                              kbase_context *kctx,
                              u32 variants_supported,
                              kbase_jd_atom **katom_ptr,
                                                          int job_slot_idx)
{
        kbasep_js_device_data *js_devdata;
        kbasep_js_policy_cfs *policy_info;
        kbasep_js_policy_cfs_ctx *ctx_info;

        OSK_ASSERT( kbdev != NULL );
        OSK_ASSERT( katom_ptr != NULL );
        OSK_ASSERT( kctx != NULL );

        js_devdata = &kbdev->js_data;
        policy_info = &js_devdata->policy.cfs;
        ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;

        /* Only submit jobs from contexts that are allowed */
        if ( kbasep_js_is_submit_allowed( js_devdata, kctx ) != MALI_FALSE )
        {
                /* Check each variant in turn, lowest set bit first */
                while ( variants_supported != 0 )
                {
                        long variant_idx;
                        osk_dlist *job_list;
                        variant_idx = osk_find_first_set_bit( variants_supported );
                        job_list = &ctx_info->job_list_head[variant_idx];

                        if ( OSK_DLIST_IS_EMPTY( job_list ) == MALI_FALSE )
                        {
                                /* Found a context with a matching job */
                                {
                                        /* Trace the atom before popping it off the list */
                                        kbase_jd_atom *front_atom = OSK_DLIST_FRONT( job_list, kbase_jd_atom, sched_info.cfs.list );
                                        KBASE_TRACE_ADD_SLOT( kbdev, JS_POLICY_DEQUEUE_JOB, front_atom->kctx, front_atom->user_atom,
                                                              front_atom->jc, job_slot_idx );
                                }
                                *katom_ptr = OSK_DLIST_POP_FRONT( job_list, kbase_jd_atom, sched_info.cfs.list );

                                /* Fresh atom on the hardware: restart its tick count */
                                (*katom_ptr)->sched_info.cfs.ticks = 0;

                                /* Put this context at the back of the Run Pool */
                                OSK_DLIST_REMOVE( &policy_info->scheduled_ctxs_head,
                                                  kctx,
                                                  jctx.sched_info.runpool.policy_ctx.cfs.list );
                                OSK_DLIST_PUSH_BACK( &policy_info->scheduled_ctxs_head,
                                                     kctx,
                                                     kbase_context,
                                                     jctx.sched_info.runpool.policy_ctx.cfs.list );

                                return MALI_TRUE;
                        }

                        /* This variant's list was empty; try the next one */
                        variants_supported &= ~(1u << variant_idx);
                }
                /* All variants checked by here */
        }

        /* The context does not have a matching job */

        return MALI_FALSE;
}
520
/**
 * Decide whether the scheduling-tick timer should be (re-)armed.
 *
 * Hold the runpool_irq spinlock for this.
 */
OSK_STATIC_INLINE mali_bool timer_callback_should_run( kbase_device *kbdev )
{
        kbasep_js_device_data *js_devdata;
        s8 nr_running_ctxs;

        OSK_ASSERT(kbdev != NULL);
        js_devdata = &kbdev->js_data;

        /* nr_user_contexts_running is updated with the runpool_mutex. However, the
         * locking in the caller gives us a barrier that ensures nr_user_contexts is
         * up-to-date for reading */
        nr_running_ctxs = js_devdata->nr_user_contexts_running;

#if BASE_HW_ISSUE_9435 != 0
        /* Timeouts would have to be 4x longer (due to micro-architectural design)
         * to support OpenCL conformance tests, so only run the timer when there's:
         * - 2 or more CL contexts
         * - 1 or more GLES contexts
         *
         * NOTE: We will treat a context that has both Compute and Non-Compute jobs
         * will be treated as an OpenCL context (hence, we don't check
         * KBASEP_JS_CTX_ATTR_NON_COMPUTE).
         */
        {
                s8 nr_compute_ctxs = kbasep_js_ctx_attr_count_on_runpool( kbdev, KBASEP_JS_CTX_ATTR_COMPUTE );
                /* Every running context that isn't compute counts as non-compute */
                s8 nr_noncompute_ctxs = nr_running_ctxs - nr_compute_ctxs;

                return (mali_bool)( nr_compute_ctxs >= 2 || nr_noncompute_ctxs > 0 );
        }
#else /* BASE_HW_ISSUE_9435 != 0 */
        /* Run the timer callback whenever you have at least 1 context */
        return (mali_bool)(nr_running_ctxs > 0);
#endif /* BASE_HW_ISSUE_9435 != 0 */
}
558
/* Scheduling-tick timer callback.
 *
 * For each job slot, looks at the job at the head of the slot and, based on
 * how many ticks it has been running, soft-stops it, hard-stops it, or flags
 * that a GPU reset is needed. Dummy workaround jobs are never stopped. The
 * timer re-arms itself (under the runpool_irq lock) while
 * timer_callback_should_run() says contexts are still running. */
static void timer_callback(void *data)
{
        kbase_device *kbdev = (kbase_device*)data;
        kbasep_js_device_data *js_devdata;
        kbasep_js_policy_cfs *policy_info;
        int s;
        osk_error osk_err;
        mali_bool reset_needed = MALI_FALSE;

        OSK_ASSERT(kbdev != NULL);

        js_devdata = &kbdev->js_data;
        policy_info = &js_devdata->policy.cfs;

        /* Loop through the slots */
        for(s=0; s<kbdev->nr_job_slots; s++)
        {
                kbase_jm_slot *slot = kbase_job_slot_lock(kbdev, s);
                kbase_jd_atom *atom = NULL;

                if (kbasep_jm_nr_jobs_submitted(slot) > 0)
                {
                        /* Only the job at the head of the slot is timed */
                        atom = kbasep_jm_peek_idx_submit_slot(slot, 0);
                        OSK_ASSERT( atom != NULL );

                        if ( kbasep_jm_is_dummy_workaround_job( atom ) != MALI_FALSE )
                        {
                                /* Prevent further use of the atom - never cause a soft-stop, hard-stop, or a GPU reset due to it. */
                                atom = NULL;
                        }
                }

                if ( atom != NULL )
                {
/* The current version of the model doesn't support Soft-Stop */
#if (BASE_HW_ISSUE_5736 == 0) || MALI_BACKEND_KERNEL
                        /* Post-increment: 'ticks' holds the count from before this
                         * tick, so each threshold below fires exactly once */
                        u32 ticks = atom->sched_info.cfs.ticks ++;

#if !CINSTR_DUMPING_ENABLED
                        if ( (atom->core_req & BASE_JD_REQ_NSS) == 0 )
                        {
                                /* Job is Soft-Stoppable */
                                if (ticks == js_devdata->soft_stop_ticks)
                                {
                                        /* Job has been scheduled for at least js_devdata->soft_stop_ticks ticks.
                                         * Soft stop the slot so we can run other jobs.
                                         */
                                        OSK_PRINT_INFO( OSK_BASE_JM, "Soft-stop" );

#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS == 0
                                        kbase_job_slot_softstop(kbdev, s, atom);
#endif
                                }
                                else if (ticks == js_devdata->hard_stop_ticks_ss)
                                {
                                        /* Job has been scheduled for at least js_devdata->hard_stop_ticks_ss ticks.
                                         * It should have been soft-stopped by now. Hard stop the slot.
                                         */
#if KBASE_DISABLE_SCHEDULING_HARD_STOPS == 0
                                        /* NOTE(review): ticks is u32 but printed with %lu - verify
                                         * format specifiers on 64-bit builds */
                                        OSK_PRINT_WARN(OSK_BASE_JM, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)", ticks, js_devdata->scheduling_tick_ns/1000000u );
                                        kbase_job_slot_hardstop(atom->kctx, s, atom);
#endif
                                }
                                else if (ticks == js_devdata->gpu_reset_ticks_ss)
                                {
                                        /* Job has been scheduled for at least js_devdata->gpu_reset_ticks_ss ticks.
                                         * It should have left the GPU by now. Signal that the GPU needs to be reset.
                                         */
                                        reset_needed = MALI_TRUE;
                                }
                        }
                        else
#endif /* !CINSTR_DUMPING_ENABLED */
                        {
                                /* Job is Non Soft-Stoppable. Note: when
                                 * CINSTR_DUMPING_ENABLED, every job takes this path
                                 * regardless of BASE_JD_REQ_NSS (the if/else above is
                                 * compiled out). */
                                if (ticks == js_devdata->soft_stop_ticks)
                                {
                                        /* Job has been scheduled for at least js_devdata->soft_stop_ticks.
                                         * Let's try to soft-stop it even if it's supposed to be NSS.
                                         */
                                        OSK_PRINT_INFO( OSK_BASE_JM, "Soft-stop" );

#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS == 0
                                        kbase_job_slot_softstop(kbdev, s, atom);
#endif
                                }
                                else if (ticks == js_devdata->hard_stop_ticks_nss)
                                {
                                        /* Job has been scheduled for at least js_devdata->hard_stop_ticks_nss ticks.
                                         * Hard stop the slot.
                                         */
#if KBASE_DISABLE_SCHEDULING_HARD_STOPS == 0
                                        OSK_PRINT_WARN(OSK_BASE_JM, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)", ticks, js_devdata->scheduling_tick_ns/1000000u );
                                        kbase_job_slot_hardstop(atom->kctx, s, atom);
#endif
                                }
                                else if (ticks == js_devdata->gpu_reset_ticks_nss)
                                {
                                        /* Job has been scheduled for at least js_devdata->gpu_reset_ticks_nss ticks.
                                         * It should have left the GPU by now. Signal that the GPU needs to be reset.
                                         */
                                        reset_needed = MALI_TRUE;
                                }
                        }
#endif /* (BASE_HW_ISSUE_5736 == 0) || MALI_BACKEND_KERNEL */
                }

                kbase_job_slot_unlock(kbdev, s);
        }

        if (reset_needed)
        {
                OSK_PRINT_WARN(OSK_BASE_JM, "JS: Job has been on the GPU for too long");
                if (kbase_prepare_to_reset_gpu(kbdev))
                {
                        kbase_reset_gpu(kbdev);
                }
        }

        /* Re-issue the timer if there are contexts in the run-pool */
        osk_spinlock_irq_lock(&js_devdata->runpool_irq.lock);

        if (timer_callback_should_run(kbdev) != MALI_FALSE)
        {
                osk_err = osk_timer_start_ns(&policy_info->timer, js_devdata->scheduling_tick_ns);
                if (OSK_ERR_NONE != osk_err)
                {
                        /* Could not re-arm: record that the timer is no longer running */
                        policy_info->timer_running = MALI_FALSE;
                }
        }
        else
        {
                KBASE_TRACE_ADD( kbdev, JS_POLICY_TIMER_END, NULL, NULL, 0u, 0u );
                policy_info->timer_running = MALI_FALSE;
        }

        osk_spinlock_irq_unlock(&js_devdata->runpool_irq.lock);
}
697
698 /*
699  * Non-private functions
700  */
701
702 mali_error kbasep_js_policy_init( kbase_device *kbdev )
703 {
704         kbasep_js_device_data *js_devdata;
705         kbasep_js_policy_cfs *policy_info;
706
707         OSK_ASSERT( kbdev != NULL );
708         js_devdata = &kbdev->js_data;
709         policy_info = &js_devdata->policy.cfs;
710
711         OSK_DLIST_INIT( &policy_info->ctx_queue_head );
712         OSK_DLIST_INIT( &policy_info->scheduled_ctxs_head );
713         OSK_DLIST_INIT( &policy_info->ctx_rt_queue_head );
714
715         if (osk_timer_init(&policy_info->timer) != OSK_ERR_NONE)
716         {
717                 return MALI_ERROR_FUNCTION_FAILED;
718         }
719
720         osk_timer_callback_set( &policy_info->timer, timer_callback, kbdev );
721
722         policy_info->timer_running = MALI_FALSE;
723
724         policy_info->head_runtime_us = 0;
725
726         /* Build up the core_req variants */
727         build_core_req_variants( kbdev, policy_info );
728         /* Build the slot to variant lookups */
729         build_slot_lookups(kbdev, policy_info );
730
731         return MALI_ERROR_NONE;
732 }
733
734 void kbasep_js_policy_term( kbasep_js_policy *js_policy )
735 {
736         kbasep_js_policy_cfs     *policy_info;
737
738         OSK_ASSERT( js_policy != NULL );
739         policy_info = &js_policy->cfs;
740
741         /* ASSERT that there are no contexts queued */
742         OSK_ASSERT( OSK_DLIST_IS_EMPTY( &policy_info->ctx_queue_head ) != MALI_FALSE );
743         /* ASSERT that there are no contexts scheduled */
744         OSK_ASSERT( OSK_DLIST_IS_EMPTY( &policy_info->scheduled_ctxs_head ) != MALI_FALSE );
745
746         /* ASSERT that there are no contexts queued */
747         OSK_ASSERT( OSK_DLIST_IS_EMPTY( &policy_info->ctx_rt_queue_head ) != MALI_FALSE );
748
749         osk_timer_stop(&policy_info->timer);
750         osk_timer_term(&policy_info->timer);
751 }
752
753 mali_error kbasep_js_policy_init_ctx( kbase_device *kbdev, kbase_context *kctx )
754 {
755         kbasep_js_device_data *js_devdata;
756         kbasep_js_policy_cfs_ctx *ctx_info;
757         kbasep_js_policy_cfs     *policy_info;
758         osk_process_priority prio;
759         u32 i;
760
761         OSK_ASSERT( kbdev != NULL );
762         OSK_ASSERT( kctx != NULL );
763
764         js_devdata = &kbdev->js_data;
765         policy_info = &kbdev->js_data.policy.cfs;
766         ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
767
768         KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_POLICY_INIT_CTX, kctx, NULL, 0u,
769                                                           kbasep_js_policy_trace_get_refcnt( kbdev, kctx ));
770
771         for ( i = 0 ; i < policy_info->num_core_req_variants ; ++i )
772         {
773                 OSK_DLIST_INIT( &ctx_info->job_list_head[i] );
774         }
775
776         osk_get_process_priority(&prio);
777         ctx_info->process_rt_policy = prio.is_realtime;
778         ctx_info->process_priority = prio.priority;
779         ctx_info->bag_total_priority = 0;
780         ctx_info->bag_total_nr_atoms = 0;
781
782         /* Initial runtime (relative to least-run context runtime)
783          *
784          * This uses the Policy Queue's most up-to-date head_runtime_us by using the
785          * queue mutex to issue memory barriers - also ensure future updates to
786          * head_runtime_us occur strictly after this context is initialized */
787         osk_mutex_lock( &js_devdata->queue_mutex );
788
789         /* No need to hold the the runpool_irq.lock here, because we're initializing
790          * the value, and the context is definitely not being updated in the
791          * runpool at this point. The queue_mutex ensures the memory barrier. */
792         ctx_info->runtime_us = policy_info->head_runtime_us +
793                 priority_weight(ctx_info,
794                                                 (u64)js_devdata->cfs_ctx_runtime_init_slices * (u64)(js_devdata->ctx_timeslice_ns/1000u));
795
796         osk_mutex_unlock( &js_devdata->queue_mutex );
797
798         return MALI_ERROR_NONE;
799 }
800
801 void kbasep_js_policy_term_ctx( kbasep_js_policy *js_policy, kbase_context *kctx )
802 {
803         kbasep_js_policy_cfs_ctx *ctx_info;
804         kbasep_js_policy_cfs     *policy_info;
805         u32 i;
806
807         OSK_ASSERT( js_policy != NULL );
808         OSK_ASSERT( kctx != NULL );
809
810         policy_info = &js_policy->cfs;
811         ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
812
813         {
814                 kbase_device *kbdev = CONTAINER_OF( js_policy, kbase_device, js_data.policy );
815                 KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_POLICY_TERM_CTX, kctx, NULL, 0u,
816                                                                   kbasep_js_policy_trace_get_refcnt( kbdev, kctx ));
817         }
818
819         /* ASSERT that no jobs are present */
820         for ( i = 0 ; i < policy_info->num_core_req_variants ; ++i )
821         {
822                 OSK_ASSERT( OSK_DLIST_IS_EMPTY( &ctx_info->job_list_head[i] ) != MALI_FALSE );
823         }
824
825         /* No work to do */
826 }
827
828
829 /*
830  * Context Management
831  */
832
/**
 * Enqueue @kctx onto the policy queue, ordered by virtual runtime (CFS):
 * the context is inserted before the first queued context that has run for
 * longer than it, so the least-run context sits at the head.
 *
 * Realtime-policy contexts go on a separate queue that the dequeue side
 * drains first. Privileged contexts jump straight to the front of their
 * queue (and are never displaced by a non-privileged insert).
 *
 * Caller holds the queue mutex (per the comments below), which also
 * provides the memory barrier for the unlocked runtime_us update.
 */
void kbasep_js_policy_enqueue_ctx( kbasep_js_policy *js_policy, kbase_context *kctx )
{
	kbasep_js_policy_cfs *policy_info;
	kbasep_js_policy_cfs_ctx *ctx_info;
	kbase_context *list_kctx = NULL;
	kbasep_js_device_data *js_devdata;
	osk_dlist *queue_head;

	OSK_ASSERT( js_policy != NULL );
	OSK_ASSERT( kctx != NULL );

	policy_info = &js_policy->cfs;
	ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
	js_devdata = CONTAINER_OF( js_policy, kbasep_js_device_data, policy );

	{
		kbase_device *kbdev = CONTAINER_OF( js_policy, kbase_device, js_data.policy );
		KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_POLICY_ENQUEUE_CTX, kctx, NULL, 0u,
								  kbasep_js_policy_trace_get_refcnt( kbdev, kctx ));
	}

	/* ASSERT about scheduled-ness/queued-ness */
	kbasep_js_debug_check( policy_info, kctx, KBASEP_JS_CHECK_NOTQUEUED );

	/* Clamp the runtime to prevent DoS attacks through "stored-up" runtime:
	 * a context that idled for a long time would otherwise re-enter the
	 * queue far ahead of everyone and monopolize the GPU */
	if (policy_info->head_runtime_us > ctx_info->runtime_us
		+ (u64)js_devdata->cfs_ctx_runtime_min_slices * (u64)(js_devdata->ctx_timeslice_ns/1000u))
	{
		/* No need to hold the runpool_irq.lock here, because we're essentially
		 * initializing the value, and the context is definitely not being updated in the
		 * runpool at this point. The queue_mutex held by the caller ensures the memory
		 * barrier. */
		ctx_info->runtime_us = policy_info->head_runtime_us
			- (u64)js_devdata->cfs_ctx_runtime_min_slices * (u64)(js_devdata->ctx_timeslice_ns/1000u);
	}

	/* Find the position where the context should be enqueued */
	if(ctx_info->process_rt_policy)
	{
		queue_head = &policy_info->ctx_rt_queue_head;
	}
	else
	{
		queue_head = &policy_info->ctx_queue_head;
	}

	/* Walk forward until we find the insertion point; list_kctx is left
	 * "invalid" (past the end) if we should append instead */
	OSK_DLIST_FOREACH( queue_head,
	                   kbase_context,
	                   jctx.sched_info.runpool.policy_ctx.cfs.list,
	                   list_kctx )
	{
		kbasep_js_policy_cfs_ctx *list_ctx_info;
		list_ctx_info  = &list_kctx->jctx.sched_info.runpool.policy_ctx.cfs;

		/* A privileged context is inserted at the very front, regardless
		 * of runtimes (this condition is loop-invariant: it breaks on the
		 * first iteration when set) */
		if ( (kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_PRIVILEGED) != 0 )
		{
			break;
		}

		/* Otherwise insert before the first longer-running context, but
		 * never ahead of a privileged one */
		if ( (list_ctx_info->runtime_us > ctx_info->runtime_us) && 
		     ((list_kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_PRIVILEGED) == 0) )
		{
			break;
		}
	}

	/* Add the context to the queue */
	if (OSK_DLIST_IS_VALID( list_kctx, jctx.sched_info.runpool.policy_ctx.cfs.list ) == MALI_TRUE)
	{
		OSK_DLIST_INSERT_BEFORE( queue_head,
		                         kctx,
		                         list_kctx,
		                         kbase_context,
		                         jctx.sched_info.runpool.policy_ctx.cfs.list );
	}
	else
	{
		/* Ran off the end of the queue: append */
		OSK_DLIST_PUSH_BACK( queue_head,
		                     kctx,
		                     kbase_context,
		                     jctx.sched_info.runpool.policy_ctx.cfs.list );
	}
}
916
/**
 * Dequeue the context at the head of the policy queue into *kctx_ptr.
 *
 * The realtime queue is always drained before the normal queue, so contexts
 * owned by realtime-policy processes are scheduled in preference.
 *
 * After the pop, the cached head_runtime_us is refreshed from the new queue
 * head (it is only ever moved forwards).
 *
 * @return MALI_TRUE if a context was dequeued, MALI_FALSE if both queues
 *         were empty.
 */
mali_bool kbasep_js_policy_dequeue_head_ctx( kbasep_js_policy *js_policy, kbase_context **kctx_ptr )
{
	kbasep_js_policy_cfs *policy_info;
	kbase_context *head_ctx;
	osk_dlist *queue_head;

	OSK_ASSERT( js_policy != NULL );
	OSK_ASSERT( kctx_ptr != NULL );

	policy_info = &js_policy->cfs;

	/* attempt to dequeue from the 'realtime' queue first */
	if ( OSK_DLIST_IS_EMPTY( &policy_info->ctx_rt_queue_head ) != MALI_FALSE )
	{
		if ( OSK_DLIST_IS_EMPTY( &policy_info->ctx_queue_head ) != MALI_FALSE )
		{
			/* Nothing to dequeue */
			return MALI_FALSE;
		}
		else
		{
			queue_head = &policy_info->ctx_queue_head;
		}
	}
	else
	{
		queue_head = &policy_info->ctx_rt_queue_head;
	}

	/* Contexts are dequeued from the front of the queue */
	*kctx_ptr = OSK_DLIST_POP_FRONT( queue_head,
	                                 kbase_context,
	                                 jctx.sched_info.runpool.policy_ctx.cfs.list );

	{
		kbase_device *kbdev = CONTAINER_OF( js_policy, kbase_device, js_data.policy );
		kbase_context *kctx = *kctx_ptr;
		KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_POLICY_DEQUEUE_HEAD_CTX, kctx, NULL, 0u,
								  kbasep_js_policy_trace_get_refcnt( kbdev, kctx ));
	}


	/* Update the head runtime from whichever context is now at the front
	 * (the queue may now be empty, hence the IS_VALID check) */
	head_ctx = OSK_DLIST_FRONT( queue_head,
	                            kbase_context,
	                            jctx.sched_info.runpool.policy_ctx.cfs.list );
	if (OSK_DLIST_IS_VALID( head_ctx, jctx.sched_info.runpool.policy_ctx.cfs.list ) == MALI_TRUE)
	{
		/* No need to hold the runpool_irq.lock here for reading - the
		 * context is definitely not being updated in the runpool at this
		 * point. The queue_mutex held by the caller ensures the memory barrier. */
		u64 head_runtime = head_ctx->jctx.sched_info.runpool.policy_ctx.cfs.runtime_us;

		/* head_runtime_us is monotonically non-decreasing */
		if (head_runtime > policy_info->head_runtime_us)
		{
			policy_info->head_runtime_us = head_runtime;
		}
	}

	return MALI_TRUE;
}
978
979 mali_bool kbasep_js_policy_try_evict_ctx( kbasep_js_policy *js_policy, kbase_context *kctx )
980 {
981         kbasep_js_policy_cfs_ctx *ctx_info;
982         kbasep_js_policy_cfs     *policy_info;
983         mali_bool is_present;
984         osk_dlist *queue_head;
985         osk_dlist *qhead;
986
987         OSK_ASSERT( js_policy != NULL );
988         OSK_ASSERT( kctx != NULL );
989
990         policy_info = &js_policy->cfs;
991         ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
992
993         if(ctx_info->process_rt_policy)
994         {
995                 queue_head = &policy_info->ctx_rt_queue_head;
996         }
997         else
998         {
999                 queue_head = &policy_info->ctx_queue_head;
1000         }
1001         qhead = queue_head;
1002
1003         is_present = OSK_DLIST_MEMBER_OF( qhead,
1004                                           kctx,
1005                                           jctx.sched_info.runpool.policy_ctx.cfs.list );
1006
1007         {
1008                 kbase_device *kbdev = CONTAINER_OF( js_policy, kbase_device, js_data.policy );
1009                 KBASE_TRACE_ADD_REFCOUNT_INFO( kbdev, JS_POLICY_TRY_EVICT_CTX, kctx, NULL, 0u,
1010                                                                            kbasep_js_policy_trace_get_refcnt( kbdev, kctx ), is_present);
1011         }
1012
1013         if ( is_present != MALI_FALSE )
1014         {
1015                 kbase_context *head_ctx;
1016                 qhead = queue_head;
1017                 /* Remove the context */
1018                 OSK_DLIST_REMOVE( qhead,
1019                                   kctx,
1020                                   jctx.sched_info.runpool.policy_ctx.cfs.list );
1021
1022                 qhead = queue_head;
1023                 /* Update the head runtime */
1024                 head_ctx = OSK_DLIST_FRONT( qhead,
1025                                             kbase_context,
1026                                             jctx.sched_info.runpool.policy_ctx.cfs.list );
1027                 if (OSK_DLIST_IS_VALID( head_ctx, jctx.sched_info.runpool.policy_ctx.cfs.list ) == MALI_TRUE)
1028                 {
1029                         /* No need to hold the the runpool_irq.lock here for reading - the
1030                          * context is definitely not being updated in the runpool at this
1031                          * point. The queue_mutex held by the caller ensures the memory barrier. */
1032                         u64 head_runtime = head_ctx->jctx.sched_info.runpool.policy_ctx.cfs.runtime_us;
1033
1034                         if (head_runtime > policy_info->head_runtime_us)
1035                         {
1036                                 policy_info->head_runtime_us = head_runtime;
1037                         }
1038                 }
1039         }
1040
1041         return is_present;
1042 }
1043
1044 void kbasep_js_policy_kill_all_ctx_jobs( kbasep_js_policy *js_policy, kbase_context *kctx )
1045 {
1046         kbasep_js_policy_cfs *policy_info;
1047         kbasep_js_policy_cfs_ctx *ctx_info;
1048         u32 i;
1049
1050         OSK_ASSERT( js_policy != NULL );
1051         OSK_ASSERT( kctx != NULL );
1052
1053         policy_info = &js_policy->cfs;
1054         ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
1055
1056         {
1057                 kbase_device *kbdev = CONTAINER_OF( js_policy, kbase_device, js_data.policy );
1058                 KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_POLICY_KILL_ALL_CTX_JOBS, kctx, NULL, 0u,
1059                                                                   kbasep_js_policy_trace_get_refcnt( kbdev, kctx ));
1060         }
1061
1062         /* Kill jobs on each variant in turn */
1063         for ( i = 0; i < policy_info->num_core_req_variants; ++i )
1064         {
1065                 osk_dlist *job_list;
1066                 job_list = &ctx_info->job_list_head[i];
1067
1068                 /* Call kbase_jd_cancel() on all kbase_jd_atoms in this list, whilst removing them from the list */
1069                 OSK_DLIST_EMPTY_LIST( job_list, kbase_jd_atom, sched_info.cfs.list, kbase_jd_cancel );
1070         }
1071
1072 }
1073
1074 void kbasep_js_policy_runpool_add_ctx( kbasep_js_policy *js_policy, kbase_context *kctx )
1075 {
1076         kbasep_js_policy_cfs     *policy_info;
1077         kbasep_js_device_data    *js_devdata;
1078         kbase_device *kbdev;
1079         osk_error osk_err;
1080
1081         OSK_ASSERT( js_policy != NULL );
1082         OSK_ASSERT( kctx != NULL );
1083
1084         policy_info = &js_policy->cfs;
1085         js_devdata = CONTAINER_OF( js_policy, kbasep_js_device_data, policy );
1086         kbdev = CONTAINER_OF( js_policy, kbase_device, js_data.policy );
1087
1088         {
1089                 KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_POLICY_RUNPOOL_ADD_CTX, kctx, NULL, 0u,
1090                                                                   kbasep_js_policy_trace_get_refcnt_nolock( kbdev, kctx ));
1091         }
1092
1093         /* ASSERT about scheduled-ness/queued-ness */
1094         kbasep_js_debug_check( policy_info, kctx, KBASEP_JS_CHECK_NOTSCHEDULED );
1095
1096         /* All enqueued contexts go to the back of the runpool */
1097         OSK_DLIST_PUSH_BACK( &policy_info->scheduled_ctxs_head,
1098                              kctx,
1099                              kbase_context,
1100                              jctx.sched_info.runpool.policy_ctx.cfs.list );
1101
1102         if ( timer_callback_should_run(kbdev) != MALI_FALSE
1103                  && policy_info->timer_running == MALI_FALSE )
1104         {
1105                 osk_err = osk_timer_start_ns(&policy_info->timer, js_devdata->scheduling_tick_ns);
1106                 if (OSK_ERR_NONE == osk_err)
1107                 {
1108                         kbase_device *kbdev = CONTAINER_OF( js_policy, kbase_device, js_data.policy );
1109                         KBASE_TRACE_ADD( kbdev, JS_POLICY_TIMER_START, NULL, NULL, 0u, 0u );
1110                         policy_info->timer_running = MALI_TRUE;
1111                 }
1112         }
1113 }
1114
1115 void kbasep_js_policy_runpool_remove_ctx( kbasep_js_policy *js_policy, kbase_context *kctx )
1116 {
1117         kbasep_js_policy_cfs     *policy_info;
1118
1119         OSK_ASSERT( js_policy != NULL );
1120         OSK_ASSERT( kctx != NULL );
1121
1122         policy_info = &js_policy->cfs;
1123
1124         {
1125                 kbase_device *kbdev = CONTAINER_OF( js_policy, kbase_device, js_data.policy );
1126                 KBASE_TRACE_ADD_REFCOUNT( kbdev, JS_POLICY_RUNPOOL_REMOVE_CTX, kctx, NULL, 0u,
1127                                                                   kbasep_js_policy_trace_get_refcnt_nolock( kbdev, kctx ));
1128         }
1129
1130         /* ASSERT about scheduled-ness/queued-ness */
1131         kbasep_js_debug_check( policy_info, kctx, KBASEP_JS_CHECK_SCHEDULED );
1132
1133         /* No searching or significant list maintenance required to remove this context */
1134         OSK_DLIST_REMOVE( &policy_info->scheduled_ctxs_head,
1135                           kctx,
1136                           jctx.sched_info.runpool.policy_ctx.cfs.list );
1137 }
1138
/**
 * Decide whether the scheduled context @kctx should be descheduled in favour
 * of the context currently at the head of its policy queue.
 *
 * Returns MALI_TRUE when the queued head's runtime, plus one
 * priority-weighted timeslice, is still less than @kctx's accumulated
 * runtime - i.e. @kctx is no longer the least-run context by a margin of at
 * least one timeslice. Returns MALI_FALSE if nothing is queued.
 */
mali_bool kbasep_js_policy_should_remove_ctx( kbasep_js_policy *js_policy, kbase_context *kctx )
{
	kbasep_js_policy_cfs_ctx *ctx_info;
	kbasep_js_policy_cfs     *policy_info;
	kbase_context            *head_ctx;
	kbasep_js_device_data    *js_devdata;
	osk_dlist *queue_head;

	OSK_ASSERT( js_policy != NULL );
	OSK_ASSERT( kctx != NULL );

	policy_info = &js_policy->cfs;
	ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
	js_devdata = CONTAINER_OF( js_policy, kbasep_js_device_data, policy );

	/* Compare against the queue matching this context's scheduling class */
	if(ctx_info->process_rt_policy)
	{
		queue_head = &policy_info->ctx_rt_queue_head;
	}
	else
	{
		queue_head = &policy_info->ctx_queue_head;
	}

	/* Peek at the queued head; IS_VALID is FALSE when the queue is empty */
	head_ctx = OSK_DLIST_FRONT( queue_head,
	                            kbase_context,
	                            jctx.sched_info.runpool.policy_ctx.cfs.list );
	if (OSK_DLIST_IS_VALID( head_ctx, jctx.sched_info.runpool.policy_ctx.cfs.list ) == MALI_TRUE)
	{
		u64 head_runtime_us = head_ctx->jctx.sched_info.runpool.policy_ctx.cfs.runtime_us;

		if ((head_runtime_us + priority_weight(ctx_info, (u64)(js_devdata->ctx_timeslice_ns/1000u)))
			< ctx_info->runtime_us)
		{
			/* The context is scheduled out if it's not the least-run context anymore.
			 * The "real" head runtime is used instead of the cached runtime so the current
			 * context is not scheduled out when there is less contexts than address spaces.
			 */
			return MALI_TRUE;
		}
	}

	return MALI_FALSE;
}
1183
1184 /*
1185  * Job Chain Management
1186  */
1187
/**
 * Per-job policy initialization: fold the new atom into its parent
 * context's "bag" priority accounting, then cache which core-requirement
 * variant list the atom belongs on.
 *
 * @return result of cached_variant_idx_init(), which fails for a malformed
 *         atom.
 *
 * NOTE(review): the bag counters are incremented before the variant lookup
 * can fail, so on an error return the context's bag priority still includes
 * the rejected atom unless the caller also invokes
 * kbasep_js_policy_term_job() on that path - confirm the caller's contract.
 */
mali_error kbasep_js_policy_init_job( kbasep_js_policy *js_policy, kbase_jd_atom *atom )
{
	kbasep_js_policy_cfs_ctx *ctx_info;
	kbasep_js_policy_cfs *policy_info;
	kbase_context *parent_ctx;

	OSK_ASSERT( js_policy != NULL );
	OSK_ASSERT( atom != NULL );
	parent_ctx = atom->kctx;
	OSK_ASSERT( parent_ctx != NULL );

	policy_info = &js_policy->cfs;
	ctx_info = &parent_ctx->jctx.sched_info.runpool.policy_ctx.cfs;

	/* Adjust context priority to include the new job */
	ctx_info->bag_total_nr_atoms++;
	ctx_info->bag_total_priority += atom->nice_prio;

	/* Get average priority and convert to NICE range -20..19 */
	if(ctx_info->bag_total_nr_atoms)
	{
		ctx_info->bag_priority = (ctx_info->bag_total_priority / ctx_info->bag_total_nr_atoms) - 20;
	}

	/* Determine the job's index into the job list head, will return error if the
	 * atom is malformed and so is reported. */
	return cached_variant_idx_init( policy_info, parent_ctx, atom );
}
1216
1217 void kbasep_js_policy_term_job( kbasep_js_policy *js_policy, kbase_jd_atom *atom )
1218 {
1219         kbasep_js_policy_cfs_job *job_info;
1220         kbasep_js_policy_cfs_ctx *ctx_info;
1221         kbase_context *parent_ctx;
1222
1223         OSK_ASSERT( js_policy != NULL );
1224         CSTD_UNUSED(js_policy);
1225         OSK_ASSERT( atom != NULL );
1226         parent_ctx = atom->kctx;
1227         OSK_ASSERT( parent_ctx != NULL );
1228
1229         job_info = &atom->sched_info.cfs;
1230         ctx_info = &parent_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
1231
1232         /* Adjust context priority to no longer include removed job */
1233         OSK_ASSERT(ctx_info->bag_total_nr_atoms > 0);
1234         ctx_info->bag_total_nr_atoms--;
1235         ctx_info->bag_total_priority -= atom->nice_prio;
1236         OSK_ASSERT(ctx_info->bag_total_priority >= 0);
1237
1238         /* Get average priority and convert to NICE range -20..19 */
1239         if(ctx_info->bag_total_nr_atoms)
1240         {
1241                 ctx_info->bag_priority = (ctx_info->bag_total_priority / ctx_info->bag_total_nr_atoms) - 20;
1242         }
1243
1244         /* In any case, we'll ASSERT that this job was correctly removed from the relevant lists */
1245         OSK_ASSERT( OSK_DLIST_MEMBER_OF( &ctx_info->job_list_head[job_info->cached_variant_idx],
1246                                          atom,
1247                                          sched_info.cfs.list ) == MALI_FALSE );
1248 }
1249
/**
 * Dequeue a job suitable for @job_slot_idx into *katom_ptr.
 *
 * The set of core-requirement variants a slot accepts depends on whether
 * any Non-Soft-Stoppable context is in the run pool (SS vs NSS lookup
 * table). The scheduled contexts are then scanned twice: first only
 * contexts from realtime-policy processes, then the rest, so realtime
 * contexts' jobs are always preferred.
 *
 * @return MALI_TRUE if a job was dequeued, MALI_FALSE if no scheduled
 *         context had a matching job.
 */
mali_bool kbasep_js_policy_dequeue_job( kbase_device *kbdev,
                                        int job_slot_idx,
                                        kbase_jd_atom **katom_ptr )
{
	kbasep_js_device_data *js_devdata;
	kbasep_js_policy_cfs *policy_info;
	kbase_context *kctx;
	u32 variants_supported;

	OSK_ASSERT( kbdev != NULL );
	OSK_ASSERT( katom_ptr != NULL );
	OSK_ASSERT( job_slot_idx < BASE_JM_MAX_NR_SLOTS );

	js_devdata = &kbdev->js_data;
	policy_info = &js_devdata->policy.cfs;

	/* Get the variants for this slot */
	if ( kbasep_js_ctx_attr_count_on_runpool( kbdev, KBASEP_JS_CTX_ATTR_NSS ) == 0 )
	{
		/* SS-state */
		variants_supported = get_slot_to_variant_lookup( policy_info->slot_to_variant_lookup_ss_state, job_slot_idx );
	}
	else
	{
		/* NSS-state */
		variants_supported = get_slot_to_variant_lookup( policy_info->slot_to_variant_lookup_nss_state, job_slot_idx );
	}

	/* First pass through the runpool we consider the realtime priority jobs */
	OSK_DLIST_FOREACH( &policy_info->scheduled_ctxs_head,
	                   kbase_context,
	                   jctx.sched_info.runpool.policy_ctx.cfs.list,
	                   kctx )
	{
		if(kctx->jctx.sched_info.runpool.policy_ctx.cfs.process_rt_policy)
		{
			if(dequeue_job(kbdev, kctx, variants_supported, katom_ptr, job_slot_idx))
			{
				/* Realtime policy job matched */
				return MALI_TRUE;
			}
		}
	}

	/* Second pass through the runpool we consider the non-realtime priority jobs */
	OSK_DLIST_FOREACH( &policy_info->scheduled_ctxs_head,
	                   kbase_context,
	                   jctx.sched_info.runpool.policy_ctx.cfs.list,
	                   kctx )
	{
		if(kctx->jctx.sched_info.runpool.policy_ctx.cfs.process_rt_policy == MALI_FALSE)
		{
			if(dequeue_job(kbdev, kctx, variants_supported, katom_ptr, job_slot_idx))
			{
				/* Non-realtime policy job matched */
				return MALI_TRUE;
			}
		}
	}

	/* By this point, no contexts had a matching job */
	return MALI_FALSE;
}
1313
/**
 * IRQ-context variant of job dequeue.
 *
 * Simply forwards to kbasep_js_policy_dequeue_job(); the two paths are
 * currently identical (though the IRQ variant could be made faster).
 *
 * @return MALI_TRUE if a job was dequeued into *katom_ptr.
 */
mali_bool kbasep_js_policy_dequeue_job_irq( kbase_device *kbdev,
                                            int job_slot_idx,
                                            kbase_jd_atom **katom_ptr )
{
	/* IRQ and non-IRQ variants of this are the same (though, the IRQ variant could be made faster) */

	/* KBASE_TRACE_ADD_SLOT( kbdev, JS_POLICY_DEQUEUE_JOB_IRQ, NULL, NULL, 0u,
	                         job_slot_idx); */
	return kbasep_js_policy_dequeue_job( kbdev, job_slot_idx, katom_ptr );
}
1324
1325
1326 void kbasep_js_policy_enqueue_job( kbasep_js_policy *js_policy, kbase_jd_atom *katom )
1327 {
1328         kbasep_js_policy_cfs_job *job_info;
1329         kbasep_js_policy_cfs_ctx *ctx_info;
1330         kbase_context *parent_ctx;
1331
1332         OSK_ASSERT( js_policy != NULL );
1333         OSK_ASSERT( katom != NULL );
1334         parent_ctx = katom->kctx;
1335         OSK_ASSERT( parent_ctx != NULL );
1336
1337         job_info = &katom->sched_info.cfs;
1338         ctx_info = &parent_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
1339
1340         {
1341                 kbase_device *kbdev = CONTAINER_OF( js_policy, kbase_device, js_data.policy );
1342                 KBASE_TRACE_ADD( kbdev, JS_POLICY_ENQUEUE_JOB, katom->kctx, katom->user_atom, katom->jc,
1343                                                  0 );
1344         }
1345
1346         OSK_DLIST_PUSH_BACK( &ctx_info->job_list_head[job_info->cached_variant_idx],
1347                              katom,
1348                              kbase_jd_atom,
1349                              sched_info.cfs.list );
1350 }
1351
1352 void kbasep_js_policy_log_job_result( kbasep_js_policy *js_policy, kbase_jd_atom *katom, u32 time_spent_us )
1353 {
1354         kbasep_js_policy_cfs_ctx *ctx_info;
1355         kbase_context *parent_ctx;
1356         OSK_ASSERT( js_policy != NULL );
1357         OSK_ASSERT( katom != NULL );
1358         CSTD_UNUSED( js_policy );
1359
1360         parent_ctx = katom->kctx;
1361         OSK_ASSERT( parent_ctx != NULL );
1362
1363         ctx_info = &parent_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
1364
1365         ctx_info->runtime_us += priority_weight(ctx_info, time_spent_us);
1366 }
1367
1368 mali_bool kbasep_js_policy_ctx_has_priority( kbasep_js_policy *js_policy, kbase_context *current_ctx, kbase_context *new_ctx )
1369 {
1370         kbasep_js_policy_cfs_ctx *current_ctx_info;
1371         kbasep_js_policy_cfs_ctx *new_ctx_info;
1372
1373         OSK_ASSERT( current_ctx != NULL );
1374         OSK_ASSERT( new_ctx != NULL );
1375         CSTD_UNUSED(js_policy);
1376
1377         current_ctx_info = &current_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
1378         new_ctx_info = &new_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
1379
1380         if((current_ctx_info->process_rt_policy == MALI_FALSE) &&
1381            (new_ctx_info->process_rt_policy == MALI_TRUE))
1382         {
1383                 return MALI_TRUE;
1384         }
1385
1386         if((current_ctx_info->process_rt_policy == new_ctx_info->process_rt_policy) &&
1387            (current_ctx_info->bag_priority > new_ctx_info->bag_priority))
1388         {
1389                 return MALI_TRUE;
1390         }
1391
1392         return MALI_FALSE;
1393 }