3 * (C) COPYRIGHT 2010-2012 ARM Limited. All rights reserved.
5 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
8 * A copy of the licence is included with the program, and can also be obtained from Free Software
9 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 #include <kbase/src/common/mali_kbase.h>
16 #include <kbase/src/common/mali_kbase_uku.h>
18 #define beenthere(f, a...) OSK_PRINT_INFO(OSK_BASE_JD, "%s:" f, __func__, ##a)
21 * This is the kernel side of the API. The only entry points are:
22 * - kbase_jd_submit(): Called from userspace to submit a single bag
23 * - kbase_jd_done(): Called from interrupt context to track the
24 * completion of a job.
26 * - to the job manager (enqueue a job)
27 * - to the event subsystem (signals the completion/failure of bag/job-chains).
/* Set (raise) the semaphore bit for dependency slot 'dep' in the packed
 * bitmask array 'sem'. Word and in-word bit selection are delegated to the
 * BASEP_JD_SEM_WORD_NR / BASEP_JD_SEM_MASK_IN_WORD macros. */
30 STATIC INLINE void dep_raise_sem(u32 *sem, u8 dep)
35 sem[BASEP_JD_SEM_WORD_NR(dep)] |= BASEP_JD_SEM_MASK_IN_WORD(dep);
37 KBASE_EXPORT_TEST_API(dep_raise_sem)
/* Clear the semaphore bit for dependency slot 'dep' in the packed bitmask
 * array 'sem' (inverse of dep_raise_sem). */
39 STATIC INLINE void dep_clear_sem(u32 *sem, u8 dep)
44 sem[BASEP_JD_SEM_WORD_NR(dep)] &= ~BASEP_JD_SEM_MASK_IN_WORD(dep);
46 KBASE_EXPORT_TEST_API(dep_clear_sem)
/* Test the semaphore bit for dependency slot 'dep'.
 * Returns 1 if the bit is set, 0 otherwise (the '!!' normalizes the masked
 * word to a boolean). */
48 STATIC INLINE int dep_get_sem(u32 *sem, u8 dep)
53 return !!(sem[BASEP_JD_SEM_WORD_NR(dep)] & BASEP_JD_SEM_MASK_IN_WORD(dep));
55 KBASE_EXPORT_TEST_API(dep_get_sem)
/* Attempt to park 'katom' on its d-th pre-dependency slot.
 * If the semaphore bit for slot 's' is not raised the pre-dep is already
 * satisfied and nothing is queued; otherwise the atom is stored in the
 * per-context dependency queue at slot 's'.
 * NOTE(review): the return statements are not visible in this listing —
 * the caller (kbase_jd_submit) treats a nonzero result as "atom was
 * queued, do not run yet"; confirm against the full source. */
57 STATIC INLINE mali_bool jd_add_dep(kbase_jd_context *ctx,
58 kbase_jd_atom *katom, u8 d)
60 kbase_jd_dep_queue *dq = &ctx->dep_queue;
61 u8 s = katom->pre_dep.dep[d];
63 if (!dep_get_sem(ctx->dep_queue.sem, s))
67 * The slot must be free already. If not, then something went
68 * wrong in the validate path.
70 OSK_ASSERT(!dq->queue[s]);
73 beenthere("queued %p slot %d", (void *)katom, s);
77 KBASE_EXPORT_TEST_API(jd_add_dep)
80 * This function only computes the address of the first possible
81 * atom. It doesn't mean it's actually valid (jd_validate_atom takes
/* Returns a pointer into the user-submitted atom pool at bag->offset,
 * after checking that at least one base_jd_atom fits within pool_size.
 * The failure path (offset out of range) is elided in this listing. */
84 STATIC INLINE base_jd_atom *jd_get_first_atom(kbase_jd_context *ctx,
87 /* Check that offset is within pool */
88 if ((bag->offset + sizeof(base_jd_atom)) > ctx->pool_size)
91 return (base_jd_atom *)((char *)ctx->pool + bag->offset);
93 KBASE_EXPORT_TEST_API(jd_get_first_atom)
96 * Same as with jd_get_first_atom, but for any subsequent atom.
/* Advances past the current atom and its trailing variable-length syncset
 * array (base_jd_get_atom_syncset with index nr_syncsets yields the first
 * byte after the last syncset, i.e. the next atom). */
98 STATIC INLINE base_jd_atom *jd_get_next_atom(kbase_jd_atom *katom)
100 /* Think of adding extra padding for userspace */
101 return (base_jd_atom *)base_jd_get_atom_syncset(katom->user_atom, katom->nr_syncsets);
103 KBASE_EXPORT_TEST_API(jd_get_next_atom)
106 * This will check atom for correctness and if so, initialize its js policy.
/* Validates one userspace atom against the pool bounds and the dependency
 * state, allocates and fills the kernel-side kbase_jd_atom, scales the
 * priority into the NICE range, and initializes the JS policy for non-soft
 * jobs. Returns NULL on any validation/allocation failure (error paths are
 * elided in this listing). */
108 STATIC INLINE kbase_jd_atom *jd_validate_atom(struct kbase_context *kctx,
113 kbase_jd_context *jctx = &kctx->jctx;
114 kbase_jd_atom *katom;
119 /* Check the atom struct fits in the pool before we attempt to access it
120 Note: a bad bag->nr_atom could trigger this condition */
121 if(((char *)atom + sizeof(base_jd_atom)) > ((char *)jctx->pool + jctx->pool_size))
/* Snapshot userspace-controlled fields once before re-reading them, to
 * avoid TOCTOU-style re-reads of the shared pool. */
124 nr_syncsets = atom->nr_syncsets;
125 pre_dep = atom->pre_dep;
127 /* Check that the whole atom fits within the pool.
128 * syncsets integrity will be performed as we execute them */
129 if ((char *)base_jd_get_atom_syncset(atom, nr_syncsets) > ((char *)jctx->pool + jctx->pool_size))
133 * Check that dependencies are sensible: the atom cannot have
134 * pre-dependencies that are already in use by another atom.
136 if (jctx->dep_queue.queue[pre_dep.dep[0]] ||
137 jctx->dep_queue.queue[pre_dep.dep[1]])
140 /* Check for conflicting dependencies inside the bag */
141 if (dep_get_sem(sem, pre_dep.dep[0]) ||
142 dep_get_sem(sem, pre_dep.dep[1]))
/* Mark both pre-dep slots busy in the bag-local semaphore so later atoms
 * in the same bag cannot reuse them. */
145 dep_raise_sem(sem, pre_dep.dep[0]);
146 dep_raise_sem(sem, pre_dep.dep[1]);
148 /* We surely want to preallocate a pool of those, or have some
149 * kind of slab allocator around */
150 katom = osk_calloc(sizeof(*katom));
152 return NULL; /* Ideally we should handle OOM more gracefully */
154 katom->user_atom = atom;
155 katom->pre_dep = pre_dep;
156 katom->post_dep = atom->post_dep;
159 katom->nr_syncsets = nr_syncsets;
161 katom->core_req = atom->core_req;
162 katom->jc = atom->jc;
165 * If the priority is increased we need to check the caller has security caps to do this, if
166 * priority is decreased then this is ok as the result will have no negative impact on other
169 katom->nice_prio = atom->prio;
170 if( 0 > katom->nice_prio)
172 mali_bool access_allowed;
173 access_allowed = kbase_security_has_capability(kctx, KBASE_SEC_MODIFY_PRIORITY, KBASE_SEC_FLAG_NOAUDIT);
176 /* For unprivileged processes - a negative priority is interpreted as zero */
177 katom->nice_prio = 0;
181 /* Scale priority range to use NICE range */
184 /* Remove sign for calculation */
185 nice_priority = katom->nice_prio+128;
186 /* Fixed point maths to scale from ..255 to 0..39 (NICE range with +20 offset) */
187 katom->nice_prio = (((20<<16)/128)*nice_priority)>>16;
190 /* pre-fill the event */
191 katom->event.event_code = BASE_JD_EVENT_DONE;
192 katom->event.data = katom;
194 /* Initialize the jobscheduler policy for this atom. Function will
195 * return error if the atom is malformed. Then immediately terminate
196 * the policy to free allocated resources and return error.
198 * Soft-jobs never enter the job scheduler so we don't initialise the policy for these
200 if ((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0)
202 kbasep_js_policy *js_policy = &(kctx->kbdev->js_data.policy);
203 if (MALI_ERROR_NONE != kbasep_js_policy_init_job( js_policy, katom ))
212 KBASE_EXPORT_TEST_API(jd_validate_atom)
/* Report a whole bag as failed to userspace: stamp the bag's event with
 * the given error code and post it on the context's event queue. */
214 static void kbase_jd_cancel_bag(kbase_context *kctx, kbase_jd_bag *bag,
215 base_jd_event_code code)
217 bag->event.event_code = code;
218 kbase_event_post(kctx, &bag->event);
/* Destructor used when draining a transient list of validated atoms:
 * recovers the kbase_jd_atom from its embedded event, tears down its JS
 * policy state, and (in elided lines) frees the atom. */
221 STATIC void kbase_jd_katom_dtor(kbase_event *event)
223 kbase_jd_atom *katom = CONTAINER_OF(event, kbase_jd_atom, event);
224 kbasep_js_policy *js_policy = &(katom->kctx->kbdev->js_data.policy);
226 kbasep_js_policy_term_job( js_policy, katom );
229 KBASE_EXPORT_TEST_API(kbase_jd_katom_dtor)
/* Validate every atom in 'bag', collecting the resulting kbase_jd_atoms on
 * the caller-supplied transient list 'klistp'. On any invalid atom the
 * list built so far is destroyed via kbase_jd_katom_dtor, the bag is
 * cancelled with BASE_JD_EVENT_BAG_INVALID, and an error is returned.
 * 'sem' is a bag-local semaphore bitmap used only to detect conflicting
 * dependencies between atoms of this same bag. */
231 STATIC mali_error kbase_jd_validate_bag(kbase_context *kctx,
235 kbase_jd_context *jctx = &kctx->jctx;
236 kbase_jd_atom *katom;
238 mali_error err = MALI_ERROR_NONE;
239 u32 sem[BASEP_JD_SEM_ARRAY_SIZE] = { 0 };
242 atom = jd_get_first_atom(jctx, bag);
246 kbase_jd_cancel_bag(kctx, bag, BASE_JD_EVENT_BAG_INVALID);
247 err = MALI_ERROR_FUNCTION_FAILED;
251 for (i = 0; i < bag->nr_atoms; i++)
253 katom = jd_validate_atom(kctx, bag, atom, sem);
/* Validation failed: unwind everything queued so far. */
256 OSK_DLIST_EMPTY_LIST(klistp, kbase_event,
257 entry, kbase_jd_katom_dtor);
258 kbase_jd_cancel_bag(kctx, bag, BASE_JD_EVENT_BAG_INVALID);
259 err = MALI_ERROR_FUNCTION_FAILED;
263 OSK_DLIST_PUSH_BACK(klistp, &katom->event,
265 atom = jd_get_next_atom(katom);
271 KBASE_EXPORT_TEST_API(kbase_jd_validate_bag)
/* Resolve the d-th post-dependency of a completed atom: clear its
 * semaphore bit, pop any atom that was parked on that dependency slot,
 * and decide whether that dependent atom is now runnable (both pre-deps
 * satisfied). Returns the dependent atom when it can run (or when
 * 'zapping' forces it out), NULL otherwise — return statements are
 * elided in this listing. */
273 STATIC INLINE kbase_jd_atom *jd_resolve_dep(kbase_jd_atom *katom, u8 d, int zapping)
277 kbase_jd_atom *dep_katom;
278 kbase_jd_context *ctx = &katom->kctx->jctx;
280 dep = katom->post_dep.dep[d];
285 dep_clear_sem(ctx->dep_queue.sem, dep);
287 /* Get the atom that's waiting for us (if any), and remove it
288 * from this particular dependency queue */
289 dep_katom = ctx->dep_queue.queue[dep];
291 /* Case of a dangling dependency */
295 ctx->dep_queue.queue[dep] = NULL;
297 beenthere("removed %p from slot %d",
298 (void *)dep_katom, dep);
300 /* Find out if this atom is waiting for another job to be done.
301 * If it's not waiting anymore, put it on the run queue. */
302 if (dep_katom->pre_dep.dep[0] == dep)
303 other_dep = dep_katom->pre_dep.dep[1];
305 other_dep = dep_katom->pre_dep.dep[0];
308 * The following line seem to confuse people, so here's the
309 * rationale behind it:
311 * The queue holds pointers to atoms waiting for a single
312 * pre-dependency to be satisfied. Above, we've already
313 * satisfied a pre-dep for an atom (dep_katom). The next step
314 * is to check whether this atom is now free to run, or has to
315 * wait for another pre-dep to be satisfied.
317 * For a single entry, 3 possibilities:
319 * - It's a pointer to dep_katom -> the pre-dep has not been
320 * satisfied yet, and it cannot run immediately.
322 * - It's NULL -> the atom can be scheduled immediately, as
323 * the dependency has already been satisfied.
325 * - Neither of the above: this is the case of a dependency
326 * that has already been satisfied, and the slot reused by
327 * an incoming atom -> dep_katom can be run immediately.
329 if (ctx->dep_queue.queue[other_dep] != dep_katom)
333 * We're on a killing spree. Cancel the additional
334 * dependency, and return the atom anyway. An unfortunate
335 * consequence is that userspace may receive notifications out
336 * of order WRT the dependency tree.
340 ctx->dep_queue.queue[other_dep] = NULL;
344 beenthere("katom %p waiting for slot %d",
345 (void *)dep_katom, other_dep);
348 KBASE_EXPORT_TEST_API(jd_resolve_dep)
351 * Perform the necessary handling of an atom that has finished running
352 * on the GPU. The @a zapping parameter instructs the function to
353 * propagate the state of the completed atom to all the atoms that
354 * depend on it, directly or indirectly.
356 * This flag is used for error propagation in the "failed job", or
357 * when destroying a context.
359 * When not zapping, the caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
/* Returns MALI_TRUE if the caller should attempt to schedule a new
 * context afterwards (accumulated from kbasep_js_add_job). Uses an
 * explicit traversal stack (ts) plus a visited list (vl) to walk the
 * dependency graph iteratively instead of recursing. */
361 STATIC mali_bool jd_done_nolock(kbase_jd_atom *katom, int zapping)
363 kbase_jd_atom *dep_katom;
364 struct kbase_context *kctx = katom->kctx;
365 osk_dlist ts; /* traversal stack */
366 osk_dlist *tsp = &ts;
367 osk_dlist vl; /* visited list */
368 osk_dlist *vlp = &vl;
370 base_jd_event_code event_code = katom->event.event_code;
371 mali_bool need_to_try_schedule_context = MALI_FALSE;
374 * We're trying to achieve two goals here:
375 * - Eliminate dependency atoms very early so we can push real
377 * - Avoid recursion which could result in a nice DoS from
380 * We use two lists here:
381 * - One as a stack (ts) to get rid of the recursion
382 * - The other to queue jobs that are either done or ready to
/* Seed the stack with the completed atom itself. */
389 OSK_DLIST_PUSH_BACK(tsp, &katom->event, kbase_event, entry);
391 while(!OSK_DLIST_IS_EMPTY(tsp))
394 node = OSK_DLIST_POP_BACK(tsp, kbase_jd_atom, event.entry);
397 node->core_req == BASE_JD_REQ_DEP ||
/* Resolve both post-dependencies; newly-runnable dependents are
 * pushed back on the stack for processing. */
401 for (i = 0; i < 2; i++)
403 dep_katom = jd_resolve_dep(node, i, zapping);
404 if (dep_katom) /* push */
405 OSK_DLIST_PUSH_BACK(tsp,
412 OSK_DLIST_PUSH_BACK(vlp, &node->event,
/* Second pass: drain the visited list, posting completion events
 * and/or handing runnable jobs to the scheduler. */
416 while(!OSK_DLIST_IS_EMPTY(vlp))
418 node = OSK_DLIST_POP_FRONT(vlp, kbase_jd_atom, event.entry);
421 node->core_req == BASE_JD_REQ_DEP ||
422 (node->core_req & BASE_JD_REQ_SOFT_JOB) ||
425 kbase_jd_bag *bag = node->bag;
427 /* If we're zapping stuff, propagate the event code */
430 node->event.event_code = event_code;
432 else if (node->core_req & BASE_JD_REQ_SOFT_JOB)
434 kbase_process_soft_job( kctx, node );
437 /* This will signal our per-context worker
438 * thread that we're done with this katom. Any
439 * use of this katom after that point IS A
441 kbase_event_post(kctx, &node->event);
442 beenthere("done atom %p\n", (void*)node);
444 if (--bag->nr_atoms == 0)
446 /* This atom was the last, signal userspace */
447 kbase_event_post(kctx, &bag->event);
448 beenthere("done bag %p\n", (void*)bag);
451 /* Decrement and check the TOTAL number of jobs. This includes
452 * those not tracked by the scheduler: 'not ready to run' and
453 * 'dependency-only' jobs. */
454 if (--kctx->jctx.job_nr == 0)
456 /* All events are safely queued now, and we can signal any waiter
457 * that we've got no more jobs (so we can be safely terminated) */
458 osk_waitq_set(&kctx->jctx.zero_jobs_waitq);
463 /* Queue an action about whether we should try scheduling a context */
464 need_to_try_schedule_context |= kbasep_js_add_job( kctx, node );
468 return need_to_try_schedule_context;
470 KBASE_EXPORT_TEST_API(jd_done_nolock)
/* Userspace entry point: validate a whole bag of atoms, then queue each
 * atom on its dependencies or run it (soft job / pure dependency resolved
 * immediately, real jobs handed to the JS). Job-level errors are reported
 * through the event system rather than the return code; the return code
 * covers only allocation/validation failure of the bag itself. */
472 mali_error kbase_jd_submit(kbase_context *kctx, const kbase_uk_job_submit *user_bag)
475 osk_dlist *klistp = &klist;
476 kbase_jd_context *jctx = &kctx->jctx;
477 kbase_jd_atom *katom;
479 mali_error err = MALI_ERROR_NONE;
481 mali_bool need_to_try_schedule_context = MALI_FALSE;
485 * kbase_jd_submit isn't expected to fail and so all errors with the jobs
486 * are reported by immediately failing them (through event system)
491 beenthere("%s", "Enter");
492 bag = osk_malloc(sizeof(*bag));
495 err = MALI_ERROR_OUT_OF_MEMORY;
499 bag->core_restriction = user_bag->core_restriction;
500 bag->offset = user_bag->offset;
501 bag->nr_atoms = user_bag->nr_atoms;
502 bag->event.event_code = BASE_JD_EVENT_BAG_DONE;
503 bag->event.data = (void *)(uintptr_t)user_bag->bag_uaddr;
505 osk_mutex_lock(&jctx->lock);
508 * Use a transient list to store all the validated atoms.
509 * Once we're sure nothing is wrong, there's no going back.
511 OSK_DLIST_INIT(klistp);
513 if (kbase_jd_validate_bag(kctx, bag, klistp))
515 err = MALI_ERROR_FUNCTION_FAILED;
519 while(!OSK_DLIST_IS_EMPTY(klistp))
522 katom = OSK_DLIST_POP_FRONT(klistp,
523 kbase_jd_atom, event.entry);
526 /* This is crucial. As jobs are processed in-order, we must
527 * indicate that any job with a pre-dep on this particular job
528 * must wait for its completion (indicated as a post-dep).
530 dep_raise_sem(jctx->dep_queue.sem, katom->post_dep.dep[0]);
531 dep_raise_sem(jctx->dep_queue.sem, katom->post_dep.dep[1]);
533 /* Process pre-exec syncsets before queueing */
534 kbase_pre_job_sync(kctx,
535 base_jd_get_atom_syncset(katom->user_atom, 0),
537 /* Update the TOTAL number of jobs. This includes those not tracked by
538 * the scheduler: 'not ready to run' and 'dependency-only' jobs. */
540 /* Cause any future waiter-on-termination to wait until the jobs are
542 osk_waitq_clear(&jctx->zero_jobs_waitq);
543 /* If no pre-dep has been set, then we're free to run
544 * the job immediately */
545 if ((jd_add_dep(jctx, katom, 0) | jd_add_dep(jctx, katom, 1)))
547 beenthere("queuing atom #%d(%p %p)", i,
548 (void *)katom, (void *)katom->user_atom);
552 beenthere("running atom #%d(%p %p)", i,
553 (void *)katom, (void *)katom->user_atom);
555 /* Lock the JS Context, for submitting jobs, and queue an action about
556 * whether we need to try scheduling the context */
557 osk_mutex_lock( &kctx->jctx.sched_info.ctx.jsctx_mutex );
559 if (katom->core_req & BASE_JD_REQ_SOFT_JOB)
561 kbase_process_soft_job( kctx, katom );
562 /* Pure software job, so resolve it immediately */
563 need_to_try_schedule_context |= jd_done_nolock(katom, 0);
565 else if (katom->core_req != BASE_JD_REQ_DEP)
567 need_to_try_schedule_context |= kbasep_js_add_job( kctx, katom );
571 /* This is a pure dependency. Resolve it immediately */
572 need_to_try_schedule_context |= jd_done_nolock(katom, 0);
574 osk_mutex_unlock( &kctx->jctx.sched_info.ctx.jsctx_mutex );
577 /* Only whilst we've dropped the JS context lock can we schedule a new
580 * As an optimization, we only need to do this after processing all jobs
581 * resolved from this context. */
582 if ( need_to_try_schedule_context != MALI_FALSE )
584 kbasep_js_try_schedule_head_ctx( kbdev );
587 osk_mutex_unlock(&jctx->lock);
589 beenthere("%s", "Exit");
592 KBASE_EXPORT_TEST_API(kbase_jd_submit)
/* Release the PM reference on the shader cores (and tiler, when the atom
 * requested one via BASE_JD_REQ_T) that this atom was holding. No-op when
 * the atom never acquired an affinity. */
594 STATIC void kbasep_jd_check_deref_cores(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
596 if (katom->affinity != 0)
598 u64 tiler_affinity = 0;
599 if (katom->core_req & BASE_JD_REQ_T)
601 tiler_affinity = kbdev->tiler_present_bitmap;
603 kbase_pm_release_cores(kbdev, katom->affinity, tiler_affinity);
610 * - requeues the job from the runpool (if it was soft-stopped/removed from NEXT registers)
611 * - removes it from the system if it finished/failed/was cancelled.
612 * - resolves dependencies to add dependent jobs to the context, potentially starting them if necessary (which may add more references to the context)
613 * - releases the reference to the context from the no-longer-running job.
614 * - Handles retrying submission outside of IRQ context if it failed from within IRQ context.
/* Work-queue handler run after a job completes on the GPU. 'data' is the
 * osk_workq_work embedded in the atom; katom->jc and katom->user_atom are
 * cached up front because the atom may be freed by jd_done_nolock(). */
616 static void jd_done_worker(osk_workq_work *data)
618 kbase_jd_atom *katom = CONTAINER_OF(data, kbase_jd_atom, work);
619 kbase_jd_context *jctx;
621 kbasep_js_kctx_info *js_kctx_info;
622 kbasep_js_policy *js_policy;
624 kbasep_js_device_data *js_devdata;
626 u64 cache_jc = katom->jc;
627 base_jd_atom *cache_user_atom = katom->user_atom;
629 mali_bool retry_submit;
635 js_kctx_info = &kctx->jctx.sched_info;
637 js_devdata = &kbdev->js_data;
638 js_policy = &kbdev->js_data.policy;
640 KBASE_TRACE_ADD( kbdev, JD_DONE_WORKER, kctx, katom->user_atom, katom->jc, 0 );
642 * Begin transaction on JD context and JS context
644 osk_mutex_lock( &jctx->lock );
645 osk_mutex_lock( &js_kctx_info->ctx.jsctx_mutex );
647 /* This worker only gets called on contexts that are scheduled *in*. This is
648 * because it only happens in response to an IRQ from a job that was
651 OSK_ASSERT( js_kctx_info->ctx.is_scheduled != MALI_FALSE );
653 /* Release cores this job was using (this might power down unused cores) */
654 kbasep_jd_check_deref_cores(kbdev, katom);
656 /* Grab the retry_submit state before the katom disappears */
657 retry_submit = kbasep_js_get_job_retry_submit_slot( katom, &retry_jobslot );
659 if (katom->event.event_code == BASE_JD_EVENT_STOPPED
660 || katom->event.event_code == BASE_JD_EVENT_REMOVED_FROM_NEXT )
662 /* Requeue the atom on soft-stop / removed from NEXT registers */
/* BUGFIX: the format string has two %p conversions but only one
 * argument (kctx) was passed, which is undefined behavior in a
 * printf-style call; pass the atom as the first %p argument. */
663 OSK_PRINT_INFO(OSK_BASE_JM, "JS: Soft Stopped/Removed from next %p on Ctx %p; Requeuing", (void *)katom, kctx );
665 osk_mutex_lock( &js_devdata->runpool_mutex );
666 kbasep_js_clear_job_retry_submit( katom );
668 osk_spinlock_irq_lock( &js_devdata->runpool_irq.lock );
669 kbasep_js_policy_enqueue_job( js_policy, katom );
670 osk_spinlock_irq_unlock( &js_devdata->runpool_irq.lock );
672 /* This may now be the only job present (work queues can run items out of order
673 * e.g. on different CPUs), so we must try to run it, otherwise it might not get
674 * run at all after this. */
675 kbasep_js_try_run_next_job( kbdev );
677 osk_mutex_unlock( &js_devdata->runpool_mutex );
681 /* Remove the job from the system for all other reasons */
682 mali_bool need_to_try_schedule_context;
684 kbasep_js_remove_job( kctx, katom );
686 zapping = (katom->event.event_code != BASE_JD_EVENT_DONE);
687 need_to_try_schedule_context = jd_done_nolock(katom, zapping);
689 /* This ctx is already scheduled in, so return value guaranteed FALSE */
690 OSK_ASSERT( need_to_try_schedule_context == MALI_FALSE );
692 /* katom may have been freed now, do not use! */
695 * Transaction complete
697 osk_mutex_unlock( &js_kctx_info->ctx.jsctx_mutex );
698 osk_mutex_unlock( &jctx->lock );
700 /* Job is now no longer running, so can now safely release the context reference
701 * This potentially schedules out the context, schedules in a new one, and
702 * runs a new job on the new one */
703 kbasep_js_runpool_release_ctx( kbdev, kctx );
705 /* If the IRQ handler failed to get a job from the policy, try again from
706 * outside the IRQ handler */
707 if ( retry_submit != MALI_FALSE )
709 KBASE_TRACE_ADD_SLOT( kbdev, JD_DONE_TRY_RUN_NEXT_JOB, kctx, cache_user_atom, cache_jc, retry_jobslot );
710 osk_mutex_lock( &js_devdata->runpool_mutex );
711 kbasep_js_try_run_next_job_on_slot( kbdev, retry_jobslot );
712 osk_mutex_unlock( &js_devdata->runpool_mutex );
714 KBASE_TRACE_ADD( kbdev, JD_DONE_WORKER_END, kctx, cache_user_atom, cache_jc, 0 );
718 * Work queue job cancel function
719 * Only called as part of 'Zapping' a context (which occurs on termination)
720 * Operates serially with the jd_done_worker() on the work queue
/* Unlike jd_done_worker, this runs on contexts that are scheduled OUT:
 * it must not touch running-job accounting. Releases the atom's cores,
 * removes the job from the scheduler, and resolves it with zapping
 * enabled so the cancellation propagates through the dependency tree. */
722 static void jd_cancel_worker(osk_workq_work *data)
724 kbase_jd_atom *katom = CONTAINER_OF(data, kbase_jd_atom, work);
725 kbase_jd_context *jctx;
727 kbasep_js_kctx_info *js_kctx_info;
728 mali_bool need_to_try_schedule_context;
732 js_kctx_info = &kctx->jctx.sched_info;
735 kbase_device *kbdev = kctx->kbdev;
736 KBASE_TRACE_ADD( kbdev, JD_CANCEL_WORKER, kctx, katom->user_atom, katom->jc, 0 );
739 /* This only gets called on contexts that are scheduled out. Hence, we must
740 * make sure we don't de-ref the number of running jobs (there aren't
741 * any), nor must we try to schedule out the context (it's already
744 OSK_ASSERT( js_kctx_info->ctx.is_scheduled == MALI_FALSE );
746 /* Release cores this job was using (this might power down unused cores) */
747 kbasep_jd_check_deref_cores(kctx->kbdev, katom);
749 /* Scheduler: Remove the job from the system */
750 osk_mutex_lock( &js_kctx_info->ctx.jsctx_mutex );
751 kbasep_js_remove_job( kctx, katom );
752 osk_mutex_unlock( &js_kctx_info->ctx.jsctx_mutex );
754 osk_mutex_lock(&jctx->lock);
756 /* Always enable zapping */
757 need_to_try_schedule_context = jd_done_nolock(katom, 1);
758 /* Because we're zapping, we're not adding any more jobs to this ctx, so no need to
759 * schedule the context. There's also no need for the jsctx_mutex to have been taken
760 * around this too. */
761 OSK_ASSERT( need_to_try_schedule_context == MALI_FALSE );
763 /* katom may have been freed now, do not use! */
764 osk_mutex_unlock(&jctx->lock);
/* Called to track completion of a job (per the file header, from
 * interrupt context): defers the heavy lifting to jd_done_worker on the
 * per-context job_done workqueue. */
769 void kbase_jd_done(kbase_jd_atom *katom)
779 KBASE_TRACE_ADD( kbdev, JD_DONE, kctx, katom->user_atom, katom->jc, 0 );
781 osk_workq_work_init(&katom->work, jd_done_worker);
782 osk_workq_submit(&kctx->jctx.job_done_wq, &katom->work);
784 KBASE_EXPORT_TEST_API(kbase_jd_done)
/* Cancel an atom belonging to a context that is not scheduled in: marks
 * its event as JOB_CANCELLED and defers cleanup to jd_cancel_worker on
 * the same per-context workqueue as jd_done_worker (so the two run
 * serially). */
786 void kbase_jd_cancel(kbase_jd_atom *katom)
789 kbasep_js_kctx_info *js_kctx_info;
793 js_kctx_info = &kctx->jctx.sched_info;
796 KBASE_TRACE_ADD( kbdev, JD_CANCEL, kctx, katom->user_atom, katom->jc, 0 );
798 /* This should only be done from a context that is not scheduled */
799 OSK_ASSERT( js_kctx_info->ctx.is_scheduled == MALI_FALSE );
801 katom->event.event_code = BASE_JD_EVENT_JOB_CANCELLED;
803 osk_workq_work_init(&katom->work, jd_cancel_worker);
804 osk_workq_submit(&kctx->jctx.job_done_wq, &katom->work);
/* Flush the context's job-done workqueue plus every per-address-space
 * page-fault workqueue, so all pending workers have run before the
 * caller proceeds. */
807 void kbase_jd_flush_workqueues(kbase_context *kctx)
817 osk_workq_flush( &kctx->jctx.job_done_wq );
819 /* Flush all workqueues, for simplicity */
820 for (i = 0; i < kbdev->nr_address_spaces; i++)
822 osk_workq_flush( &kbdev->as[i].pf_wq );
/* Shared state between kbase_jd_zap_context and its timeout callback;
 * 'stage' records how far the zap/reset sequence has progressed (the
 * stage values are documented below; some member declarations are not
 * visible in this listing). */
826 typedef struct zap_reset_data
829 * 1. The timer has never been called
830 * 2. The zap has timed out, all slots are soft-stopped - the GPU reset will happen.
831 * The GPU has been reset when kbdev->reset_waitq is signalled
833 * (-1 - The timer has been cancelled)
/* Timer callback armed by kbase_jd_zap_context: if the zap has not been
 * cancelled (stage == -1) by the time it fires, escalate to a full GPU
 * reset and advance to stage 2. All stage transitions are protected by
 * reset_data->lock. */
841 static void zap_timeout_callback(void *data)
843 zap_reset_data *reset_data = (zap_reset_data*)data;
844 kbase_device *kbdev = reset_data->kbdev;
846 osk_spinlock_lock(&reset_data->lock);
848 if (reset_data->stage == -1)
853 if (kbase_prepare_to_reset_gpu(kbdev))
855 kbase_reset_gpu(kbdev);
858 reset_data->stage = 2;
861 osk_spinlock_unlock(&reset_data->lock);
/* Zap (forcibly drain) a context on termination: cancel its jobs, arm a
 * watchdog timer that resets the GPU if the drain hangs, then wait for
 * zero jobs and for the context to be de-scheduled. Finally takes and
 * releases both context locks to ensure the waitq signallers have fully
 * finished before the context can be destroyed. */
864 void kbase_jd_zap_context(kbase_context *kctx)
867 osk_timer zap_timeout;
869 zap_reset_data reset_data;
875 KBASE_TRACE_ADD( kbdev, JD_ZAP_CONTEXT, kctx, NULL, 0u, 0u );
876 kbase_job_zap_context(kctx);
878 ret = osk_timer_on_stack_init(&zap_timeout);
879 if (ret != OSK_ERR_NONE)
884 ret = osk_spinlock_init(&reset_data.lock, OSK_LOCK_ORDER_JD_ZAP_CONTEXT);
885 if (ret != OSK_ERR_NONE)
887 osk_timer_on_stack_term(&zap_timeout);
891 reset_data.kbdev = kbdev;
892 reset_data.timer = &zap_timeout;
893 reset_data.stage = 1;
894 osk_timer_callback_set(&zap_timeout, zap_timeout_callback, &reset_data);
895 ret = osk_timer_start(&zap_timeout, ZAP_TIMEOUT);
897 if (ret != OSK_ERR_NONE)
899 osk_spinlock_term(&reset_data.lock);
900 osk_timer_on_stack_term(&zap_timeout);
904 /* If we jump to here then the zap timeout will not be active,
905 * so if the GPU hangs the driver will also hang. This will only
906 * happen if the driver is very resource starved.
910 /* Wait for all jobs to finish, and for the context to be not-scheduled
911 * (due to kbase_job_zap_context(), we also guarantee it's not in the JS
912 * policy queue either */
913 osk_waitq_wait(&kctx->jctx.zero_jobs_waitq);
914 osk_waitq_wait(&kctx->jctx.sched_info.ctx.not_scheduled_waitq);
916 if (ret == OSK_ERR_NONE)
918 osk_spinlock_lock(&reset_data.lock);
919 if (reset_data.stage == 1)
921 /* The timer hasn't run yet - so cancel it */
922 reset_data.stage = -1;
924 osk_spinlock_unlock(&reset_data.lock);
926 osk_timer_stop(&zap_timeout);
928 if (reset_data.stage == 2)
930 /* The reset has already started.
931 * Wait for the reset to complete
933 osk_waitq_wait(&kbdev->reset_waitq);
935 osk_timer_on_stack_term(&zap_timeout);
936 osk_spinlock_term(&reset_data.lock);
939 OSK_PRINT_INFO(OSK_BASE_JM, "Zap: Finished Context %p", kctx );
941 /* Ensure that the signallers of the waitqs have finished */
942 osk_mutex_lock(&kctx->jctx.lock);
943 osk_mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
944 osk_mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
945 osk_mutex_unlock(&kctx->jctx.lock);
947 KBASE_EXPORT_TEST_API(kbase_jd_zap_context)
/* Initialize the per-context job-dispatch state: allocate the atom pool,
 * create the job-done workqueue, zero the dependency queue and semaphore
 * array, and set up the lock, waitqueue and trace-buffer spinlock. On
 * failure, resources are unwound in reverse order via the labels at the
 * bottom (goto-cleanup; jump sites are elided in this listing). */
949 mali_error kbase_jd_init(struct kbase_context *kctx)
957 OSK_ASSERT(NULL == kctx->jctx.pool);
959 kaddr = osk_vmalloc(BASEP_JCTX_RB_NRPAGES * OSK_PAGE_SIZE);
962 mali_err = MALI_ERROR_OUT_OF_MEMORY;
965 osk_err = osk_workq_init(&kctx->jctx.job_done_wq, "mali_jd", 0);
966 if (OSK_ERR_NONE != osk_err)
968 mali_err = MALI_ERROR_OUT_OF_MEMORY;
/* Dependency queue has 256 slots (one per 8-bit dependency id). */
972 for (i = 0; i < 256; i++)
973 kctx->jctx.dep_queue.queue[i] = NULL;
975 for (i = 0; i < BASEP_JD_SEM_ARRAY_SIZE; i++)
976 kctx->jctx.dep_queue.sem[i] = 0;
978 osk_err = osk_mutex_init(&kctx->jctx.lock, OSK_LOCK_ORDER_JCTX);
979 if (OSK_ERR_NONE != osk_err)
981 mali_err = MALI_ERROR_FUNCTION_FAILED;
985 osk_err = osk_waitq_init(&kctx->jctx.zero_jobs_waitq);
986 if (OSK_ERR_NONE != osk_err)
988 mali_err = MALI_ERROR_FUNCTION_FAILED;
992 osk_err = osk_spinlock_irq_init(&kctx->jctx.tb_lock, OSK_LOCK_ORDER_TB);
993 if (OSK_ERR_NONE != osk_err)
995 mali_err = MALI_ERROR_FUNCTION_FAILED;
/* No jobs yet, so waiters-for-zero-jobs must not block. */
999 osk_waitq_set(&kctx->jctx.zero_jobs_waitq);
1001 kctx->jctx.pool = kaddr;
1002 kctx->jctx.pool_size = BASEP_JCTX_RB_NRPAGES * OSK_PAGE_SIZE;
1003 kctx->jctx.job_nr = 0;
1005 return MALI_ERROR_NONE;
1008 osk_waitq_term(&kctx->jctx.zero_jobs_waitq);
1010 osk_mutex_term(&kctx->jctx.lock);
1012 osk_workq_term(&kctx->jctx.job_done_wq);
/* Tear down the per-context job-dispatch state created by kbase_jd_init,
 * in reverse order of initialization (the workqueue term also drains any
 * remaining queued work). */
1020 void kbase_jd_exit(struct kbase_context *kctx)
1023 /* Assert if kbase_jd_init has not been called before this function
1024 (kbase_jd_init initializes the pool) */
1025 OSK_ASSERT(kctx->jctx.pool);
1027 osk_spinlock_irq_term(&kctx->jctx.tb_lock);
1028 /* Work queue is emptied by this */
1029 osk_workq_term(&kctx->jctx.job_done_wq);
1030 osk_waitq_term(&kctx->jctx.zero_jobs_waitq);
1031 osk_vfree(kctx->jctx.pool);
1032 osk_mutex_term(&kctx->jctx.lock);
1034 KBASE_EXPORT_TEST_API(kbase_jd_exit)