2 * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
4 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
7 * A copy of the licence is included with the program, and can also be obtained from Free Software
8 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
11 #include "mali_kernel_common.h"
12 #include "mali_group.h"
14 #include "mali_l2_cache.h"
18 #include "mali_dlbu.h"
19 #include "mali_broadcast.h"
20 #include "mali_scheduler.h"
21 #include "mali_osk_profiling.h"
22 #include "mali_pm_domain.h"
24 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
25 #include <linux/sched.h>
26 #include <trace/events/gpu.h>
30 static void mali_group_bottom_half_mmu(void *data);
31 static void mali_group_bottom_half_gp(void *data);
32 static void mali_group_bottom_half_pp(void *data);
34 static void mali_group_timeout(void *data);
35 static void mali_group_reset_pp(struct mali_group *group);
36 static void mali_group_reset_mmu(struct mali_group *group);
38 #if defined(CONFIG_MALI400_PROFILING)
39 static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num);
40 #endif /* #if defined(CONFIG_MALI400_PROFILING) */
43 * The group object is the most important object in the device driver,
44 * and acts as the center of many HW operations.
45 * The reason for this is that operations on the MMU will affect all
46 * cores connected to this MMU (a group is defined by the MMU and the
47 * cores which are connected to this).
48 * The group lock is thus the most important lock, followed by the
 * GP and PP scheduler locks. They must be taken in the following
 * order: GP/PP lock first, then group lock(s).
54 static struct mali_group *mali_global_groups[MALI_MAX_NUMBER_OF_GROUPS] = { NULL, };
55 static u32 mali_global_num_groups = 0;
58 int mali_max_job_runtime = MALI_MAX_JOB_RUNTIME_DEFAULT;
60 /* local helper functions */
61 static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session);
62 static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session);
63 static void mali_group_recovery_reset(struct mali_group *group);
64 static void mali_group_mmu_page_fault_and_unlock(struct mali_group *group);
66 static void mali_group_post_process_job_pp(struct mali_group *group);
67 static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend);
69 void mali_group_lock(struct mali_group *group)
71 #ifdef MALI_UPPER_HALF_SCHEDULING
72 _mali_osk_spinlock_irq_lock(group->lock);
74 _mali_osk_spinlock_lock(group->lock);
76 MALI_DEBUG_PRINT(5, ("Mali group: Group lock taken 0x%08X\n", group));
79 void mali_group_unlock(struct mali_group *group)
81 MALI_DEBUG_PRINT(5, ("Mali group: Releasing group lock 0x%08X\n", group));
82 #ifdef MALI_UPPER_HALF_SCHEDULING
83 _mali_osk_spinlock_irq_unlock(group->lock);
85 _mali_osk_spinlock_unlock(group->lock);
90 void mali_group_assert_locked(struct mali_group *group)
92 MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
97 struct mali_group *mali_group_create(struct mali_l2_cache_core *core, struct mali_dlbu_core *dlbu, struct mali_bcast_unit *bcast)
99 struct mali_group *group = NULL;
101 if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS) {
102 MALI_PRINT_ERROR(("Mali group: Too many group objects created\n"));
106 group = _mali_osk_calloc(1, sizeof(struct mali_group));
108 group->timeout_timer = _mali_osk_timer_init();
110 if (NULL != group->timeout_timer) {
111 _mali_osk_lock_order_t order;
112 _mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group);
115 order = _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL;
117 order = _MALI_OSK_LOCK_ORDER_GROUP;
120 #ifdef MALI_UPPER_HALF_SCHEDULING
121 group->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, order);
123 group->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, order);
126 if (NULL != group->lock) {
127 group->l2_cache_core[0] = core;
128 group->session = NULL;
129 group->power_is_on = MALI_TRUE;
130 group->state = MALI_GROUP_STATE_IDLE;
131 _mali_osk_list_init(&group->group_list);
132 _mali_osk_list_init(&group->pp_scheduler_list);
133 group->parent_group = NULL;
134 group->l2_cache_core_ref_count[0] = 0;
135 group->l2_cache_core_ref_count[1] = 0;
136 group->bcast_core = bcast;
137 group->dlbu_core = dlbu;
139 mali_global_groups[mali_global_num_groups] = group;
140 mali_global_num_groups++;
144 _mali_osk_timer_term(group->timeout_timer);
146 _mali_osk_free(group);
152 _mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core* mmu_core)
154 /* This group object now owns the MMU core object */
155 group->mmu= mmu_core;
156 group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group);
157 if (NULL == group->bottom_half_work_mmu) {
158 return _MALI_OSK_ERR_FAULT;
160 return _MALI_OSK_ERR_OK;
163 void mali_group_remove_mmu_core(struct mali_group *group)
165 /* This group object no longer owns the MMU core object */
167 if (NULL != group->bottom_half_work_mmu) {
168 _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
172 _mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core* gp_core)
174 /* This group object now owns the GP core object */
175 group->gp_core = gp_core;
176 group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group);
177 if (NULL == group->bottom_half_work_gp) {
178 return _MALI_OSK_ERR_FAULT;
180 return _MALI_OSK_ERR_OK;
183 void mali_group_remove_gp_core(struct mali_group *group)
185 /* This group object no longer owns the GP core object */
186 group->gp_core = NULL;
187 if (NULL != group->bottom_half_work_gp) {
188 _mali_osk_wq_delete_work(group->bottom_half_work_gp);
192 _mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core* pp_core)
194 /* This group object now owns the PP core object */
195 group->pp_core = pp_core;
196 group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group);
197 if (NULL == group->bottom_half_work_pp) {
198 return _MALI_OSK_ERR_FAULT;
200 return _MALI_OSK_ERR_OK;
203 void mali_group_remove_pp_core(struct mali_group *group)
205 /* This group object no longer owns the PP core object */
206 group->pp_core = NULL;
207 if (NULL != group->bottom_half_work_pp) {
208 _mali_osk_wq_delete_work(group->bottom_half_work_pp);
212 void mali_group_set_pm_domain(struct mali_group *group, struct mali_pm_domain *domain)
214 group->pm_domain = domain;
217 void mali_group_delete(struct mali_group *group)
221 MALI_DEBUG_PRINT(4, ("Deleting group %p\n", group));
223 MALI_DEBUG_ASSERT(NULL == group->parent_group);
225 /* Delete the resources that this group owns */
226 if (NULL != group->gp_core) {
227 mali_gp_delete(group->gp_core);
230 if (NULL != group->pp_core) {
231 mali_pp_delete(group->pp_core);
234 if (NULL != group->mmu) {
235 mali_mmu_delete(group->mmu);
238 if (mali_group_is_virtual(group)) {
239 /* Remove all groups from virtual group */
240 struct mali_group *child;
241 struct mali_group *temp;
243 _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
244 child->parent_group = NULL;
245 mali_group_delete(child);
248 mali_dlbu_delete(group->dlbu_core);
250 if (NULL != group->bcast_core) {
251 mali_bcast_unit_delete(group->bcast_core);
255 for (i = 0; i < mali_global_num_groups; i++) {
256 if (mali_global_groups[i] == group) {
257 mali_global_groups[i] = NULL;
258 mali_global_num_groups--;
260 if (i != mali_global_num_groups) {
261 /* We removed a group from the middle of the array -- move the last
262 * group to the current position to close the gap */
263 mali_global_groups[i] = mali_global_groups[mali_global_num_groups];
264 mali_global_groups[mali_global_num_groups] = NULL;
271 if (NULL != group->timeout_timer) {
272 _mali_osk_timer_del(group->timeout_timer);
273 _mali_osk_timer_term(group->timeout_timer);
276 if (NULL != group->bottom_half_work_mmu) {
277 _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
280 if (NULL != group->bottom_half_work_gp) {
281 _mali_osk_wq_delete_work(group->bottom_half_work_gp);
284 if (NULL != group->bottom_half_work_pp) {
285 _mali_osk_wq_delete_work(group->bottom_half_work_pp);
288 #ifdef MALI_UPPER_HALF_SCHEDULING
289 _mali_osk_spinlock_irq_term(group->lock);
291 _mali_osk_spinlock_term(group->lock);
293 _mali_osk_free(group);
296 MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup)
299 struct mali_group *group;
300 struct mali_group *temp;
302 MALI_DEBUG_PRINT(4, ("Virtual group %p\n", vgroup));
303 MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0]));
304 MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1]));
307 _MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) {
308 MALI_DEBUG_PRINT(4, ("[%d] %p, l2_cache_core[0] = %p\n", i, group, group->l2_cache_core[0]));
314 * @brief Add child group to virtual group parent
316 * Before calling this function, child must have it's state set to JOINING_VIRTUAL
317 * to ensure it's not touched during the transition period. When this function returns,
318 * child's state will be IN_VIRTUAL.
320 void mali_group_add_group(struct mali_group *parent, struct mali_group *child, mali_bool update_hw)
324 struct mali_session_data *child_session;
326 MALI_DEBUG_PRINT(3, ("Adding group %p to virtual group %p\n", child, parent));
328 MALI_ASSERT_GROUP_LOCKED(parent);
330 MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
331 MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
332 MALI_DEBUG_ASSERT(NULL == child->parent_group);
333 MALI_DEBUG_ASSERT(MALI_GROUP_STATE_JOINING_VIRTUAL == child->state);
335 _mali_osk_list_addtail(&child->group_list, &parent->group_list);
337 child->state = MALI_GROUP_STATE_IN_VIRTUAL;
338 child->parent_group = parent;
340 MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]);
342 MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1]));
343 MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1]));
345 /* Keep track of the L2 cache cores of child groups */
347 for (i = 0; i < 2; i++) {
348 if (parent->l2_cache_core[i] == child->l2_cache_core[0]) {
349 MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0);
350 parent->l2_cache_core_ref_count[i]++;
356 /* First time we see this L2 cache, add it to our list */
357 i = (NULL == parent->l2_cache_core[0]) ? 0 : 1;
359 MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i]));
361 MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]);
363 parent->l2_cache_core[i] = child->l2_cache_core[0];
364 parent->l2_cache_core_ref_count[i]++;
367 /* Update Broadcast Unit and DLBU */
368 mali_bcast_add_group(parent->bcast_core, child);
369 mali_dlbu_add_group(parent->dlbu_core, child);
371 child_session = child->session;
372 child->session = NULL;
374 /* Above this comment, only software state is updated and the HW is not
375 * touched. Now, check if Mali is powered and skip the rest if it isn't
380 MALI_DEBUG_CODE(mali_group_print_virtual(parent));
385 if (parent->session == child_session) {
386 mali_mmu_zap_tlb(child->mmu);
388 if (NULL == parent->session) {
389 mali_mmu_activate_empty_page_directory(child->mmu);
391 mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session));
395 /* Update HW only if power is on */
396 mali_bcast_reset(parent->bcast_core);
397 mali_dlbu_update_mask(parent->dlbu_core);
399 /* Start job on child when parent is active */
400 if (NULL != parent->pp_running_job) {
401 struct mali_pp_job *job = parent->pp_running_job;
402 MALI_DEBUG_PRINT(3, ("Group %x joining running job %d on virtual group %x\n",
403 child, mali_pp_job_get_id(job), parent));
404 MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING == parent->state);
405 mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE);
407 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
408 MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
409 MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
410 mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
412 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|
413 MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
414 MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
415 mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
418 MALI_DEBUG_CODE(mali_group_print_virtual(parent);)
422 * @brief Remove child group from virtual group parent
424 * After the child is removed, it's state will be LEAVING_VIRTUAL and must be set
425 * to IDLE before it can be used.
427 void mali_group_remove_group(struct mali_group *parent, struct mali_group *child)
431 MALI_ASSERT_GROUP_LOCKED(parent);
433 MALI_DEBUG_PRINT(3, ("Removing group %p from virtual group %p\n", child, parent));
435 MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
436 MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
437 MALI_DEBUG_ASSERT(parent == child->parent_group);
438 MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IN_VIRTUAL == child->state);
439 /* Removing groups while running is not yet supported. */
440 MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == parent->state);
442 mali_group_lock(child);
444 /* Update Broadcast Unit and DLBU */
445 mali_bcast_remove_group(parent->bcast_core, child);
446 mali_dlbu_remove_group(parent->dlbu_core, child);
448 /* Update HW only if power is on */
449 if (mali_pm_is_power_on()) {
450 mali_bcast_reset(parent->bcast_core);
451 mali_dlbu_update_mask(parent->dlbu_core);
454 _mali_osk_list_delinit(&child->group_list);
456 child->session = parent->session;
457 child->parent_group = NULL;
458 child->state = MALI_GROUP_STATE_LEAVING_VIRTUAL;
460 /* Keep track of the L2 cache cores of child groups */
461 i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1;
463 MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]);
465 parent->l2_cache_core_ref_count[i]--;
467 if (parent->l2_cache_core_ref_count[i] == 0) {
468 parent->l2_cache_core[i] = NULL;
471 MALI_DEBUG_CODE(mali_group_print_virtual(parent));
473 mali_group_unlock(child);
476 struct mali_group *mali_group_acquire_group(struct mali_group *parent)
478 struct mali_group *child;
480 MALI_ASSERT_GROUP_LOCKED(parent);
482 MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
483 MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&parent->group_list));
485 child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);
487 mali_group_remove_group(parent, child);
492 void mali_group_reset(struct mali_group *group)
495 * This function should not be used to abort jobs,
496 * currently only called during insmod and PM resume
498 MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
499 MALI_DEBUG_ASSERT(NULL == group->gp_running_job);
500 MALI_DEBUG_ASSERT(NULL == group->pp_running_job);
502 group->session = NULL;
504 if (NULL != group->dlbu_core) {
505 mali_dlbu_reset(group->dlbu_core);
508 if (NULL != group->bcast_core) {
509 mali_bcast_reset(group->bcast_core);
512 if (NULL != group->mmu) {
513 mali_group_reset_mmu(group);
516 if (NULL != group->gp_core) {
517 mali_gp_reset(group->gp_core);
520 if (NULL != group->pp_core) {
521 mali_group_reset_pp(group);
525 struct mali_gp_core* mali_group_get_gp_core(struct mali_group *group)
527 return group->gp_core;
530 struct mali_pp_core* mali_group_get_pp_core(struct mali_group *group)
532 return group->pp_core;
535 void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job)
537 struct mali_session_data *session;
539 MALI_ASSERT_GROUP_LOCKED(group);
540 MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);
542 session = mali_gp_job_get_session(job);
544 if (NULL != group->l2_cache_core[0]) {
545 mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job));
548 mali_group_activate_page_directory(group, session);
550 mali_gp_job_start(group->gp_core, job);
552 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
553 MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) |
554 MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
555 mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0);
556 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
557 MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
558 mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0);
559 #if defined(CONFIG_MALI400_PROFILING)
560 if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
561 (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
562 mali_group_report_l2_cache_counters_per_core(group, 0);
563 #endif /* #if defined(CONFIG_MALI400_PROFILING) */
565 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
566 trace_gpu_sched_switch(mali_gp_get_hw_core_desc(group->gp_core), sched_clock(),
567 mali_gp_job_get_pid(job), 0, mali_gp_job_get_id(job));
570 group->gp_running_job = job;
571 group->state = MALI_GROUP_STATE_WORKING;
573 /* Setup the timeout timer value and save the job id for the job running on the gp core */
574 _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
577 void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job)
579 struct mali_session_data *session;
581 MALI_ASSERT_GROUP_LOCKED(group);
582 MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);
584 session = mali_pp_job_get_session(job);
586 if (NULL != group->l2_cache_core[0]) {
587 mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_pp_job_get_cache_order(job));
590 if (NULL != group->l2_cache_core[1]) {
591 mali_l2_cache_invalidate_conditional(group->l2_cache_core[1], mali_pp_job_get_cache_order(job));
594 mali_group_activate_page_directory(group, session);
596 if (mali_group_is_virtual(group)) {
597 struct mali_group *child;
598 struct mali_group *temp;
601 MALI_DEBUG_ASSERT( mali_pp_job_is_virtual(job));
603 /* Configure DLBU for the job */
604 mali_dlbu_config_job(group->dlbu_core, job);
606 /* Write stack address for each child group */
607 _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
608 mali_pp_write_addr_stack(child->pp_core, job);
612 /* Try to use DMA unit to start job, fallback to writing directly to the core */
613 MALI_DEBUG_ASSERT(mali_dma_cmd_buf_is_valid(&job->dma_cmd_buf));
614 if (_MALI_OSK_ERR_OK != mali_dma_start(mali_dma_get_global_dma_core(), &job->dma_cmd_buf)) {
615 mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
618 mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
621 /* if the group is virtual, loop through physical groups which belong to this group
622 * and call profiling events for its cores as virtual */
623 if (MALI_TRUE == mali_group_is_virtual(group)) {
624 struct mali_group *child;
625 struct mali_group *temp;
627 _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
628 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
629 MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
630 MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
631 mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
633 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|
634 MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
635 MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
636 mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
638 #if defined(CONFIG_MALI400_PROFILING)
639 if (0 != group->l2_cache_core_ref_count[0]) {
640 if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
641 (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
642 mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
645 if (0 != group->l2_cache_core_ref_count[1]) {
646 if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
647 (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
648 mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
651 #endif /* #if defined(CONFIG_MALI400_PROFILING) */
652 } else { /* group is physical - call profiling events for physical cores */
653 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
654 MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core))|
655 MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
656 mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
658 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|
659 MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core))|
660 MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
661 mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
662 #if defined(CONFIG_MALI400_PROFILING)
663 if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
664 (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
665 mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
667 #endif /* #if defined(CONFIG_MALI400_PROFILING) */
669 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
670 trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), mali_pp_job_get_tid(job), 0, mali_pp_job_get_id(job));
672 group->pp_running_job = job;
673 group->pp_running_sub_job = sub_job;
674 group->state = MALI_GROUP_STATE_WORKING;
676 /* Setup the timeout timer value and save the job id for the job running on the pp core */
677 _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
680 struct mali_gp_job *mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
682 MALI_ASSERT_GROUP_LOCKED(group);
684 if (group->state != MALI_GROUP_STATE_OOM ||
685 mali_gp_job_get_id(group->gp_running_job) != job_id) {
686 return NULL; /* Illegal request or job has already been aborted */
689 if (NULL != group->l2_cache_core[0]) {
690 mali_l2_cache_invalidate(group->l2_cache_core[0]);
693 mali_mmu_zap_tlb_without_stall(group->mmu);
695 mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);
697 _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), 0, 0, 0, 0, 0);
699 group->state = MALI_GROUP_STATE_WORKING;
701 return group->gp_running_job;
704 static void mali_group_reset_mmu(struct mali_group *group)
706 struct mali_group *child;
707 struct mali_group *temp;
708 _mali_osk_errcode_t err;
710 if (!mali_group_is_virtual(group)) {
711 /* This is a physical group or an idle virtual group -- simply wait for
712 * the reset to complete. */
713 err = mali_mmu_reset(group->mmu);
714 MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
715 } else { /* virtual group */
716 err = mali_mmu_reset(group->mmu);
717 if (_MALI_OSK_ERR_OK == err) {
721 /* Loop through all members of this virtual group and wait
722 * until they are done resetting.
724 _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
725 err = mali_mmu_reset(child->mmu);
726 MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
731 static void mali_group_reset_pp(struct mali_group *group)
733 struct mali_group *child;
734 struct mali_group *temp;
736 mali_pp_reset_async(group->pp_core);
738 if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) {
739 /* This is a physical group or an idle virtual group -- simply wait for
740 * the reset to complete. */
741 mali_pp_reset_wait(group->pp_core);
742 } else { /* virtual group */
743 /* Loop through all members of this virtual group and wait until they
744 * are done resetting.
746 _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
747 mali_pp_reset_wait(child->pp_core);
752 /* Group must be locked when entering this function. Will be unlocked before exiting. */
753 static void mali_group_complete_pp_and_unlock(struct mali_group *group, mali_bool success, mali_bool in_upper_half)
755 struct mali_pp_job *pp_job_to_return;
756 u32 pp_sub_job_to_return;
758 MALI_DEBUG_ASSERT_POINTER(group);
759 MALI_DEBUG_ASSERT_POINTER(group->pp_core);
760 MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
761 MALI_ASSERT_GROUP_LOCKED(group);
763 mali_group_post_process_job_pp(group);
766 /* Only do soft reset for successful jobs, a full recovery
767 * reset will be done for failed jobs. */
768 mali_pp_reset_async(group->pp_core);
771 pp_job_to_return = group->pp_running_job;
772 pp_sub_job_to_return = group->pp_running_sub_job;
773 group->state = MALI_GROUP_STATE_IDLE;
774 group->pp_running_job = NULL;
777 MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
778 mali_group_recovery_reset(group);
779 } else if (_MALI_OSK_ERR_OK != mali_pp_reset_wait(group->pp_core)) {
780 MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
781 mali_group_recovery_reset(group);
784 /* Return job to user, schedule and unlock group. */
785 mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, success, in_upper_half);
788 /* Group must be locked when entering this function. Will be unlocked before exiting. */
789 static void mali_group_complete_gp_and_unlock(struct mali_group *group, mali_bool success)
791 struct mali_gp_job *gp_job_to_return;
793 MALI_DEBUG_ASSERT_POINTER(group);
794 MALI_DEBUG_ASSERT_POINTER(group->gp_core);
795 MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
796 MALI_ASSERT_GROUP_LOCKED(group);
798 mali_group_post_process_job_gp(group, MALI_FALSE);
801 /* Only do soft reset for successful jobs, a full recovery
802 * reset will be done for failed jobs. */
803 mali_gp_reset_async(group->gp_core);
806 gp_job_to_return = group->gp_running_job;
807 group->state = MALI_GROUP_STATE_IDLE;
808 group->gp_running_job = NULL;
811 MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
812 mali_group_recovery_reset(group);
813 } else if (_MALI_OSK_ERR_OK != mali_gp_reset_wait(group->gp_core)) {
814 MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
815 mali_group_recovery_reset(group);
818 /* Return job to user, schedule and unlock group. */
819 mali_gp_scheduler_job_done(group, gp_job_to_return, success);
822 void mali_group_abort_gp_job(struct mali_group *group, u32 job_id)
824 MALI_ASSERT_GROUP_LOCKED(group);
826 if (MALI_GROUP_STATE_IDLE == group->state ||
827 mali_gp_job_get_id(group->gp_running_job) != job_id) {
828 return; /* No need to cancel or job has already been aborted or completed */
831 /* Function will unlock the group, so we need to lock it again */
832 mali_group_complete_gp_and_unlock(group, MALI_FALSE);
833 mali_group_lock(group);
836 static void mali_group_abort_pp_job(struct mali_group *group, u32 job_id)
838 MALI_ASSERT_GROUP_LOCKED(group);
840 if (MALI_GROUP_STATE_IDLE == group->state ||
841 mali_pp_job_get_id(group->pp_running_job) != job_id) {
842 return; /* No need to cancel or job has already been aborted or completed */
845 mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
846 mali_group_lock(group);
849 void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session)
851 struct mali_gp_job *gp_job;
852 struct mali_pp_job *pp_job;
855 mali_bool abort_pp = MALI_FALSE;
856 mali_bool abort_gp = MALI_FALSE;
858 mali_group_lock(group);
860 if (mali_group_is_in_virtual(group)) {
861 /* Group is member of a virtual group, don't touch it! */
862 mali_group_unlock(group);
866 gp_job = group->gp_running_job;
867 pp_job = group->pp_running_job;
869 if ((NULL != gp_job) && (mali_gp_job_get_session(gp_job) == session)) {
870 MALI_DEBUG_PRINT(4, ("Aborting GP job 0x%08x from session 0x%08x\n", gp_job, session));
872 gp_job_id = mali_gp_job_get_id(gp_job);
873 abort_gp = MALI_TRUE;
876 if ((NULL != pp_job) && (mali_pp_job_get_session(pp_job) == session)) {
877 MALI_DEBUG_PRINT(4, ("Mali group: Aborting PP job 0x%08x from session 0x%08x\n", pp_job, session));
879 pp_job_id = mali_pp_job_get_id(pp_job);
880 abort_pp = MALI_TRUE;
884 mali_group_abort_gp_job(group, gp_job_id);
887 mali_group_abort_pp_job(group, pp_job_id);
890 mali_group_remove_session_if_unused(group, session);
892 mali_group_unlock(group);
895 struct mali_group *mali_group_get_glob_group(u32 index)
897 if(mali_global_num_groups > index) {
898 return mali_global_groups[index];
904 u32 mali_group_get_glob_num_groups(void)
906 return mali_global_num_groups;
909 static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session)
911 MALI_ASSERT_GROUP_LOCKED(group);
913 MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group 0x%08X\n", mali_session_get_page_directory(session), session, group));
915 if (group->session != session) {
916 /* Different session than last time, so we need to do some work */
917 MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group 0x%08X\n", session, group->session, group));
918 mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session));
919 group->session = session;
921 /* Same session as last time, so no work required */
922 MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group 0x%08X\n", session->page_directory, group));
923 mali_mmu_zap_tlb_without_stall(group->mmu);
927 static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session)
929 MALI_ASSERT_GROUP_LOCKED(group);
931 if (MALI_GROUP_STATE_IDLE == group->state) {
932 if (group->session == session) {
933 MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING != group->state);
934 MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
935 MALI_DEBUG_PRINT(3, ("Mali group: Deactivating unused session 0x%08X on group %08X\n", session, group));
936 mali_mmu_activate_empty_page_directory(group->mmu);
937 group->session = NULL;
942 mali_bool mali_group_power_is_on(struct mali_group *group)
944 MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
945 return group->power_is_on;
948 void mali_group_power_on_group(struct mali_group *group)
950 MALI_DEBUG_ASSERT_POINTER(group);
951 MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
952 MALI_DEBUG_ASSERT( MALI_GROUP_STATE_IDLE == group->state
953 || MALI_GROUP_STATE_IN_VIRTUAL == group->state
954 || MALI_GROUP_STATE_JOINING_VIRTUAL == group->state
955 || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
956 || MALI_GROUP_STATE_DISABLED == group->state);
958 MALI_DEBUG_PRINT(3, ("Group %p powered on\n", group));
960 group->power_is_on = MALI_TRUE;
963 void mali_group_power_off_group(struct mali_group *group, mali_bool do_power_change)
965 MALI_DEBUG_ASSERT_POINTER(group);
966 MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
967 MALI_DEBUG_ASSERT( MALI_GROUP_STATE_IDLE == group->state
968 || MALI_GROUP_STATE_IN_VIRTUAL == group->state
969 || MALI_GROUP_STATE_JOINING_VIRTUAL == group->state
970 || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
971 || MALI_GROUP_STATE_DISABLED == group->state);
973 MALI_DEBUG_PRINT(3, ("Group %p powered off\n", group));
975 /* It is necessary to set group->session = NULL so that the powered off MMU is not written
976 * to on map/unmap. It is also necessary to set group->power_is_on = MALI_FALSE so that
977 * pending bottom_halves does not access powered off cores. */
979 group->session = NULL;
981 if (do_power_change) {
982 group->power_is_on = MALI_FALSE;
986 void mali_group_power_on(void)
989 for (i = 0; i < mali_global_num_groups; i++) {
990 struct mali_group *group = mali_global_groups[i];
992 mali_group_lock(group);
993 if (MALI_GROUP_STATE_DISABLED == group->state) {
994 MALI_DEBUG_ASSERT(MALI_FALSE == group->power_is_on);
996 mali_group_power_on_group(group);
998 mali_group_unlock(group);
1000 MALI_DEBUG_PRINT(4, ("Mali Group: power on\n"));
1003 void mali_group_power_off(mali_bool do_power_change)
1007 for (i = 0; i < mali_global_num_groups; i++) {
1008 struct mali_group *group = mali_global_groups[i];
1010 mali_group_lock(group);
1011 if (MALI_GROUP_STATE_DISABLED == group->state) {
1012 MALI_DEBUG_ASSERT(MALI_FALSE == group->power_is_on);
1014 mali_group_power_off_group(group, do_power_change);
1016 mali_group_unlock(group);
1018 MALI_DEBUG_PRINT(4, ("Mali Group: power off\n"));
/* Recovery reset: forcibly bring all hardware in the group back to a
 * known-good state after a fault (e.g. an unrecoverable MMU page fault).
 * Sequence: stop the core buses, flush the MMU and acknowledge any
 * pending page fault, wait for bus-stop and hard-reset the cores (for a
 * virtual group each physical child is reset individually with the
 * broadcast unit temporarily disabled), then reset the MMU itself and
 * drop the session binding. Caller must hold the group lock. */
1021 static void mali_group_recovery_reset(struct mali_group *group)
1023 	_mali_osk_errcode_t err;
1025 	MALI_ASSERT_GROUP_LOCKED(group);
1027 	/* Stop cores, bus stop */
1028 	if (NULL != group->pp_core) {
1029 		mali_pp_stop_bus(group->pp_core);
1031 		mali_gp_stop_bus(group->gp_core);
1034 	/* Flush MMU and clear page fault (if any) */
1035 	mali_mmu_activate_fault_flush_page_directory(group->mmu);
1036 	mali_mmu_page_fault_done(group->mmu);
1038 	/* Wait for cores to stop bus, then do a hard reset on them */
1039 	if (NULL != group->pp_core) {
1040 		if (mali_group_is_virtual(group)) {
1041 			struct mali_group *child, *temp;
1043 			/* Disable the broadcast unit while we do reset directly on the member cores. */
1044 			mali_bcast_disable(group->bcast_core);
1046 			_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
1047 				mali_pp_stop_bus_wait(child->pp_core);
1048 				mali_pp_hard_reset(child->pp_core);
1051 			mali_bcast_enable(group->bcast_core);
/* Physical PP group: reset the single PP core directly. */
1053 			mali_pp_stop_bus_wait(group->pp_core);
1054 			mali_pp_hard_reset(group->pp_core);
/* GP group: reset the geometry processor. */
1057 		mali_gp_stop_bus_wait(group->gp_core);
1058 		mali_gp_hard_reset(group->gp_core);
/* Finally reset the MMU; failure here is a driver bug (debug assert only). */
1062 	err = mali_mmu_reset(group->mmu);
1063 	MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
1066 	group->session = NULL;
1069 #if MALI_STATE_TRACKING
/* Diagnostics helper (compiled in only when MALI_STATE_TRACKING is set):
 * appends a human-readable description of the group - its state, its
 * GP/PP cores and any running jobs - into @buf, bounded by @size.
 * Output length is accumulated in n via _mali_osk_snprintf and the
 * per-core dump helpers. */
1070 u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size)
1074 	n += _mali_osk_snprintf(buf + n, size - n, "Group: %p\n", group);
1075 	n += _mali_osk_snprintf(buf + n, size - n, "\tstate: %d\n", group->state);
/* Dump GP core state and the currently running GP job, if the group has a GP core. */
1076 	if (group->gp_core) {
1077 		n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
1078 		n += _mali_osk_snprintf(buf + n, size - n, "\tGP job: %p\n", group->gp_running_job);
/* Dump PP core state plus the running PP job and which sub job this group executes. */
1080 	if (group->pp_core) {
1081 		n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
1082 		n += _mali_osk_snprintf(buf + n, size - n, "\tPP job: %p, subjob %d \n",
1083 		group->pp_running_job, group->pp_running_sub_job);
1090 /* Group must be locked when entering this function. Will be unlocked before exiting. */
/* MMU page fault recovery: abort whichever job (PP or GP) was running
 * when the fault hit, recovery-reset the group (which also clears the
 * fault condition), and report the job back to its scheduler as failed.
 * The scheduler's *_job_done() call releases the group lock on our
 * behalf, hence the "_and_unlock" suffix. */
1091 static void mali_group_mmu_page_fault_and_unlock(struct mali_group *group)
1093 	MALI_DEBUG_ASSERT_POINTER(group);
1094 	MALI_ASSERT_GROUP_LOCKED(group);
/* PP path: a PP job was executing on this group when the fault occurred. */
1096 	if (NULL != group->pp_core) {
1097 		struct mali_pp_job *pp_job_to_return;
1098 		u32 pp_sub_job_to_return;
1100 		MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
/* Harvest performance counters and stop the timeout timer before the reset wipes HW state. */
1102 		mali_group_post_process_job_pp(group);
1104 		pp_job_to_return = group->pp_running_job;
1105 		pp_sub_job_to_return = group->pp_running_sub_job;
1106 		group->state = MALI_GROUP_STATE_IDLE;
1107 		group->pp_running_job = NULL;
1109 		mali_group_recovery_reset(group); /* This will also clear the page fault itself */
1111 		/* Will unlock group. */
1112 		mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, MALI_FALSE, MALI_FALSE);
/* GP path: otherwise the faulting job was a GP job. */
1114 		struct mali_gp_job *gp_job_to_return;
1116 		MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
1118 		mali_group_post_process_job_gp(group, MALI_FALSE);
1120 		gp_job_to_return = group->gp_running_job;
1121 		group->state = MALI_GROUP_STATE_IDLE;
1122 		group->gp_running_job = NULL;
1124 		mali_group_recovery_reset(group); /* This will also clear the page fault itself */
1126 		/* Will unlock group. */
1127 		mali_gp_scheduler_job_done(group, gp_job_to_return, MALI_FALSE);
/* Upper-half (hard IRQ context) handler for the group's MMU.
 * Reads the MMU interrupt status to check whether this device raised
 * the interrupt (the IRQ line may be shared), masks all MMU interrupts,
 * and defers the real work to the bottom-half work item - scheduled on
 * the parent group when this group is a member of a virtual group.
 * Returns _MALI_OSK_ERR_OK when the interrupt was ours,
 * _MALI_OSK_ERR_FAULT otherwise. */
1131 _mali_osk_errcode_t mali_group_upper_half_mmu(void * data)
1133 	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
1134 	struct mali_group *group = (struct mali_group *)data;
1135 	struct mali_mmu_core *mmu = group->mmu;
1138 	MALI_DEBUG_ASSERT_POINTER(mmu);
/* With shared interrupts, only touch the HW while the PM domain is
 * pinned in the powered state; bail out if it cannot be pinned. */
1140 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
1141 	if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
1146 	/* Check if it was our device which caused the interrupt (we could be sharing the IRQ line) */
1147 	int_stat = mali_mmu_get_int_status(mmu);
1148 	if (0 != int_stat) {
1149 		struct mali_group *parent = group->parent_group;
1151 		/* page fault or bus error, we thread them both in the same way */
1152 		mali_mmu_mask_all_interrupts(mmu);
/* Members of a virtual group hand the bottom half to the parent group. */
1153 		if (NULL == parent) {
1154 			_mali_osk_wq_schedule_work(group->bottom_half_work_mmu);
1156 			_mali_osk_wq_schedule_work(parent->bottom_half_work_mmu);
1158 		err = _MALI_OSK_ERR_OK;
/* Release the PM domain state pin taken at the top of the handler. */
1163 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
1164 	mali_pm_domain_unlock_state(group->pm_domain);
1170 static void mali_group_bottom_half_mmu(void * data)
1172 struct mali_group *group = (struct mali_group *)data;
1173 struct mali_mmu_core *mmu = group->mmu;
1175 MALI_DEBUG_CODE(u32 status);
1177 MALI_DEBUG_ASSERT_POINTER(mmu);
1179 mali_group_lock(group);
1181 MALI_DEBUG_ASSERT(NULL == group->parent_group);
1183 if ( MALI_FALSE == mali_group_power_is_on(group) ) {
1184 MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mmu->hw_core.description));
1185 mali_group_unlock(group);
1189 rawstat = mali_mmu_get_rawstat(mmu);
1190 MALI_DEBUG_CODE(status = mali_mmu_get_status(mmu));
1192 MALI_DEBUG_PRINT(4, ("Mali MMU: Bottom half, interrupt 0x%08X, status 0x%08X\n", rawstat, status));
1194 if (rawstat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
1195 /* An actual page fault has occurred. */
1197 u32 fault_address = mali_mmu_get_page_fault_addr(mmu);
1198 MALI_DEBUG_PRINT(2,("Mali MMU: Page fault detected at 0x%x from bus id %d of type %s on %s\n",
1199 (void*)fault_address,
1200 (status >> 6) & 0x1F,
1201 (status & 32) ? "write" : "read",
1202 mmu->hw_core.description));
1205 mali_group_mmu_page_fault_and_unlock(group);
1209 mali_group_unlock(group);
/* Upper-half (hard IRQ context) handler for the group's GP core.
 * Checks whether this core raised the interrupt (the IRQ line may be
 * shared), masks all GP interrupts, records a profiling event, and
 * defers handling to the GP bottom-half work item. Returns
 * _MALI_OSK_ERR_OK when the interrupt was ours, _MALI_OSK_ERR_FAULT
 * otherwise. */
1212 _mali_osk_errcode_t mali_group_upper_half_gp(void *data)
1214 	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
1215 	struct mali_group *group = (struct mali_group *)data;
1216 	struct mali_gp_core *core = group->gp_core;
/* With shared interrupts, only touch the HW while the PM domain is
 * pinned in the powered state; bail out if it cannot be pinned. */
1219 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
1220 	if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
1225 	irq_readout = mali_gp_get_int_stat(core);
1227 	if (MALIGP2_REG_VAL_IRQ_MASK_NONE != irq_readout) {
1228 		/* Mask out all IRQs from this core until IRQ is handled */
1229 		mali_gp_mask_all_interrupts(core);
1231 		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0)|MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT, irq_readout, 0, 0, 0, 0);
1233 		/* We do need to handle this in a bottom half */
1234 		_mali_osk_wq_schedule_work(group->bottom_half_work_gp);
1236 		err = _MALI_OSK_ERR_OK;
/* Release the PM domain state pin taken at the top of the handler. */
1241 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
1242 	mali_pm_domain_unlock_state(group->pm_domain);
/* Bottom-half handler for GP core interrupts, run in workqueue context
 * with GP interrupts masked by the upper half. Distinguishes, in order:
 * job completion (both END_CMD_LST bits and core no longer active),
 * unexpected-error interrupts, SW timeout, PLBU out-of-memory, and the
 * partial-completion case where only one of the two END_CMD_LST
 * interrupts has arrived. Every exit path emits the matching
 * BOTTOM_HALF profiling STOP event and leaves the group unlocked. */
1248 static void mali_group_bottom_half_gp(void *data)
1250 	struct mali_group *group = (struct mali_group *)data;
1254 	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), 0, 0);
1256 	mali_group_lock(group);
/* A pending bottom half may race with a power-off; never touch a powered-off core. */
1258 	if ( MALI_FALSE == mali_group_power_is_on(group) ) {
1259 		MALI_PRINT_ERROR(("Mali group: Interrupt bottom half of %s when core is OFF.", mali_gp_get_hw_core_desc(group->gp_core)));
1260 		mali_group_unlock(group);
1261 		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
1265 	irq_readout = mali_gp_read_rawstat(group->gp_core);
1267 	MALI_DEBUG_PRINT(4, ("Mali group: GP bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
/* Success path: both vertex-shader and PLBU command lists finished and
 * the core reports itself inactive. */
1269 	if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST)) {
1270 		u32 core_status = mali_gp_read_core_status(group->gp_core);
1271 		if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE)) {
1272 			MALI_DEBUG_PRINT(4, ("Mali group: GP job completed, calling group handler\n"));
1273 			group->core_timed_out = MALI_FALSE;
1274 			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
1275 			MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1276 			MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
1277 			0, _mali_osk_get_tid(), 0, 0, 0);
/* Completes the job and releases the group lock. */
1279 			mali_group_complete_gp_and_unlock(group, MALI_TRUE);
1285 	 * Now lets look at the possible error cases (IRQ indicating error or timeout)
1286 	 * END_CMD_LST, HANG and PLBU_OOM interrupts are not considered error.
1288 	irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST|MALIGP2_REG_VAL_IRQ_HANG|MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
1289 	if (0 != irq_errors) {
1290 		MALI_PRINT_ERROR(("Mali group: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
1291 		group->core_timed_out = MALI_FALSE;
1292 		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
1293 		MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1294 		MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
1295 		0, _mali_osk_get_tid(), 0, 0, 0);
/* Abort the job (MALI_FALSE = failure); releases the group lock. */
1297 		mali_group_complete_gp_and_unlock(group, MALI_FALSE);
1299 	} else if (group->core_timed_out) { /* SW timeout */
1300 		group->core_timed_out = MALI_FALSE;
/* Only treat as a real timeout if the timer has not been re-armed and a job is still attached. */
1301 		if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->gp_running_job) {
1302 			MALI_PRINT(("Mali group: Job %d timed out\n", mali_gp_job_get_id(group->gp_running_job)));
1304 			mali_group_complete_gp_and_unlock(group, MALI_FALSE);
1307 	} else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) {
1308 		/* GP wants more memory in order to continue. */
1309 		MALI_DEBUG_PRINT(3, ("Mali group: PLBU needs more heap memory\n"));
1311 		group->state = MALI_GROUP_STATE_OOM;
1312 		mali_group_unlock(group); /* Nothing to do on the HW side, so just release group lock right away */
1313 		mali_gp_scheduler_oom(group, group->gp_running_job);
1314 		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
1319 	 * The only way to get here is if we only got one of two needed END_CMD_LST
1320 	 * interrupts. Enable all but not the complete interrupt that has been
1321 	 * received and continue to run.
1323 	mali_gp_enable_interrupts(group->gp_core, irq_readout & (MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST|MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST));
1324 	mali_group_unlock(group);
1326 	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
/* Harvest state from a finishing (or suspending) GP job before the core
 * is reset or handed the next job: stops the SW timeout timer, reads the
 * HW performance counters into the job object, emits the corresponding
 * profiling events (SUSPEND vs STOP depending on @suspend), reports L2
 * cache counters when both L2 counter sources are configured, and saves
 * the current PLBU heap allocation pointer into the job (needed to
 * resume after an out-of-memory grow). No-op when no GP job is attached. */
1329 static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend)
1331 	/* Stop the timeout timer. */
1332 	_mali_osk_timer_del_async(group->timeout_timer);
/* Nothing to post-process if no job is currently attached to the group. */
1334 	if (NULL == group->gp_running_job) {
1339 	mali_gp_update_performance_counters(group->gp_core, group->gp_running_job, suspend);
1341 #if defined(CONFIG_MALI400_PROFILING)
/* Suspend case: the job is only paused (e.g. PLBU OOM), so emit a SUSPEND event. */
1343 	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
1344 	mali_gp_job_get_perf_counter_value0(group->gp_running_job),
1345 	mali_gp_job_get_perf_counter_value1(group->gp_running_job),
1346 	mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
/* Stop case: the job is done; emit a STOP event with the same counter payload. */
1349 	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
1350 	mali_gp_job_get_perf_counter_value0(group->gp_running_job),
1351 	mali_gp_job_get_perf_counter_value1(group->gp_running_job),
1352 	mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
/* Report L2 counters only when both counter sources are actually enabled. */
1355 	if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
1356 	(MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
1357 	mali_group_report_l2_cache_counters_per_core(group, 0);
/* Preserve the PLBU heap write pointer so a suspended job can be resumed later. */
1361 	mali_gp_job_set_current_heap_addr(group->gp_running_job,
1362 	mali_gp_read_plbu_alloc_start_addr(group->gp_core));
/* Upper-half (hard IRQ context) handler for the group's PP core.
 * Checks whether this core raised the interrupt (shared IRQ lines are
 * possible), masks PP interrupts, and either completes the job directly
 * in the upper half (when MALI_UPPER_HALF_SCHEDULING is enabled and the
 * frame finished cleanly) or defers to the PP bottom-half work item.
 * Returns _MALI_OSK_ERR_OK when the interrupt was handled here,
 * _MALI_OSK_ERR_FAULT otherwise. */
1365 _mali_osk_errcode_t mali_group_upper_half_pp(void *data)
1367 	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
1368 	struct mali_group *group = (struct mali_group *)data;
1369 	struct mali_pp_core *core = group->pp_core;
/* With shared interrupts, only touch the HW while the PM domain is pinned powered. */
1372 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
1373 	if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
1379 	 * For Mali-450 there is one particular case we need to watch out for:
1381 	 * Criteria 1) this function call can be due to a shared interrupt,
1382 	 * and not necessary because this core signaled an interrupt.
1383 	 * Criteria 2) this core is a part of a virtual group, and thus it should
1384 	 * not do any post processing.
1385 	 * Criteria 3) this core has actually indicated that is has completed by
1386 	 * having set raw_stat/int_stat registers to != 0
1388 	 * If all this criteria is meet, then we could incorrectly start post
1389 	 * processing on the wrong group object (this should only happen on the
1392 #if !defined(MALI_UPPER_HALF_SCHEDULING)
1393 	if (mali_group_is_in_virtual(group)) {
1395 		 * This check is done without the group lock held, which could lead to
1396 		 * a potential race. This is however ok, since we will safely re-check
1397 		 * this with the group lock held at a later stage. This is just an
1398 		 * early out which will strongly benefit shared IRQ systems.
1400 		err = _MALI_OSK_ERR_OK;
1405 	irq_readout = mali_pp_get_int_stat(core);
1406 	if (MALI200_REG_VAL_IRQ_MASK_NONE != irq_readout) {
1407 		/* Mask out all IRQs from this core until IRQ is handled */
1408 		mali_pp_mask_all_interrupts(core);
1410 #if defined(CONFIG_MALI400_PROFILING)
1411 		/* Currently no support for this interrupt event for the virtual PP core */
1412 		if (!mali_group_is_virtual(group)) {
1413 			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
1414 			MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id) |
1415 			MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT,
1416 			irq_readout, 0, 0, 0, 0);
/* Fast path: attempt to complete a cleanly finished frame directly in the upper half. */
1420 #if defined(MALI_UPPER_HALF_SCHEDULING)
1421 		/* Check if job is complete without errors */
1422 		if (MALI200_REG_VAL_IRQ_END_OF_FRAME == irq_readout) {
1423 			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
1424 			MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1425 			MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
1426 			0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
1428 			MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler from upper half\n"));
1430 			mali_group_lock(group);
1432 			/* Check if job is complete without errors, again, after taking the group lock */
1433 			irq_readout = mali_pp_read_rawstat(core);
/* Raw status changed since the unlocked read: fall back to normal handling. */
1434 			if (MALI200_REG_VAL_IRQ_END_OF_FRAME != irq_readout) {
1435 				mali_pp_enable_interrupts(core);
1436 				mali_group_unlock(group);
1437 				_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
1438 				MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1439 				MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
1440 				0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
1441 				err = _MALI_OSK_ERR_OK;
/* Virtual group: only complete once every member core has finished rendering. */
1445 			if (mali_group_is_virtual(group)) {
1446 				u32 status_readout = mali_pp_read_status(group->pp_core);
1447 				if (status_readout & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE) {
1448 					MALI_DEBUG_PRINT(6, ("Mali PP: Not all cores in broadcast completed\n"));
1449 					mali_pp_enable_interrupts(core);
1450 					mali_group_unlock(group);
1451 					_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
1452 					MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1453 					MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
1454 					0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
1455 					err = _MALI_OSK_ERR_OK;
/* Re-check of the racy early-out above, now safely under the group lock. */
1460 			if (mali_group_is_in_virtual(group)) {
1461 				/* We're member of a virtual group, so interrupt should be handled by the virtual group */
1462 				mali_pp_enable_interrupts(core);
1463 				mali_group_unlock(group);
1464 				_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
1465 				MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1466 				MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
1467 				0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
1468 				err = _MALI_OSK_ERR_FAULT;
1472 			group->core_timed_out = MALI_FALSE;
/* Completes the job and releases the group lock. */
1474 			mali_group_complete_pp_and_unlock(group, MALI_TRUE, MALI_TRUE);
1476 			/* No need to enable interrupts again, since the core will be reset while completing the job */
1478 			MALI_DEBUG_PRINT(6, ("Mali PP: Upper half job done\n"));
1480 			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
1481 			MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1482 			MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
1483 			0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
1485 			err = _MALI_OSK_ERR_OK;
1490 		/* We do need to handle this in a bottom half */
1491 		_mali_osk_wq_schedule_work(group->bottom_half_work_pp);
1492 		err = _MALI_OSK_ERR_OK;
/* Release the PM domain state pin taken at the top of the handler. */
1497 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
1498 	mali_pm_domain_unlock_state(group->pm_domain);
/* Bottom-half handler for PP core interrupts, run in workqueue context
 * with PP interrupts masked by the upper half. Handles, in order:
 * membership of a virtual group (defer to parent), a race with
 * power-off, clean end-of-frame (with the virtual-group "all members
 * done" check), unexpected-error interrupts, SW timeout, and finally
 * the spurious/unknown-interrupt fallback. Every exit path emits the
 * matching BOTTOM_HALF profiling STOP event and leaves the group
 * unlocked. */
1504 static void mali_group_bottom_half_pp(void *data)
1506 	struct mali_group *group = (struct mali_group *)data;
1507 	struct mali_pp_core *core = group->pp_core;
1511 	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
1512 	MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1513 	MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
1514 	0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
1516 	mali_group_lock(group);
1518 	if (mali_group_is_in_virtual(group)) {
1519 		/* We're member of a virtual group, so interrupt should be handled by the virtual group */
1520 		mali_pp_enable_interrupts(core);
1521 		mali_group_unlock(group);
1522 		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
1523 		MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1524 		MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
1525 		0, _mali_osk_get_tid(), 0, 0, 0);
/* A pending bottom half may race with a power-off; never touch a powered-off core. */
1529 	if ( MALI_FALSE == mali_group_power_is_on(group) ) {
1530 		MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mali_pp_get_hw_core_desc(core)));
1531 		mali_group_unlock(group);
1532 		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
1533 		MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1534 		MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
1535 		0, _mali_osk_get_tid(), 0, 0, 0);
1539 	irq_readout = mali_pp_read_rawstat(group->pp_core);
1541 	MALI_DEBUG_PRINT(4, ("Mali PP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
1543 	/* Check if job is complete without errors */
1544 	if (MALI200_REG_VAL_IRQ_END_OF_FRAME == irq_readout) {
/* Virtual group: only complete once every member core has stopped rendering
 * (unless a SW timeout forces completion). */
1545 		if (mali_group_is_virtual(group)) {
1546 			u32 status_readout = mali_pp_read_status(group->pp_core);
1548 			if (status_readout & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE && !group->core_timed_out) {
1549 				MALI_DEBUG_PRINT(6, ("Mali PP: Not all cores in broadcast completed\n"));
1550 				mali_pp_enable_interrupts(core);
1551 				mali_group_unlock(group);
1553 				_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
1554 				MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1555 				MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
1556 				0, _mali_osk_get_tid(), 0, 0, 0);
1561 		if (!group->core_timed_out) {
1562 			MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler\n"));
1563 			group->core_timed_out = MALI_FALSE;
/* Completes the job (MALI_TRUE = success) and releases the group lock. */
1565 			mali_group_complete_pp_and_unlock(group, MALI_TRUE, MALI_FALSE);
1567 			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
1568 			MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1569 			MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
1570 			0, _mali_osk_get_tid(), 0, 0, 0);
1576 	 * Now lets look at the possible error cases (IRQ indicating error or timeout)
1577 	 * END_OF_FRAME and HANG interrupts are not considered error.
1579 	irq_errors = irq_readout & ~(MALI200_REG_VAL_IRQ_END_OF_FRAME|MALI200_REG_VAL_IRQ_HANG);
1580 	if (0 != irq_errors) {
1581 		MALI_PRINT_ERROR(("Mali PP: Unexpected interrupt 0x%08X from core %s, aborting job\n",
1582 		irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
1583 		group->core_timed_out = MALI_FALSE;
/* Abort the job (MALI_FALSE = failure); releases the group lock. */
1585 		mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
1587 		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
1588 		MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1589 		MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
1590 		0, _mali_osk_get_tid(), 0, 0, 0);
1592 	} else if (group->core_timed_out) { /* SW timeout */
1593 		group->core_timed_out = MALI_FALSE;
/* Only treat as a real timeout if the timer has not been re-armed and a job is still attached. */
1594 		if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->pp_running_job) {
1595 			MALI_PRINT(("Mali PP: Job %d timed out on core %s\n",
1596 			mali_pp_job_get_id(group->pp_running_job), mali_pp_get_hw_core_desc(core)));
1598 			mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
1600 			mali_group_unlock(group);
1603 		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
1604 		MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1605 		MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
1606 		0, _mali_osk_get_tid(), 0, 0, 0);
1611 	 * We should never get here, re-enable interrupts and continue
1613 	if (0 == irq_readout) {
1614 		MALI_DEBUG_PRINT(3, ("Mali group: No interrupt found on core %s\n",
1615 		mali_pp_get_hw_core_desc(group->pp_core)));
1617 		MALI_PRINT_ERROR(("Mali group: Unhandled PP interrupt 0x%08X on %s\n", irq_readout,
1618 		mali_pp_get_hw_core_desc(group->pp_core)));
1620 	mali_pp_enable_interrupts(core);
1621 	mali_group_unlock(group);
1623 	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
1624 	MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
1625 	MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
1626 	0, _mali_osk_get_tid(), 0, 0, 0);
/* Harvest state from a finishing PP job before the core is reset or
 * handed the next job: stops the SW timeout timer, reads the HW
 * performance counters into the job object (per physical child for a
 * virtual group), emits the per-core profiling STOP events, and reports
 * L2 cache counters for each referenced L2 with both counter sources
 * enabled. No-op when no PP job is attached. Caller must hold the
 * group lock. */
1629 static void mali_group_post_process_job_pp(struct mali_group *group)
1631 	MALI_ASSERT_GROUP_LOCKED(group);
1633 	/* Stop the timeout timer. */
1634 	_mali_osk_timer_del_async(group->timeout_timer);
1636 	if (NULL != group->pp_running_job) {
1637 		if (MALI_TRUE == mali_group_is_virtual(group)) {
1638 			struct mali_group *child;
1639 			struct mali_group *temp;
1641 			/* update performance counters from each physical pp core within this virtual group */
1642 			_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
1643 				mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core));
1646 #if defined(CONFIG_MALI400_PROFILING)
1647 			/* send profiling data per physical core */
1648 			_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
1649 				_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|
1650 				MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
1651 				MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
1652 				mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
1653 				mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
1654 				mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
/* A virtual group may span up to two L2 caches; report each one that is
 * referenced and has both counter sources configured. */
1657 			if (0 != group->l2_cache_core_ref_count[0]) {
1658 				if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
1659 				(MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
1660 					mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
1663 			if (0 != group->l2_cache_core_ref_count[1]) {
1664 				if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
1665 				(MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
1666 					mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
1672 			/* update performance counters for a physical group's pp core */
1673 			mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);
1675 #if defined(CONFIG_MALI400_PROFILING)
1676 			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|
1677 			MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core))|
1678 			MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
1679 			mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
1680 			mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
1681 			mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
/* Physical group: only L2 slot 0 is relevant. */
1684 			if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
1685 			(MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
1686 				mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
1693 static void mali_group_timeout(void *data)
1695 struct mali_group *group = (struct mali_group *)data;
1697 group->core_timed_out = MALI_TRUE;
1699 if (NULL != group->gp_core) {
1700 MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_gp_get_hw_core_desc(group->gp_core)));
1701 _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
1703 MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_pp_get_hw_core_desc(group->pp_core)));
1704 _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
/* Invalidate all MMU state for @session on @group (called when the
 * session's mappings change or the session dies). Detaches the session
 * if the group is idle; if the session is still active on the group,
 * zaps the MMU TLB and - when the zap fails - falls back to the full
 * page-fault recovery path, which releases the group lock itself. */
1708 void mali_group_zap_session(struct mali_group *group, struct mali_session_data *session)
1710 	MALI_DEBUG_ASSERT_POINTER(group);
1711 	MALI_DEBUG_ASSERT_POINTER(session);
1713 	/* Early out - safe even if mutex is not held */
1714 	if (group->session != session) return;
1716 	mali_group_lock(group);
1718 	mali_group_remove_session_if_unused(group, session);
/* Session still bound => a job for it may be running; zap the TLB. */
1720 	if (group->session == session) {
1721 		/* The Zap also does the stall and disable_stall */
1722 		mali_bool zap_success = mali_mmu_zap_tlb(group->mmu);
1723 		if (MALI_TRUE != zap_success) {
1724 			MALI_DEBUG_PRINT(2, ("Mali memory unmap failed. Doing pagefault handling.\n"));
/* Recovery path; releases the group lock on our behalf. */
1726 			mali_group_mmu_page_fault_and_unlock(group);
1731 	mali_group_unlock(group);
1734 #if defined(CONFIG_MALI400_PROFILING)
/* Read both performance counters of the L2 cache identified by
 * @core_num (0, 1 or 2) and emit them as a single profiling event on
 * the channel matching that L2 instance. For core_num 1 and 2 the two
 * l2_cache_core slots are probed by ID, since slot order is not fixed.
 * Compiled in only when CONFIG_MALI400_PROFILING is set. */
1735 static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num)
1741 	u32 profiling_channel = 0;
/* Select the profiling channel for the requested L2 instance; the last
 * assignment is the fallback for unexpected core numbers (L20 again). */
1745 	profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
1746 	MALI_PROFILING_EVENT_CHANNEL_GPU |
1747 	MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
1750 	profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
1751 	MALI_PROFILING_EVENT_CHANNEL_GPU |
1752 	MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS;
1755 	profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
1756 	MALI_PROFILING_EVENT_CHANNEL_GPU |
1757 	MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS;
1760 	profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
1761 	MALI_PROFILING_EVENT_CHANNEL_GPU |
1762 	MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
/* L2 #0 always lives in slot 0; L2 #1 and #2 may be in either slot. */
1766 	if (0 == core_num) {
1767 		mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
1769 	if (1 == core_num) {
1770 		if (1 == mali_l2_cache_get_id(group->l2_cache_core[0])) {
1771 			mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
1772 		} else if (1 == mali_l2_cache_get_id(group->l2_cache_core[1])) {
1773 			mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
1776 	if (2 == core_num) {
1777 		if (2 == mali_l2_cache_get_id(group->l2_cache_core[0])) {
1778 			mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
1779 		} else if (2 == mali_l2_cache_get_id(group->l2_cache_core[1])) {
1780 			mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
/* Pack both counter sources into one word (src1 in the high byte). */
1784 	_mali_osk_profiling_add_event(profiling_channel, source1 << 8 | source0, value0, value1, 0, 0);
1786 #endif /* #if defined(CONFIG_MALI400_PROFILING) */
1788 mali_bool mali_group_is_enabled(struct mali_group *group)
1790 mali_bool enabled = MALI_TRUE;
1792 MALI_DEBUG_ASSERT_POINTER(group);
1794 mali_group_lock(group);
1795 if (MALI_GROUP_STATE_DISABLED == group->state) {
1796 enabled = MALI_FALSE;
1798 mali_group_unlock(group);
1803 void mali_group_enable(struct mali_group *group)
1805 MALI_DEBUG_ASSERT_POINTER(group);
1806 MALI_DEBUG_ASSERT( NULL != mali_group_get_pp_core(group)
1807 || NULL != mali_group_get_gp_core(group));
1809 if (NULL != mali_group_get_pp_core(group)) {
1810 mali_pp_scheduler_enable_group(group);
1812 mali_gp_scheduler_enable_group(group);
1816 void mali_group_disable(struct mali_group *group)
1818 MALI_DEBUG_ASSERT_POINTER(group);
1819 MALI_DEBUG_ASSERT( NULL != mali_group_get_pp_core(group)
1820 || NULL != mali_group_get_gp_core(group));
1822 if (NULL != mali_group_get_pp_core(group)) {
1823 mali_pp_scheduler_disable_group(group);
1825 mali_gp_scheduler_disable_group(group);
1829 static struct mali_pm_domain* mali_group_get_l2_domain(struct mali_group *group)
1831 MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
1833 /* l2_cache_core[0] stores the related l2 domain */
1834 return group->l2_cache_core[0]->pm_domain;
1837 void mali_group_get_pm_domain_ref(struct mali_group *group)
1839 MALI_DEBUG_ASSERT_POINTER(group);
1841 /* Get group used l2 domain ref */
1842 mali_pm_domain_ref_get(mali_group_get_l2_domain(group));
1843 /* Get group used core domain ref */
1844 mali_pm_domain_ref_get(group->pm_domain);
1847 void mali_group_put_pm_domain_ref(struct mali_group *group)
1849 MALI_DEBUG_ASSERT_POINTER(group);
1851 /* Put group used core domain ref */
1852 mali_pm_domain_ref_put(group->pm_domain);
1853 /* Put group used l2 domain ref */
1854 mali_pm_domain_ref_put(mali_group_get_l2_domain(group));