tizen 2.4 release accepted/tizen_2.4_mobile tizen_2.4 accepted/tizen/2.4/mobile/20151029.025026 submit/tizen_2.4/20151028.065640 tizen_2.4_mobile_release
author jk7744.park <jk7744.park@samsung.com>
Sat, 24 Oct 2015 09:11:45 +0000 (18:11 +0900)
committer jk7744.park <jk7744.park@samsung.com>
Sat, 24 Oct 2015 09:11:45 +0000 (18:11 +0900)
346 files changed:
Documentation/security/Smack.txt
arch/arm/configs/trats2_defconfig
drivers/gpu/Makefile
drivers/gpu/arm/Kconfig [new file with mode: 0644]
drivers/gpu/arm/mali400/Kconfig [new file with mode: 0644]
drivers/gpu/arm/mali400/Makefile [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/Kbuild [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/Kconfig [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/MALI_CONFIGURATION [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/Makefile [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/__malidrv_build_info.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_block_allocator.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_block_allocator.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_broadcast.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_broadcast.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_device_pause_resume.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_device_pause_resume.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_dlbu.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_dlbu.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_gp.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_gp.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_gp_job.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_gp_job.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_gp_scheduler.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_gp_scheduler.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_group.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_group.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_hw_core.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_hw_core.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_kernel_common.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_kernel_core.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_kernel_core.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_kernel_descriptor_mapping.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_kernel_descriptor_mapping.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_kernel_mem_os.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_kernel_mem_os.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_kernel_memory_engine.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_kernel_memory_engine.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_kernel_utilization.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_kernel_utilization.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_kernel_vsync.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_l2_cache.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_l2_cache.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_mem_validation.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_mem_validation.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_memory.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_memory.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_mmu.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_mmu.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_mmu_page_directory.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_mmu_page_directory.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_osk.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_osk_bitops.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_osk_list.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_osk_mali.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_osk_profiling.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_pm.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_pm.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_pmu.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_pmu.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_pp.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_pp.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_pp_job.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_pp_job.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_pp_scheduler.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_pp_scheduler.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_scheduler.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_scheduler.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_session.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_session.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_ukk.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_user_settings_db.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/common/mali_user_settings_db.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/include/linux/mali/mali_utgard.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/include/linux/mali/mali_utgard_counters.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/include/linux/mali/mali_utgard_ioctl.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/include/linux/mali/mali_utgard_profiling_events.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/include/linux/mali/mali_utgard_uk_types.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/license/gpl/mali_kernel_license.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_dma_buf.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_dma_buf.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_kernel_linux.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_kernel_linux.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_kernel_sysfs.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_kernel_sysfs.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_linux_pm_testsuite.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_linux_trace.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_osk_atomics.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_osk_irq.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_osk_locks.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_osk_low_level_mem.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_osk_mali.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_osk_math.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_osk_memory.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_osk_misc.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_osk_notification.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_osk_pm.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_osk_profiling.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_osk_specific.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_osk_time.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_osk_timers.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_osk_wait_queue.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_osk_wq.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_pmu_power_up_down.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_profiling_events.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_profiling_internal.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_profiling_internal.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_sync.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_sync.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_sync_user.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_uk_types.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_ukk_core.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_ukk_gp.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_ukk_mem.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_ukk_pp.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_ukk_profiling.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_ukk_vsync.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/linux/mali_ukk_wrappers.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/platform/exynos4/exynos4.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/platform/exynos4/exynos4.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/platform/mali_platform.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/platform/redwood/exynos4.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/platform/redwood/exynos4_pmm.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/platform/redwood/exynos4_pmm.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/regs/mali_200_regs.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/regs/mali_gp_regs.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/timestamp-arm11-cc/mali_timestamp.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/timestamp-arm11-cc/mali_timestamp.h [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/timestamp-default/mali_timestamp.c [new file with mode: 0644]
drivers/gpu/arm/mali400/mali/timestamp-default/mali_timestamp.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/Kbuild [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/Kconfig [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/MALI_CONFIGURATION [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/Makefile [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/__malidrv_build_info.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_broadcast.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_broadcast.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_dlbu.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_dlbu.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_dma.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_dma.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_gp.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_gp.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_gp_job.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_gp_job.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_gp_scheduler.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_gp_scheduler.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_group.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_group.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_hw_core.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_hw_core.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_common.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_core.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_core.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_descriptor_mapping.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_descriptor_mapping.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_utilization.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_utilization.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_vsync.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_l2_cache.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_l2_cache.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_mem_validation.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_mem_validation.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_mmu.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_mmu.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_mmu_page_directory.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_mmu_page_directory.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_osk.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_osk_bitops.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_osk_list.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_osk_mali.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_osk_profiling.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_osk_types.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pm.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pm.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pm_domain.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pm_domain.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pmu.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pmu.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pp.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pp.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pp_job.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pp_job.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pp_scheduler.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pp_scheduler.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_scheduler.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_scheduler.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_scheduler_types.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_session.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_session.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_soft_job.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_soft_job.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_spinlock_reentrant.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_spinlock_reentrant.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_timeline.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_timeline.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_timeline_fence_wait.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_timeline_fence_wait.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_timeline_sync_fence.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_timeline_sync_fence.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_ukk.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_user_settings_db.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/common/mali_user_settings_db.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/include/linux/mali/mali_utgard.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/include/linux/mali/mali_utgard_counters.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/include/linux/mali/mali_utgard_ioctl.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/include/linux/mali/mali_utgard_profiling_events.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/include/linux/mali/mali_utgard_profiling_gator_api.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/include/linux/mali/mali_utgard_uk_types.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/license/gpl/mali_kernel_license.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_device_pause_resume.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_kernel_linux.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_kernel_linux.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_kernel_sysfs.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_kernel_sysfs.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_linux_trace.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_block_alloc.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_block_alloc.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_dma_buf.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_dma_buf.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_external.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_os_alloc.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_os_alloc.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_types.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_ump.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_atomics.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_irq.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_locks.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_locks.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_low_level_mem.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_mali.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_math.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_memory.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_misc.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_notification.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_pm.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_profiling.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_specific.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_time.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_timers.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_wait_queue.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_wq.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_pmu_power_up_down.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_profiling_events.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_profiling_gator_api.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_profiling_internal.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_profiling_internal.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_sync.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_sync.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_uk_types.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_core.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_gp.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_mem.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_pp.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_profiling.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_soft_job.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_timeline.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_vsync.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_wrappers.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos3250/exynos3.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos3250/exynos3_pmm.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos3250/exynos3_pmm.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos3470/exynos4.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos3470/exynos4_pmm.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos3470/exynos4_pmm.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos4415/exynos4.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos4415/exynos4_pmm.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos4415/exynos4_pmm.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/platform/pegasus-m400/exynos4.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/platform/pegasus-m400/exynos4_pmm.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/platform/pegasus-m400/exynos4_pmm.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/regs/mali_200_regs.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/regs/mali_gp_regs.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/timestamp-arm11-cc/mali_timestamp.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/timestamp-arm11-cc/mali_timestamp.h [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/timestamp-default/mali_timestamp.c [new file with mode: 0644]
drivers/gpu/arm/mali400/r4p0_rel0/timestamp-default/mali_timestamp.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/.gitignore [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/Kbuild [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/Kconfig [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/Makefile [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/Makefile.common [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/arch-pb-virtex5/config.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/arch-pegasus-m400/config.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/arch-release/config.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/common/ump_kernel_api.c [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/common/ump_kernel_common.c [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/common/ump_kernel_common.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/common/ump_kernel_descriptor_mapping.c [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/common/ump_kernel_descriptor_mapping.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/common/ump_kernel_memory_backend.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/common/ump_kernel_ref_drv.c [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/common/ump_kernel_types.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/common/ump_osk.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/common/ump_ukk.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/include/ump_kernel_interface.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/include/ump_kernel_interface_ref_drv.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/include/ump_kernel_platform.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/include/ump_uk_types.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/linux/license/gpl/ump_kernel_license.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/linux/ump_ioctl.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/linux/ump_kernel_linux.c [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/linux/ump_kernel_linux.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/linux/ump_kernel_memory_backend_dedicated.c [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/linux/ump_kernel_memory_backend_dedicated.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/linux/ump_kernel_memory_backend_os.c [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/linux/ump_kernel_memory_backend_os.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/linux/ump_memory_backend.c [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/linux/ump_osk_atomics.c [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/linux/ump_osk_low_level_mem.c [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/linux/ump_osk_misc.c [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/linux/ump_ukk_ref_wrappers.c [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/linux/ump_ukk_ref_wrappers.h [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/linux/ump_ukk_wrappers.c [new file with mode: 0644]
drivers/gpu/arm/mali400/ump/linux/ump_ukk_wrappers.h [new file with mode: 0644]
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/drm_stub.c
drivers/gpu/drm/exynos/Makefile
drivers/gpu/drm/exynos/exynos_drm_fb.h
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_ipp.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-samsung.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/slp_global_lock.c [new file with mode: 0644]
drivers/misc/slp_global_lock.h [new file with mode: 0644]
drivers/sensor/ak8975.c
drivers/tty/vt/consolemap.c
drivers/video/Kconfig
include/drm/drm.h
include/drm/drmP.h
include/linux/input.h
packaging/linux-3.0.spec
security/smack/smack.h
security/smack/smack_access.c
security/smack/smack_lsm.c
security/smack/smackfs.c

index 57463a7..7500003 100644 (file)
@@ -200,11 +200,11 @@ netlabel
        label. The format accepted on write is:
                "%d.%d.%d.%d label" or "%d.%d.%d.%d/%d label".
 onlycap
        label. The format accepted on write is:
                "%d.%d.%d.%d label" or "%d.%d.%d.%d/%d label".
 onlycap
-       This contains the label processes must have for CAP_MAC_ADMIN
+       This contains labels processes must have for CAP_MAC_ADMIN
        and CAP_MAC_OVERRIDE to be effective. If this file is empty
        these capabilities are effective at for processes with any
        and CAP_MAC_OVERRIDE to be effective. If this file is empty
        these capabilities are effective at for processes with any
-       label. The value is set by writing the desired label to the
-       file or cleared by writing "-" to the file.
+       label. The values are set by writing the desired labels, separated
+       by spaces, to the file or cleared by writing "-" to the file.
 revoke-subject
        Writing a Smack label here sets the access to '-' for all access
        rules with that subject label.
 revoke-subject
        Writing a Smack label here sets the access to '-' for all access
        rules with that subject label.
index 74fa11f..e3813e6 100644 (file)
@@ -757,7 +757,7 @@ CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM=y
-# CONFIG_XFRM_USER is not set
+CONFIG_XFRM_USER=y
 # CONFIG_XFRM_SUB_POLICY is not set
 # CONFIG_XFRM_MIGRATE is not set
 # CONFIG_XFRM_STATISTICS is not set
 # CONFIG_XFRM_SUB_POLICY is not set
 # CONFIG_XFRM_MIGRATE is not set
 # CONFIG_XFRM_STATISTICS is not set
@@ -766,23 +766,28 @@ CONFIG_NET_KEY=y
 # CONFIG_NET_KEY_MIGRATE is not set
 CONFIG_INET=y
 # CONFIG_IP_MULTICAST is not set
 # CONFIG_NET_KEY_MIGRATE is not set
 CONFIG_INET=y
 # CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_IP_FIB_TRIE_STATS is not set
+CONFIG_IP_MULTIPLE_TABLES=y
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
 CONFIG_IP_PNP=y
 # CONFIG_IP_PNP_DHCP is not set
 # CONFIG_IP_PNP_BOOTP is not set
 # CONFIG_IP_PNP_RARP is not set
 # CONFIG_NET_IPIP is not set
 CONFIG_IP_PNP=y
 # CONFIG_IP_PNP_DHCP is not set
 # CONFIG_IP_PNP_BOOTP is not set
 # CONFIG_IP_PNP_RARP is not set
 # CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE_DEMUX is not set
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
 # CONFIG_ARPD is not set
 # CONFIG_SYN_COOKIES is not set
 # CONFIG_ARPD is not set
 # CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
+CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_ESP=y
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_XFRM_TUNNEL=y
 CONFIG_INET_TUNNEL=y
 CONFIG_INET_XFRM_MODE_TRANSPORT=y
 CONFIG_INET_TUNNEL=y
 CONFIG_INET_XFRM_MODE_TRANSPORT=y
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=y
 CONFIG_INET_TCP_DIAG=y
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=y
 CONFIG_INET_TCP_DIAG=y
@@ -832,6 +837,7 @@ CONFIG_NF_CONNTRACK=y
 # CONFIG_NF_CONNTRACK_EVENTS is not set
 # CONFIG_NF_CONNTRACK_TIMESTAMP is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 # CONFIG_NF_CONNTRACK_EVENTS is not set
 # CONFIG_NF_CONNTRACK_TIMESTAMP is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
+CONFIG_NF_CT_PROTO_GRE=y
 # CONFIG_NF_CT_PROTO_SCTP is not set
 # CONFIG_NF_CT_PROTO_UDPLITE is not set
 # CONFIG_NF_CONNTRACK_AMANDA is not set
 # CONFIG_NF_CT_PROTO_SCTP is not set
 # CONFIG_NF_CT_PROTO_UDPLITE is not set
 # CONFIG_NF_CONNTRACK_AMANDA is not set
@@ -840,7 +846,7 @@ CONFIG_NF_CONNTRACK=y
 # CONFIG_NF_CONNTRACK_IRC is not set
 # CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
 # CONFIG_NF_CONNTRACK_SNMP is not set
 # CONFIG_NF_CONNTRACK_IRC is not set
 # CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
 # CONFIG_NF_CONNTRACK_SNMP is not set
-# CONFIG_NF_CONNTRACK_PPTP is not set
+CONFIG_NF_CONNTRACK_PPTP=y
 # CONFIG_NF_CONNTRACK_SANE is not set
 # CONFIG_NF_CONNTRACK_SIP is not set
 # CONFIG_NF_CONNTRACK_TFTP is not set
 # CONFIG_NF_CONNTRACK_SANE is not set
 # CONFIG_NF_CONNTRACK_SIP is not set
 # CONFIG_NF_CONNTRACK_TFTP is not set
@@ -940,11 +946,12 @@ CONFIG_NF_NAT_NEEDED=y
 CONFIG_IP_NF_TARGET_MASQUERADE=y
 CONFIG_IP_NF_TARGET_NETMAP=y
 CONFIG_IP_NF_TARGET_REDIRECT=y
 CONFIG_IP_NF_TARGET_MASQUERADE=y
 CONFIG_IP_NF_TARGET_NETMAP=y
 CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_NF_NAT_PROTO_GRE=y
 # CONFIG_NF_NAT_FTP is not set
 # CONFIG_NF_NAT_IRC is not set
 # CONFIG_NF_NAT_TFTP is not set
 # CONFIG_NF_NAT_AMANDA is not set
 # CONFIG_NF_NAT_FTP is not set
 # CONFIG_NF_NAT_IRC is not set
 # CONFIG_NF_NAT_TFTP is not set
 # CONFIG_NF_NAT_AMANDA is not set
-# CONFIG_NF_NAT_PPTP is not set
+CONFIG_NF_NAT_PPTP=y
 # CONFIG_NF_NAT_H323 is not set
 # CONFIG_NF_NAT_SIP is not set
 CONFIG_IP_NF_MANGLE=y
 # CONFIG_NF_NAT_H323 is not set
 # CONFIG_NF_NAT_SIP is not set
 CONFIG_IP_NF_MANGLE=y
@@ -969,7 +976,11 @@ CONFIG_IP_NF_ARP_MANGLE=y
 # CONFIG_RDS is not set
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_RDS is not set
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
-# CONFIG_L2TP is not set
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
 # CONFIG_BRIDGE is not set
 # CONFIG_NET_DSA is not set
 # CONFIG_VLAN_8021Q is not set
 # CONFIG_BRIDGE is not set
 # CONFIG_NET_DSA is not set
 # CONFIG_VLAN_8021Q is not set
@@ -1059,7 +1070,6 @@ CONFIG_XPS=y
 CONFIG_BT=y
 CONFIG_BT_BREDR=y
 CONFIG_BT_RFCOMM=y
 CONFIG_BT=y
 CONFIG_BT_BREDR=y
 CONFIG_BT_RFCOMM=y
-CONFIG_BT_RFCOMM_TTY=y
 CONFIG_BT_BNEP=y
 # CONFIG_BT_BNEP_MC_FILTER is not set
 # CONFIG_BT_BNEP_PROTO_FILTER is not set
 CONFIG_BT_BNEP=y
 # CONFIG_BT_BNEP_MC_FILTER is not set
 # CONFIG_BT_BNEP_PROTO_FILTER is not set
@@ -1229,6 +1239,7 @@ CONFIG_LINK_DEVICE_HSIC=y
 # CONFIG_INTERNAL_MODEM_IF is not set
 CONFIG_SLP_LOWMEM_NOTIFY=y
 CONFIG_SLP_PROCESS_MON=y
 # CONFIG_INTERNAL_MODEM_IF is not set
 CONFIG_SLP_LOWMEM_NOTIFY=y
 CONFIG_SLP_PROCESS_MON=y
+CONFIG_SLP_GLOBAL_LOCK=y
 CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
 CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
@@ -1284,7 +1295,7 @@ CONFIG_NETDEVICES=y
 # CONFIG_BONDING is not set
 # CONFIG_MACVLAN is not set
 # CONFIG_EQUALIZER is not set
 # CONFIG_BONDING is not set
 # CONFIG_MACVLAN is not set
 # CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
+CONFIG_TUN=y
 # CONFIG_VETH is not set
 # CONFIG_MII is not set
 # CONFIG_PHYLIB is not set
 # CONFIG_VETH is not set
 # CONFIG_MII is not set
 # CONFIG_PHYLIB is not set
@@ -1327,8 +1338,21 @@ CONFIG_WLAN_REGION_CODE=100
 #
 # CAIF transport drivers
 #
 #
 # CAIF transport drivers
 #
-# CONFIG_PPP is not set
+CONFIG_PPP=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
 # CONFIG_SLIP is not set
 # CONFIG_SLIP is not set
+CONFIG_SLHC=y
 # CONFIG_NETCONSOLE is not set
 # CONFIG_NETPOLL is not set
 # CONFIG_NET_POLL_CONTROLLER is not set
 # CONFIG_NETCONSOLE is not set
 # CONFIG_NETPOLL is not set
 # CONFIG_NET_POLL_CONTROLLER is not set
@@ -1381,7 +1405,31 @@ CONFIG_KEYBOARD_GPIO=y
 # CONFIG_KEYBOARD_XTKBD is not set
 # CONFIG_KEYBOARD_CYPRESS_TOUCH is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_KEYBOARD_XTKBD is not set
 # CONFIG_KEYBOARD_CYPRESS_TOUCH is not set
 # CONFIG_INPUT_MOUSE is not set
-# CONFIG_INPUT_JOYSTICK is not set
+CONFIG_INPUT_JOYSTICK=y
+# CONFIG_JOYSTICK_ANALOG is not set
+# CONFIG_JOYSTICK_A3D is not set
+# CONFIG_JOYSTICK_ADI is not set
+# CONFIG_JOYSTICK_COBRA is not set
+# CONFIG_JOYSTICK_GF2K is not set
+# CONFIG_JOYSTICK_GRIP is not set
+# CONFIG_JOYSTICK_GRIP_MP is not set
+# CONFIG_JOYSTICK_GUILLEMOT is not set
+# CONFIG_JOYSTICK_INTERACT is not set
+# CONFIG_JOYSTICK_SIDEWINDER is not set
+# CONFIG_JOYSTICK_TMDC is not set
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_TWIDJOY is not set
+# CONFIG_JOYSTICK_ZHENHUA is not set
+# CONFIG_JOYSTICK_AS5011 is not set
+# CONFIG_JOYSTICK_JOYDUMP is not set
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_JOYSTICK_XPAD_FF=y
+# CONFIG_JOYSTICK_XPAD_LEDS is not set
 # CONFIG_INPUT_TABLET is not set
 CONFIG_INPUT_TOUCHSCREEN=y
 # CONFIG_TOUCHSCREEN_MELFAS_GC is not set
 # CONFIG_INPUT_TABLET is not set
 CONFIG_INPUT_TOUCHSCREEN=y
 # CONFIG_TOUCHSCREEN_MELFAS_GC is not set
@@ -2084,20 +2132,8 @@ CONFIG_VIDEO_MFC5X=y
 CONFIG_VIDEO_MFC_MAX_INSTANCE=8
 # CONFIG_VIDEO_MFC5X_DEBUG is not set
 CONFIG_VIDEO_MFC5X_DEC_CHROMA_LUMA_4K_ALIGN=y
 CONFIG_VIDEO_MFC_MAX_INSTANCE=8
 # CONFIG_VIDEO_MFC5X_DEBUG is not set
 CONFIG_VIDEO_MFC5X_DEC_CHROMA_LUMA_4K_ALIGN=y
-CONFIG_VIDEO_UMP=y
-# CONFIG_UMP_DED_ONLY is not set
-CONFIG_UMP_OSMEM_ONLY=y
-# CONFIG_UMP_VCM_ONLY is not set
-CONFIG_UMP_MEM_SIZE=512
-# CONFIG_VIDEO_UMP_DEBUG is not set
-# CONFIG_VIDEO_MALI400MP_DEBUG is not set
-CONFIG_VIDEO_MALI400MP_DVFS=y
-CONFIG_VIDEO_MALI400MP_R3P0=y
-# CONFIG_MALI_DED_ONLY_R3P0 is not set
-# CONFIG_MALI_DED_MMU_R3P0 is not set
-CONFIG_MALI_OSMEM_ONLY_R3P0=y
-# CONFIG_MALI_DED_OSMEM_R3P0 is not set
-CONFIG_VIDEO_MALI400MP_GLOBAL_LOCK=y
+# CONFIG_VIDEO_UMP is not set
+# CONFIG_VIDEO_MALI400MP_R3P0 is not set
 # CONFIG_VIDEO_FIMG2D is not set
 # CONFIG_VIDEO_FIMG2D4X is not set
 CONFIG_VIDEO_JPEG_V2X=y
 # CONFIG_VIDEO_FIMG2D is not set
 # CONFIG_VIDEO_FIMG2D4X is not set
 CONFIG_VIDEO_JPEG_V2X=y
@@ -2168,6 +2204,22 @@ CONFIG_DRM_EXYNOS_IPP=y
 CONFIG_DRM_EXYNOS_FIMC=y
 # CONFIG_DRM_UDL is not set
 # CONFIG_ION is not set
 CONFIG_DRM_EXYNOS_FIMC=y
 # CONFIG_DRM_UDL is not set
 # CONFIG_ION is not set
+
+#
+# ARM GPU Configuration
+#
+CONFIG_MALI400=y
+# CONFIG_MALI_VER_R3P2_REL0 is not set
+# CONFIG_MALI400_DEBUG is not set
+# CONFIG_MALI400_PROFILING is not set
+# CONFIG_MALI400_UMP is not set
+CONFIG_MALI_DVFS=y
+CONFIG_MALI_VER_R4P0_REL0=y
+# CONFIG_MALI_DVFS_FULL_LEVEL is not set
+# CONFIG_MALI400_POWER_PERFORMANCE_POLICY is not set
+CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH=y
+# CONFIG_MALI_SHARED_INTERRUPTS is not set
+# CONFIG_MALI_PMU_PARALLEL_POWER_UP is not set
 # CONFIG_VGASTATE is not set
 # CONFIG_VIDEO_OUTPUT_CONTROL is not set
 CONFIG_FB=y
 # CONFIG_VGASTATE is not set
 # CONFIG_VIDEO_OUTPUT_CONTROL is not set
 CONFIG_FB=y
@@ -3095,12 +3147,13 @@ CONFIG_CRYPTO_BLKCIPHER=y
 CONFIG_CRYPTO_BLKCIPHER2=y
 CONFIG_CRYPTO_HASH=y
 CONFIG_CRYPTO_HASH2=y
 CONFIG_CRYPTO_BLKCIPHER2=y
 CONFIG_CRYPTO_HASH=y
 CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
 CONFIG_CRYPTO_RNG2=y
 CONFIG_CRYPTO_PCOMP2=y
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_MANAGER2=y
 CONFIG_CRYPTO_MANAGER_TESTS=y
 CONFIG_CRYPTO_RNG2=y
 CONFIG_CRYPTO_PCOMP2=y
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_MANAGER2=y
 CONFIG_CRYPTO_MANAGER_TESTS=y
-# CONFIG_CRYPTO_GF128MUL is not set
+CONFIG_CRYPTO_GF128MUL=y
 # CONFIG_CRYPTO_NULL is not set
 # CONFIG_CRYPTO_PCRYPT is not set
 CONFIG_CRYPTO_WORKQUEUE=y
 # CONFIG_CRYPTO_NULL is not set
 # CONFIG_CRYPTO_PCRYPT is not set
 CONFIG_CRYPTO_WORKQUEUE=y
@@ -3112,14 +3165,14 @@ CONFIG_CRYPTO_AUTHENC=y
 # Authenticated Encryption with Associated Data
 #
 # CONFIG_CRYPTO_CCM is not set
 # Authenticated Encryption with Associated Data
 #
 # CONFIG_CRYPTO_CCM is not set
-# CONFIG_CRYPTO_GCM is not set
-# CONFIG_CRYPTO_SEQIV is not set
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_SEQIV=y
 
 #
 # Block modes
 #
 CONFIG_CRYPTO_CBC=y
 
 #
 # Block modes
 #
 CONFIG_CRYPTO_CBC=y
-# CONFIG_CRYPTO_CTR is not set
+CONFIG_CRYPTO_CTR=y
 # CONFIG_CRYPTO_CTS is not set
 CONFIG_CRYPTO_ECB=y
 # CONFIG_CRYPTO_LRW is not set
 # CONFIG_CRYPTO_CTS is not set
 CONFIG_CRYPTO_ECB=y
 # CONFIG_CRYPTO_LRW is not set
@@ -3138,7 +3191,7 @@ CONFIG_CRYPTO_HMAC=y
 # Digest
 #
 CONFIG_CRYPTO_CRC32C=y
 # Digest
 #
 CONFIG_CRYPTO_CRC32C=y
-# CONFIG_CRYPTO_GHASH is not set
+CONFIG_CRYPTO_GHASH=y
 # CONFIG_CRYPTO_MD4 is not set
 CONFIG_CRYPTO_MD5=y
 # CONFIG_CRYPTO_MICHAEL_MIC is not set
 # CONFIG_CRYPTO_MD4 is not set
 CONFIG_CRYPTO_MD5=y
 # CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -3147,7 +3200,7 @@ CONFIG_CRYPTO_MD5=y
 # CONFIG_CRYPTO_RMD256 is not set
 # CONFIG_CRYPTO_RMD320 is not set
 CONFIG_CRYPTO_SHA1=y
 # CONFIG_CRYPTO_RMD256 is not set
 # CONFIG_CRYPTO_RMD320 is not set
 CONFIG_CRYPTO_SHA1=y
-# CONFIG_CRYPTO_SHA256 is not set
+CONFIG_CRYPTO_SHA256=y
 # CONFIG_CRYPTO_SHA512 is not set
 # CONFIG_CRYPTO_TGR192 is not set
 # CONFIG_CRYPTO_WP512 is not set
 # CONFIG_CRYPTO_SHA512 is not set
 # CONFIG_CRYPTO_TGR192 is not set
 # CONFIG_CRYPTO_WP512 is not set
@@ -3157,7 +3210,7 @@ CONFIG_CRYPTO_SHA1=y
 #
 CONFIG_CRYPTO_AES=y
 # CONFIG_CRYPTO_ANUBIS is not set
 #
 CONFIG_CRYPTO_AES=y
 # CONFIG_CRYPTO_ANUBIS is not set
-# CONFIG_CRYPTO_ARC4 is not set
+CONFIG_CRYPTO_ARC4=y
 # CONFIG_CRYPTO_BLOWFISH is not set
 # CONFIG_CRYPTO_CAMELLIA is not set
 # CONFIG_CRYPTO_CAST5 is not set
 # CONFIG_CRYPTO_BLOWFISH is not set
 # CONFIG_CRYPTO_CAMELLIA is not set
 # CONFIG_CRYPTO_CAST5 is not set
index 1bc6689..57b370e 100644 (file)
@@ -1,2 +1,2 @@
-obj-y                  += drm/ vga/ stub/ ion/
+obj-y                  += drm/ vga/ stub/ ion/ arm/mali400/
 
 
diff --git a/drivers/gpu/arm/Kconfig b/drivers/gpu/arm/Kconfig
new file mode 100644 (file)
index 0000000..3f33b8f
--- /dev/null
@@ -0,0 +1,17 @@
+#
+# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+#
+#
+
+
+menu "ARM GPU Configuration"
+
+source "drivers/gpu/arm/mali400/Kconfig"
+
+endmenu
diff --git a/drivers/gpu/arm/mali400/Kconfig b/drivers/gpu/arm/mali400/Kconfig
new file mode 100644 (file)
index 0000000..630d6de
--- /dev/null
@@ -0,0 +1,35 @@
+menuconfig MALI400
+       tristate "Mali-400 support"
+       default n
+       depends on ARM
+       select DMA_SHARED_BUFFER
+       ---help---
+         This enables support for the ARM Mali-400 GPUs.
+
+         To compile this driver as a module, choose M here: the module will be
+         called mali.
+
+choice
+depends on MALI400
+prompt "Select MALI VER"
+default MALI_VER_R3P2_REL0
+
+config MALI_VER_R3P2_REL0
+       bool "Mali400 Version R3P2-REL0"
+       help
+         Choose this option to select Mali400 DDK R3P2-REL0 version.
+
+if MALI_VER_R3P2_REL0
+source "drivers/gpu/arm/mali400/mali/Kconfig"
+endif
+
+config MALI_VER_R4P0_REL0
+       bool "Mali400 Version R4P0-REL0"
+       help
+         Choose this option to select Mali400 DDK R4P0-REL0 version.
+
+if MALI_VER_R4P0_REL0
+source "drivers/gpu/arm/mali400/r4p0_rel0/Kconfig"
+endif
+
+endchoice
diff --git a/drivers/gpu/arm/mali400/Makefile b/drivers/gpu/arm/mali400/Makefile
new file mode 100644 (file)
index 0000000..55410c9
--- /dev/null
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MALI_VER_R3P2_REL0) += mali/ ump/
+obj-$(CONFIG_MALI_VER_R4P0_REL0) += r4p0_rel0/
+
+EXTRA_CFLAGS += -Idrivers/gpu/arm/mali400
diff --git a/drivers/gpu/arm/mali400/mali/Kbuild b/drivers/gpu/arm/mali400/mali/Kbuild
new file mode 100644 (file)
index 0000000..b17fbc5
--- /dev/null
@@ -0,0 +1,189 @@
+#
+# Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+#
+
+# This file is called by the Linux build system.
+
+# set up defaults if not defined by the user
+TIMESTAMP ?= default
+OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB ?= 16
+USING_GPU_UTILIZATION ?= 1
+PROFILING_SKIP_PP_JOBS ?= 0
+PROFILING_SKIP_PP_AND_GP_JOBS ?= 0
+MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP ?= 0
+MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED ?= 0
+MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS ?= 0
+MALI_UPPER_HALF_SCHEDULING ?= 1
+# MALI_SEC 
+# Include the mapping between TARGET_PLATFORM and KDIR + MALI_PLATFORM
+TARGET_PLATFORM=exynos4
+include $(srctree)/$(src)/MALI_CONFIGURATION
+MALI_PLATFORM = $(MALI_PLATFORM-$(TARGET_PLATFORM))
+MALI_PLATFORM_FILES = $(subst $(srctree)/$(src)/,,$(wildcard $(srctree)/$(src)/platform/$(MALI_PLATFORM)/*.c))
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+ifeq ($(wildcard $(srctree)/$(src)/linux/license/gpl/*),)
+    ccflags-y += -I$(srctree)/$(src)/linux/license/proprietary
+    ifeq ($(CONFIG_MALI400_PROFILING),y)
+        $(error Profiling is incompatible with non-GPL license)
+    endif
+    ifeq ($(CONFIG_PM_RUNTIME),y)
+        $(error Runtime PM is incompatible with non-GPL license)
+    endif
+    ifeq ($(CONFIG_DMA_SHARED_BUFFER),y)
+        $(error DMA-BUF is incompatible with non-GPL license)
+    endif
+    $(error Linux Device integration is incompatible with non-GPL license)
+else
+    ccflags-y += -I$(srctree)/$(src)/linux/license/gpl
+endif
+
+mali-y += \
+       linux/mali_osk_atomics.o \
+       linux/mali_osk_irq.o \
+       linux/mali_osk_wq.o \
+       linux/mali_osk_locks.o \
+       linux/mali_osk_wait_queue.o \
+       linux/mali_osk_low_level_mem.o \
+       linux/mali_osk_math.o \
+       linux/mali_osk_memory.o \
+       linux/mali_osk_misc.o \
+       linux/mali_osk_mali.o \
+       linux/mali_osk_notification.o \
+       linux/mali_osk_time.o \
+       linux/mali_osk_timers.o
+
+mali-y += \
+       linux/mali_ukk_mem.o \
+       linux/mali_ukk_gp.o \
+       linux/mali_ukk_pp.o \
+       linux/mali_ukk_core.o
+
+# Source files which always are included in a build
+mali-y += \
+       common/mali_kernel_core.o \
+       linux/mali_kernel_linux.o \
+       common/mali_kernel_descriptor_mapping.o \
+       common/mali_session.o \
+       common/mali_device_pause_resume.o \
+       common/mali_kernel_vsync.o \
+       linux/mali_ukk_vsync.o \
+       linux/mali_kernel_sysfs.o \
+       common/mali_mmu.o \
+       common/mali_mmu_page_directory.o \
+       common/mali_memory.o \
+       common/mali_kernel_memory_engine.o \
+       common/mali_block_allocator.o \
+       common/mali_kernel_mem_os.o \
+       common/mali_mem_validation.o \
+       common/mali_hw_core.o \
+       common/mali_gp.o \
+       common/mali_pp.o \
+       common/mali_pp_job.o \
+       common/mali_gp_job.o \
+       common/mali_scheduler.o \
+       common/mali_gp_scheduler.o \
+       common/mali_pp_scheduler.o \
+       common/mali_group.o \
+       common/mali_dlbu.o \
+       common/mali_broadcast.o \
+       common/mali_pm.o \
+       common/mali_pmu.o \
+       common/mali_user_settings_db.o \
+       common/mali_kernel_utilization.o \
+       common/mali_l2_cache.o \
+       linux/mali_osk_pm.o \
+       linux/mali_pmu_power_up_down.o \
+       __malidrv_build_info.o
+
+ifneq ($(MALI_PLATFORM_FILES),)
+       mali-y += $(MALI_PLATFORM_FILES:.c=.o)
+endif
+
+mali-$(CONFIG_MALI400_PROFILING) += linux/mali_ukk_profiling.o
+mali-$(CONFIG_MALI400_PROFILING) += linux/mali_osk_profiling.o
+
+mali-$(CONFIG_MALI400_INTERNAL_PROFILING) += linux/mali_profiling_internal.o timestamp-$(TIMESTAMP)/mali_timestamp.o
+ccflags-$(CONFIG_MALI400_INTERNAL_PROFILING) += -I$(srctree)/$(src)/timestamp-$(TIMESTAMP)
+
+mali-$(CONFIG_DMA_SHARED_BUFFER) += linux/mali_dma_buf.o
+mali-$(CONFIG_SYNC) += linux/mali_sync.o linux/mali_sync_user.o
+
+# Tell the Linux build system from which .o file to create the kernel module
+obj-$(CONFIG_MALI400) := mali.o
+
+ccflags-y += $(EXTRA_DEFINES)
+
+# Set up our defines, which will be passed to gcc
+ccflags-y += -DPROFILING_SKIP_PP_JOBS=$(PROFILING_SKIP_PP_JOBS)
+ccflags-y += -DPROFILING_SKIP_PP_AND_GP_JOBS=$(PROFILING_SKIP_PP_AND_GP_JOBS)
+
+ccflags-y += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP)
+ccflags-y += -DMALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED=$(MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED)
+ccflags-y += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS)
+ccflags-y += -DMALI_STATE_TRACKING=1
+ccflags-y += -DMALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+ccflags-y += -DUSING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
+
+ifeq ($(MALI_UPPER_HALF_SCHEDULING),1)
+       ccflags-y += -DMALI_UPPER_HALF_SCHEDULING
+endif
+
+ccflags-$(CONFIG_MALI400_UMP) += -I$(srctree)/$(src)/../../ump/include/ump
+ccflags-$(CONFIG_MALI400_DEBUG) += -DDEBUG
+
+# Use our defines when compiling
+ccflags-y += -I$(srctree)/$(src) -I$(srctree)/$(src)/include -I$(srctree)/$(src)/common -I$(srctree)/$(src)/linux -I$(srctree)/$(src)/platform
+# MALI_SEC 
+ccflags-y += -I$(srctree)/$(src)/../ump/include
+
+# Get subversion revision number, fall back to only ${MALI_RELEASE_NAME} if no svn info is available
+MALI_RELEASE_NAME=$(shell cat $(srctree)/$(src)/.version 2> /dev/null)
+
+SVN_INFO = (cd $(src); (svn info || git svn info || \
+       echo -e "\nURL: $(MALI_RELEASE_NAME)\n" \
+       "Last Changed Rev: $(MALI_RELEASE_NAME)\n" \
+       "Last Changed Date: $(MALI_RELEASE_NAME)") 2>/dev/null)
+
+SVN_REV := $(shell (cd $(src); echo "$(SVN_INFO)" | grep '^Revision: '| sed -e 's/^Revision: //' ) 2>/dev/null )
+ifeq ($(SVN_REV),)
+SVN_REV := $(MALI_RELEASE_NAME)
+else
+SVN_REV := $(MALI_RELEASE_NAME)-r$(SVN_REV)
+endif
+
+ccflags-y += -DSVN_REV_STRING=\"$(SVN_REV)\"
+
+VERSION_STRINGS :=
+VERSION_STRINGS += API_VERSION=$(shell cd $(srctree)/$(src); grep "\#define _MALI_API_VERSION" $(FILES_PREFIX)include/linux/mali/mali_utgard_uk_types.h | cut -d' ' -f 3 )
+VERSION_STRINGS += REPO_URL=$(shell $(SVN_INFO) | grep '^URL: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += REVISION=$(SVN_REV)
+VERSION_STRINGS += CHANGED_REVISION=$(shell $(SVN_INFO) | grep '^Last Changed Rev: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += CHANGE_DATE=$(shell $(SVN_INFO) | grep '^Last Changed Date: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += BUILD_DATE=$(shell date)
+ifdef CONFIG_MALI400_DEBUG
+VERSION_STRINGS += BUILD=debug
+else
+VERSION_STRINGS += BUILD=release
+endif
+VERSION_STRINGS += TARGET_PLATFORM=$(TARGET_PLATFORM)
+VERSION_STRINGS += MALI_PLATFORM=$(MALI_PLATFORM)
+VERSION_STRINGS += KDIR=$(KDIR)
+VERSION_STRINGS += OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+VERSION_STRINGS += USING_UMP=$(CONFIG_MALI400_UMP)
+VERSION_STRINGS += USING_PROFILING=$(CONFIG_MALI400_PROFILING)
+VERSION_STRINGS += USING_INTERNAL_PROFILING=$(CONFIG_MALI400_INTERNAL_PROFILING)
+VERSION_STRINGS += USING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
+VERSION_STRINGS += MALI_UPPER_HALF_SCHEDULING=$(MALI_UPPER_HALF_SCHEDULING)
+
+# Create file with Mali driver configuration
+$(src)/__malidrv_build_info.c:
+       @echo 'const char *__malidrv_build_info(void) { return "malidrv: $(VERSION_STRINGS)";}' > $(src)/__malidrv_build_info.c
diff --git a/drivers/gpu/arm/mali400/mali/Kconfig b/drivers/gpu/arm/mali400/mali/Kconfig
new file mode 100644 (file)
index 0000000..a4b8ac8
--- /dev/null
@@ -0,0 +1,43 @@
+config MALI400_DEBUG
+       bool "Enable debug in Mali driver"
+       depends on MALI400
+       ---help---
+         This enabled extra debug checks and messages in the Mali driver.
+
+config MALI400_PROFILING
+       bool "Enable Mali profiling"
+       depends on MALI400
+       select TRACEPOINTS
+       default n
+       ---help---
+         This enables gator profiling of Mali GPU events.
+
+config MALI400_INTERNAL_PROFILING
+       bool "Enable internal Mali profiling API"
+       depends on MALI400_PROFILING
+       default n
+       ---help---
+         This enables the internal legacy Mali profiling API.
+
+config MALI400_UMP
+       bool "Enable UMP support"
+       depends on MALI400
+       default n
+       ---help---
+         This enables support for the UMP memory sharing API in the Mali driver.
+
+source "drivers/gpu/arm/mali400/ump/Kconfig"
+
+config MALI_DVFS
+       bool "Enables mali DVFS"
+       depends on MALI400 && PM
+       default n
+       ---help---
+               This enables Mali driver DVFS.
+
+config SLP_MALI_DBG
+       bool "Enable mali debug"
+       depends on MALI400
+       default y
+       ---help---
+               This enables the panic when mali register is accessed without power on
diff --git a/drivers/gpu/arm/mali400/mali/MALI_CONFIGURATION b/drivers/gpu/arm/mali400/mali/MALI_CONFIGURATION
new file mode 100644 (file)
index 0000000..b0c2ebd
--- /dev/null
@@ -0,0 +1,25 @@
+# Location of default kernels
+KDIR-odroida:=/projects/pr297/linux/odroid-a/current/linux
+KDIR-odroidpc:=/projects/pr297/linux/odroid-pc/current/linux
+KDIR-odroidq:=/projects/pr297/linux/odroid-q/current/linux
+KDIR-orion:=/projects/pr297/linux/orion/current/linux
+KDIR-pegasus:=/projects/pr297/linux/pegasus-smdk/current/linux
+KDIR-tcc8900:=/projects/pr297/linux/tcc8900/current/linux
+KDIR-pb11mp:=/projects/pr297/linux/pb11mp/current/linux
+KDIR-vea9:=/projects/pr297/linux/vea9/current/linux
+KDIR-snowball:=/no/default/kernel/yet
+
+# Name of platform directory with platform specific code (should be built into kernel on a real system) 
+MALI_PLATFORM-odroida=exynos4
+MALI_PLATFORM-odroidpc=exynos4
+MALI_PLATFORM-odroidq=exynos4
+MALI_PLATFORM-orion=exynos4
+MALI_PLATFORM-pegasus=exynos4
+# MALI_SEC 
+MALI_PLATFORM-pegasus-m400=pegasus-m400
+MALI_PLATFORM-redwood=redwood
+MALI_PLATFORM-tcc8900=tcc8900
+MALI_PLATFORM-pb11mp=arm
+MALI_PLATFORM-vea9=arm
+MALI_PLATFORM-snowball=ux500
+MALI_PLATFORM-exynos4=exynos4
diff --git a/drivers/gpu/arm/mali400/mali/Makefile b/drivers/gpu/arm/mali400/mali/Makefile
new file mode 100644 (file)
index 0000000..0a47c9f
--- /dev/null
@@ -0,0 +1,126 @@
+#
+# Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+#
+
+USE_UMPV2=0
+USING_PROFILING ?= 1
+USING_INTERNAL_PROFILING ?= 0
+
+# The Makefile sets up "arch" based on the CONFIG, creates the version info
+# string and the __malidrv_build_info.c file, and then call the Linux build
+# system to actually build the driver. After that point the Kbuild file takes
+# over.
+
+# set up defaults if not defined by the user
+ARCH ?= arm
+
+OSKOS=linux
+FILES_PREFIX=
+
+check_cc2 = \
+       $(shell if $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; \
+       then \
+               echo "$(2)"; \
+       else \
+               echo "$(3)"; \
+       fi ;)
+
+# This conditional makefile exports the global definition ARM_INTERNAL_BUILD. Customer releases will not include arm_internal.mak
+-include ../../../arm_internal.mak
+
+# Give warning of old config parameters are used
+ifneq ($(CONFIG),)
+$(warning "You have specified the CONFIG variable which is no longer in used. Use TARGET_PLATFORM instead.")
+endif
+
+ifneq ($(CPU),)
+$(warning "You have specified the CPU variable which is no longer in used. Use TARGET_PLATFORM instead.")
+endif
+
+# Include the mapping between TARGET_PLATFORM and KDIR + MALI_PLATFORM
+-include MALI_CONFIGURATION
+export KDIR ?= $(KDIR-$(TARGET_PLATFORM))
+export MALI_PLATFORM ?= $(MALI_PLATFORM-$(TARGET_PLATFORM))
+
+ifneq ($(TARGET_PLATFORM),)
+ifeq ($(MALI_PLATFORM),)
+$(error "Invalid TARGET_PLATFORM: $(TARGET_PLATFORM)")
+endif
+endif
+
+# validate lookup result
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(TARGET_PLATFORM))
+endif
+
+
+ifeq ($(USING_UMP),1)
+export CONFIG_MALI400_UMP=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_UMP=1
+ifeq ($(USE_UMPV2),1)
+UMP_SYMVERS_FILE ?= ../umpv2/Module.symvers
+else
+UMP_SYMVERS_FILE ?= ../ump/Module.symvers
+endif
+KBUILD_EXTRA_SYMBOLS = $(realpath $(UMP_SYMVERS_FILE))
+$(warning $(KBUILD_EXTRA_SYMBOLS))
+endif
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+include $(KDIR)/.config
+
+ifeq ($(ARCH), arm)
+# when compiling for ARM we're cross compiling
+export CROSS_COMPILE ?= $(call check_cc2, arm-linux-gnueabi-gcc, arm-linux-gnueabi-, arm-none-linux-gnueabi-)
+endif
+
+# report detected/selected settings
+ifdef ARM_INTERNAL_BUILD
+$(warning TARGET_PLATFORM $(TARGET_PLATFORM))
+$(warning KDIR $(KDIR))
+$(warning MALI_PLATFORM $(MALI_PLATFORM))
+endif
+
+# Set up build config
+export CONFIG_MALI400=m
+
+ifneq ($(MALI_PLATFORM),)
+export MALI_PLATFORM_FILES = $(wildcard platform/$(MALI_PLATFORM)/*.c)
+endif
+
+ifeq ($(USING_PROFILING),1)
+ifeq ($(CONFIG_TRACEPOINTS),)
+$(warning CONFIG_TRACEPOINTS reqired for profiling)
+else
+export CONFIG_MALI400_PROFILING=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_PROFILING=1
+ifeq ($(USING_INTERNAL_PROFILING),1)
+export CONFIG_MALI400_INTERNAL_PROFILING=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_INTERNAL_PROFILING=1
+endif
+endif
+endif
+
+ifneq ($(BUILD),release)
+export CONFIG_MALI400_DEBUG=y
+endif
+
+all: $(UMP_SYMVERS_FILE)
+       $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) modules
+       @rm $(FILES_PREFIX)__malidrv_build_info.c $(FILES_PREFIX)__malidrv_build_info.o
+
+clean:
+       $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+
+kernelrelease:
+       $(MAKE) ARCH=$(ARCH) -C $(KDIR) kernelrelease
+
+export CONFIG KBUILD_EXTRA_SYMBOLS
diff --git a/drivers/gpu/arm/mali400/mali/__malidrv_build_info.c b/drivers/gpu/arm/mali400/mali/__malidrv_build_info.c
new file mode 100644 (file)
index 0000000..ed3062f
--- /dev/null
@@ -0,0 +1 @@
+const char *__malidrv_build_info(void) { return "malidrv:  API_VERSION=19 REPO_URL=r3p2-01rel0 REVISION=r3p2-01rel0 CHANGED_REVISION= CHANGE_DATE= BUILD_DATE=Mon Dec 17 14:01:05 KST 2012 BUILD=release TARGET_PLATFORM=pegasus-m400 MALI_PLATFORM=pegasus-m400 KDIR= OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=16 USING_UMP=y USING_PROFILING= USING_INTERNAL_PROFILING= USING_GPU_UTILIZATION=1 MALI_UPPER_HALF_SCHEDULING=1";}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_block_allocator.c b/drivers/gpu/arm/mali400/mali/common/mali_block_allocator.c
new file mode 100644 (file)
index 0000000..3f9a692
--- /dev/null
@@ -0,0 +1,390 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_block_allocator.h"
+#include "mali_osk.h"
+
+#define MALI_BLOCK_SIZE (256UL * 1024UL)  /* 256 kB, remember to keep the ()s */
+
+typedef struct block_info
+{
+       struct block_info * next;
+} block_info;
+
+/* The structure used as the handle produced by block_allocator_allocate,
+ * and removed by block_allocator_release */
+typedef struct block_allocator_allocation
+{
+       /* The list will be released in reverse order */
+       block_info *last_allocated;
+       mali_allocation_engine * engine;
+       mali_memory_allocation * descriptor;
+       u32 start_offset;
+       u32 mapping_length;
+} block_allocator_allocation;
+
+
+typedef struct block_allocator
+{
+    _mali_osk_lock_t *mutex;
+       block_info * all_blocks;
+       block_info * first_free;
+       u32 base;
+       u32 cpu_usage_adjust;
+       u32 num_blocks;
+} block_allocator;
+
+MALI_STATIC_INLINE u32 get_phys(block_allocator * info, block_info * block);
+static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine,  mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+static void block_allocator_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block);
+static void block_allocator_release_page_table_block( mali_page_table_block *page_table_block );
+static void block_allocator_destroy(mali_physical_memory_allocator * allocator);
+static u32 block_allocator_stat(mali_physical_memory_allocator * allocator);
+
+mali_physical_memory_allocator * mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size, const char *name)
+{
+       mali_physical_memory_allocator * allocator;
+       block_allocator * info;
+       u32 usable_size;
+       u32 num_blocks;
+
+       usable_size = size & ~(MALI_BLOCK_SIZE - 1);
+       MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size));
+       MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size));
+       num_blocks = usable_size / MALI_BLOCK_SIZE;
+       MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks));
+
+       if (usable_size == 0)
+       {
+               MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size));
+               return NULL;
+       }
+
+       allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator));
+       if (NULL != allocator)
+       {
+               info = _mali_osk_malloc(sizeof(block_allocator));
+               if (NULL != info)
+               {
+            info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED, 0, _MALI_OSK_LOCK_ORDER_MEM_INFO);
+            if (NULL != info->mutex)
+            {
+                       info->all_blocks = _mali_osk_malloc(sizeof(block_info) * num_blocks);
+                           if (NULL != info->all_blocks)
+                           {
+                                   u32 i;
+                                   info->first_free = NULL;
+                                   info->num_blocks = num_blocks;
+
+                                   info->base = base_address;
+                                   info->cpu_usage_adjust = cpu_usage_adjust;
+
+                                   for ( i = 0; i < num_blocks; i++)
+                                   {
+                                           info->all_blocks[i].next = info->first_free;
+                                           info->first_free = &info->all_blocks[i];
+                                   }
+
+                                   allocator->allocate = block_allocator_allocate;
+                                   allocator->allocate_page_table_block = block_allocator_allocate_page_table_block;
+                                   allocator->destroy = block_allocator_destroy;
+                                   allocator->stat = block_allocator_stat;
+                                   allocator->ctx = info;
+                                       allocator->name = name;
+
+                                   return allocator;
+                           }
+                _mali_osk_lock_term(info->mutex);
+            }
+                       _mali_osk_free(info);
+               }
+               _mali_osk_free(allocator);
+       }
+
+       return NULL;
+}
+
+static void block_allocator_destroy(mali_physical_memory_allocator * allocator)
+{
+       block_allocator * info;
+       MALI_DEBUG_ASSERT_POINTER(allocator);
+       MALI_DEBUG_ASSERT_POINTER(allocator->ctx);
+       info = (block_allocator*)allocator->ctx;
+
+       _mali_osk_free(info->all_blocks);
+    _mali_osk_lock_term(info->mutex);
+       _mali_osk_free(info);
+       _mali_osk_free(allocator);
+}
+
+MALI_STATIC_INLINE u32 get_phys(block_allocator * info, block_info * block)
+{
+       return info->base + ((block - info->all_blocks) * MALI_BLOCK_SIZE);
+}
+
+static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+       block_allocator * info;
+       u32 left;
+       block_info * last_allocated = NULL;
+       mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
+       block_allocator_allocation *ret_allocation;
+
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+       MALI_DEBUG_ASSERT_POINTER(descriptor);
+       MALI_DEBUG_ASSERT_POINTER(offset);
+       MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+       info = (block_allocator*)ctx;
+       left = descriptor->size - *offset;
+       MALI_DEBUG_ASSERT(0 != left);
+
+       if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+       ret_allocation = _mali_osk_malloc( sizeof(block_allocator_allocation) );
+
+       if ( NULL == ret_allocation )
+       {
+               /* Failure; try another allocator by returning MALI_MEM_ALLOC_NONE */
+               _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+               return result;
+       }
+
+       ret_allocation->start_offset = *offset;
+       ret_allocation->mapping_length = 0;
+
+       while ((left > 0) && (info->first_free))
+       {
+               block_info * block;
+               u32 phys_addr;
+               u32 padding;
+               u32 current_mapping_size;
+
+               block = info->first_free;
+               info->first_free = info->first_free->next;
+               block->next = last_allocated;
+               last_allocated = block;
+
+               phys_addr = get_phys(info, block);
+
+               padding = *offset & (MALI_BLOCK_SIZE-1);
+
+               if (MALI_BLOCK_SIZE - padding < left)
+               {
+                       current_mapping_size = MALI_BLOCK_SIZE - padding;
+               }
+               else
+               {
+                       current_mapping_size = left;
+               }
+
+               if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, phys_addr + padding, info->cpu_usage_adjust, current_mapping_size))
+               {
+                       MALI_DEBUG_PRINT(1, ("Mapping of physical memory  failed\n"));
+                       result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+                       mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->start_offset, ret_allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);
+
+                       /* release all memory back to the pool */
+                       while (last_allocated)
+                       {
+                               /* This relinks every block we've just allocated back into the free-list */
+                               block = last_allocated->next;
+                               last_allocated->next = info->first_free;
+                               info->first_free = last_allocated;
+                               last_allocated = block;
+                       }
+
+                       break;
+               }
+
+               *offset += current_mapping_size;
+               left -= current_mapping_size;
+               ret_allocation->mapping_length += current_mapping_size;
+       }
+
+       _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+       if (last_allocated)
+       {
+               if (left) result = MALI_MEM_ALLOC_PARTIAL;
+               else result = MALI_MEM_ALLOC_FINISHED;
+
+               /* Record all the information about this allocation */
+               ret_allocation->last_allocated = last_allocated;
+               ret_allocation->engine = engine;
+               ret_allocation->descriptor = descriptor;
+
+               alloc_info->ctx = info;
+               alloc_info->handle = ret_allocation;
+               alloc_info->release = block_allocator_release;
+       }
+       else
+       {
+               /* Free the allocation information - nothing to be passed back */
+               _mali_osk_free( ret_allocation );
+       }
+
+       return result;
+}
+
+static void block_allocator_release(void * ctx, void * handle)
+{
+       block_allocator * info;
+       block_info * block, * next;
+       block_allocator_allocation *allocation;
+
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+       MALI_DEBUG_ASSERT_POINTER(handle);
+
+       info = (block_allocator*)ctx;
+       allocation = (block_allocator_allocation*)handle;
+       block = allocation->last_allocated;
+
+       MALI_DEBUG_ASSERT_POINTER(block);
+
+       if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+       {
+               MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+               return;
+       }
+
+       /* unmap */
+       mali_allocation_engine_unmap_physical(allocation->engine, allocation->descriptor, allocation->start_offset, allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);
+
+       while (block)
+       {
+               MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block > (info->all_blocks + info->num_blocks))));
+
+               next = block->next;
+
+               /* relink into free-list */
+               block->next = info->first_free;
+               info->first_free = block;
+
+               /* advance the loop */
+               block = next;
+       }
+
+       _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+       _mali_osk_free( allocation );
+}
+
+
+static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block)
+{
+       block_allocator * info;
+       mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+       MALI_DEBUG_ASSERT_POINTER(block);
+       info = (block_allocator*)ctx;
+
+       if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+       if (NULL != info->first_free)
+       {
+               void * virt;
+               u32 phys;
+               u32 size;
+               block_info * alloc;
+               alloc = info->first_free;
+
+               phys = get_phys(info, alloc); /* Does not modify info or alloc */
+               size = MALI_BLOCK_SIZE; /* Must be multiple of MALI_MMU_PAGE_SIZE */
+               virt = _mali_osk_mem_mapioregion( phys, size, "Mali block allocator page tables" );
+
+               /* Failure of _mali_osk_mem_mapioregion will result in MALI_MEM_ALLOC_INTERNAL_FAILURE,
+                * because it's unlikely another allocator will be able to map in. */
+
+               if ( NULL != virt )
+               {
+                       block->ctx = info; /* same as incoming ctx */
+                       block->handle = alloc;
+                       block->phys_base = phys;
+                       block->size = size;
+                       block->release = block_allocator_release_page_table_block;
+                       block->mapping = virt;
+
+                       info->first_free = alloc->next;
+
+                       alloc->next = NULL; /* Could potentially link many blocks together instead */
+
+                       result = MALI_MEM_ALLOC_FINISHED;
+               }
+       }
+       else result = MALI_MEM_ALLOC_NONE;
+
+       _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+       return result;
+}
+
+
+static void block_allocator_release_page_table_block( mali_page_table_block *page_table_block )
+{
+       block_allocator * info;
+       block_info * block, * next;
+
+       MALI_DEBUG_ASSERT_POINTER( page_table_block );
+
+       info = (block_allocator*)page_table_block->ctx;
+       block = (block_info*)page_table_block->handle;
+
+       MALI_DEBUG_ASSERT_POINTER(info);
+       MALI_DEBUG_ASSERT_POINTER(block);
+
+
+       if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+       {
+               MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+               return;
+       }
+
+       /* Unmap all the physical memory at once */
+       _mali_osk_mem_unmapioregion( page_table_block->phys_base, page_table_block->size, page_table_block->mapping );
+
+       /** @note This loop handles the case where more than one block_info was linked.
+        * Probably unnecessary for page table block releasing. */
+       while (block)
+       {
+               next = block->next;
+
+               MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block > (info->all_blocks + info->num_blocks))));
+
+               block->next = info->first_free;
+               info->first_free = block;
+
+               block = next;
+       }
+
+       _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+}
+
+static u32 block_allocator_stat(mali_physical_memory_allocator * allocator)
+{
+       block_allocator * info;
+       block_info *block;
+       u32 free_blocks = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(allocator);
+
+       info = (block_allocator*)allocator->ctx;
+       block = info->first_free;
+
+       while(block)
+       {
+               free_blocks++;
+               block = block->next;
+       }
+       return (info->num_blocks - free_blocks) * MALI_BLOCK_SIZE;
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_block_allocator.h b/drivers/gpu/arm/mali400/mali/common/mali_block_allocator.h
new file mode 100644 (file)
index 0000000..9978271
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_BLOCK_ALLOCATOR_H__
+#define __MALI_BLOCK_ALLOCATOR_H__
+
+#include "mali_kernel_memory_engine.h"
+
+mali_physical_memory_allocator * mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size, const char *name);
+
+#endif /* __MALI_BLOCK_ALLOCATOR_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_broadcast.c b/drivers/gpu/arm/mali400/mali/common/mali_broadcast.c
new file mode 100644 (file)
index 0000000..450c903
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_broadcast.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+static const int bcast_unit_reg_size = 0x1000;
+static const int bcast_unit_addr_broadcast_mask = 0x0;
+static const int bcast_unit_addr_irq_override_mask = 0x4;
+
+struct mali_bcast_unit
+{
+       struct mali_hw_core hw_core;
+       u32 current_mask;
+};
+
+struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource)
+{
+       struct mali_bcast_unit *bcast_unit = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(resource);
+       MALI_DEBUG_PRINT(2, ("Mali Broadcast unit: Creating Mali Broadcast unit: %s\n", resource->description));
+
+       bcast_unit = _mali_osk_malloc(sizeof(struct mali_bcast_unit));
+       if (NULL == bcast_unit)
+       {
+               return NULL;
+       }
+
+       if (_MALI_OSK_ERR_OK == mali_hw_core_create(&bcast_unit->hw_core, resource, bcast_unit_reg_size))
+       {
+               bcast_unit->current_mask = 0;
+               mali_bcast_reset(bcast_unit);
+
+               return bcast_unit;
+       }
+       else
+       {
+               MALI_PRINT_ERROR(("Mali Broadcast unit: Failed to allocate memory for Broadcast unit\n"));
+       }
+
+       return NULL;
+}
+
+void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit)
+{
+       MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+       mali_hw_core_delete(&bcast_unit->hw_core);
+       _mali_osk_free(bcast_unit);
+}
+
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group)
+{
+       u32 core_id;
+       u32 broadcast_mask;
+
+       MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       core_id = mali_pp_core_get_id(mali_group_get_pp_core(group));
+       broadcast_mask = bcast_unit->current_mask;
+
+       /* set the bit corresponding to the group's core's id to 1 */
+       core_id = 1 << core_id;
+       broadcast_mask |= (core_id); /* add PP core to broadcast */
+       broadcast_mask |= (core_id << 16); /* add MMU to broadcast */
+
+       /* store mask so we can restore on reset */
+       bcast_unit->current_mask = broadcast_mask;
+
+       mali_bcast_reset(bcast_unit);
+}
+
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group)
+{
+       u32 core_id;
+       u32 broadcast_mask;
+
+       MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       core_id = mali_pp_core_get_id(mali_group_get_pp_core(group));
+       broadcast_mask = bcast_unit->current_mask;
+
+       /* set the bit corresponding to the group's core's id to 0 */
+       core_id = 1 << core_id;
+       broadcast_mask &= ~((core_id << 16) | core_id);
+
+       /* store mask so we can restore on reset */
+       bcast_unit->current_mask = broadcast_mask;
+
+       mali_bcast_reset(bcast_unit);
+}
+
+void mali_bcast_reset(struct mali_bcast_unit *bcast_unit)
+{
+       MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+       /* set broadcast mask */
+       mali_hw_core_register_write(&bcast_unit->hw_core,
+                                   bcast_unit_addr_broadcast_mask,
+                                   bcast_unit->current_mask);
+
+       /* set IRQ override mask */
+       mali_hw_core_register_write(&bcast_unit->hw_core,
+                                   bcast_unit_addr_irq_override_mask,
+                                   bcast_unit->current_mask & 0xFF);
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_broadcast.h b/drivers/gpu/arm/mali400/mali/common/mali_broadcast.h
new file mode 100644 (file)
index 0000000..a6ccde5
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/*
+ *  Interface for the broadcast unit on Mali-450.
+ *
+ * - Represents up to 8 × (MMU + PP) pairs.
+ * - Supports dynamically changing which (MMU + PP) pairs receive the broadcast by
+ *   setting a mask.
+ */
+
+#include "mali_hw_core.h"
+#include "mali_group.h"
+
+struct mali_bcast_unit;
+
+struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource);
+void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit);
+
+/* Add a group to the list of (MMU + PP) pairs broadcasts go out to. */
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group);
+
+/* Remove a group from the list of (MMU + PP) pairs broadcasts go out to. */
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group);
+
+/* Re-set cached mask. This needs to be called after having been suspended. */
+void mali_bcast_reset(struct mali_bcast_unit *bcast_unit);
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_device_pause_resume.c b/drivers/gpu/arm/mali400/mali/common/mali_device_pause_resume.c
new file mode 100644 (file)
index 0000000..74c545f
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_device_pause_resume.c
+ * Implementation of the Mali pause/resume functionality
+ */
+
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_group.h"
+
+void mali_dev_pause(mali_bool *power_is_on)
+{
+       mali_bool power_is_on_tmp;
+
+       /* Locking the current power state - so it will not switch from being ON to OFF, but it might remain OFF */
+       power_is_on_tmp = _mali_osk_pm_dev_ref_add_no_power_on();
+       if (NULL != power_is_on)
+       {
+               *power_is_on = power_is_on_tmp;
+       }
+
+       mali_gp_scheduler_suspend();
+       mali_pp_scheduler_suspend();
+}
+
+void mali_dev_resume(void)
+{
+       mali_gp_scheduler_resume();
+       mali_pp_scheduler_resume();
+
+       /* Release our PM reference, as it is now safe to turn off the GPU again */
+       _mali_osk_pm_dev_ref_dec_no_power_on();
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_device_pause_resume.h b/drivers/gpu/arm/mali400/mali/common/mali_device_pause_resume.h
new file mode 100644 (file)
index 0000000..86b30c4
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_DEVICE_PAUSE_RESUME_H__
+#define __MALI_DEVICE_PAUSE_RESUME_H__
+
+#include "mali_osk.h"
+
+/**
+ * Pause the scheduling and power state changes of Mali device driver.
+ * mali_dev_resume() must always be called as soon as possible after this function
+ * in order to resume normal operation of the Mali driver.
+ *
+ * @param power_is_on Receives the current power status of the Mali GPU. MALI_TRUE if the GPU is powered on
+ */
+void mali_dev_pause(mali_bool *power_is_on);
+
+/**
+ * Resume scheduling and allow power changes in Mali device driver.
+ * This must always be called after mali_dev_pause().
+ */
+void mali_dev_resume(void);
+
+#endif /* __MALI_DEVICE_PAUSE_RESUME_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_dlbu.c b/drivers/gpu/arm/mali400/mali/common/mali_dlbu.c
new file mode 100644 (file)
index 0000000..b75c75c
--- /dev/null
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_dlbu.h"
+#include "mali_memory.h"
+#include "mali_pp.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+
+/**
+ * Size of DLBU registers in bytes
+ */
+#define MALI_DLBU_SIZE 0x400
+
+u32 mali_dlbu_phys_addr = 0;
+static mali_io_address mali_dlbu_cpu_addr = 0;
+
+/**
+ * DLBU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_dlbu_register {
+       MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR = 0x0000, /**< Master tile list physical base address;
+                                                            31:12 Physical address to the page used for the DLBU
+                                                            0 DLBU enable - set this bit to 1 enables the AXI bus
+                                                            between PPs and L2s, setting to 0 disables the router and
+                                                            no further transactions are sent to DLBU */
+       MALI_DLBU_REGISTER_MASTER_TLLIST_VADDR     = 0x0004, /**< Master tile list virtual base address;
+                                                            31:12 Virtual address to the page used for the DLBU */
+       MALI_DLBU_REGISTER_TLLIST_VBASEADDR        = 0x0008, /**< Tile list virtual base address;
+                                                            31:12 Virtual address to the tile list. This address is used when
+                                                            calculating the call address sent to PP.*/
+       MALI_DLBU_REGISTER_FB_DIM                  = 0x000C, /**< Framebuffer dimension;
+                                                            23:16 Number of tiles in Y direction-1
+                                                            7:0 Number of tiles in X direction-1 */
+       MALI_DLBU_REGISTER_TLLIST_CONF             = 0x0010, /**< Tile list configuration;
+                                                            29:28 select the size of each allocated block: 0=128 bytes, 1=256, 2=512, 3=1024
+                                                            21:16 2^n number of tiles to be binned to one tile list in Y direction
+                                                            5:0 2^n number of tiles to be binned to one tile list in X direction */
+       MALI_DLBU_REGISTER_START_TILE_POS          = 0x0014, /**< Start tile positions;
+                                                            31:24 start position in Y direction for group 1
+                                                            23:16 start position in X direction for group 1
+                                                            15:8 start position in Y direction for group 0
+                                                            7:0 start position in X direction for group 0 */
+       MALI_DLBU_REGISTER_PP_ENABLE_MASK          = 0x0018, /**< PP enable mask;
+                                                            7 enable PP7 for load balancing
+                                                            6 enable PP6 for load balancing
+                                                            5 enable PP5 for load balancing
+                                                            4 enable PP4 for load balancing
+                                                            3 enable PP3 for load balancing
+                                                            2 enable PP2 for load balancing
+                                                            1 enable PP1 for load balancing
+                                                            0 enable PP0 for load balancing */
+} mali_dlbu_register;
+
+typedef enum
+{
+       PP0ENABLE = 0,
+       PP1ENABLE,
+       PP2ENABLE,
+       PP3ENABLE,
+       PP4ENABLE,
+       PP5ENABLE,
+       PP6ENABLE,
+       PP7ENABLE
+} mali_dlbu_pp_enable;
+
+struct mali_dlbu_core
+{
+       struct mali_hw_core     hw_core;           /**< Common for all HW cores */
+       u32                     pp_cores_mask;     /**< This is a mask for the PP cores whose operation will be controlled by LBU
+                                                     see MALI_DLBU_REGISTER_PP_ENABLE_MASK register */
+};
+
+_mali_osk_errcode_t mali_dlbu_initialize(void)
+{
+
+       MALI_DEBUG_PRINT(2, ("Mali DLBU: Initializing\n"));
+
+       if (_MALI_OSK_ERR_OK == mali_mmu_get_table_page(&mali_dlbu_phys_addr, &mali_dlbu_cpu_addr))
+       {
+               MALI_SUCCESS;
+       }
+
+       return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_dlbu_terminate(void)
+{
+       MALI_DEBUG_PRINT(3, ("Mali DLBU: terminating\n"));
+
+       mali_mmu_release_table_page(mali_dlbu_phys_addr);
+}
+
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t * resource)
+{
+       struct mali_dlbu_core *core = NULL;
+
+       MALI_DEBUG_PRINT(2, ("Mali DLBU: Creating Mali dynamic load balancing unit: %s\n", resource->description));
+
+       core = _mali_osk_malloc(sizeof(struct mali_dlbu_core));
+       if (NULL != core)
+       {
+               if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI_DLBU_SIZE))
+               {
+                       core->pp_cores_mask = 0;
+                       if (_MALI_OSK_ERR_OK == mali_dlbu_reset(core))
+                       {
+                               return core;
+                       }
+                       MALI_PRINT_ERROR(("Failed to reset DLBU %s\n", core->hw_core.description));
+                       mali_hw_core_delete(&core->hw_core);
+               }
+
+               _mali_osk_free(core);
+       }
+       else
+       {
+               MALI_PRINT_ERROR(("Mali DLBU: Failed to allocate memory for DLBU core\n"));
+       }
+
+       return NULL;
+}
+
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu)
+{
+       MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+       mali_dlbu_reset(dlbu);
+       mali_hw_core_delete(&dlbu->hw_core);
+       _mali_osk_free(dlbu);
+}
+
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu)
+{
+       u32 dlbu_registers[7];
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+       MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+       MALI_DEBUG_PRINT(4, ("Mali DLBU: mali_dlbu_reset: %s\n", dlbu->hw_core.description));
+
+       dlbu_registers[0] = mali_dlbu_phys_addr | 1; /* bit 0 enables the whole core */
+       dlbu_registers[1] = MALI_DLBU_VIRT_ADDR;
+       dlbu_registers[2] = 0;
+       dlbu_registers[3] = 0;
+       dlbu_registers[4] = 0;
+       dlbu_registers[5] = 0;
+       dlbu_registers[6] = dlbu->pp_cores_mask;
+
+       /* write reset values to core registers */
+       mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR, dlbu_registers, 7);
+
+       err = _MALI_OSK_ERR_OK;
+
+       return err;
+}
+
+void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group)
+{
+       struct mali_pp_core *pp_core;
+       u32 core_id;
+
+       MALI_DEBUG_ASSERT_POINTER( dlbu );
+       MALI_DEBUG_ASSERT_POINTER( group );
+
+       pp_core = mali_group_get_pp_core(group);
+       core_id = mali_pp_core_get_id(pp_core);
+
+       dlbu->pp_cores_mask |= (0x1 << core_id);
+       MALI_DEBUG_PRINT(3, ("Mali DLBU: Adding core[%d] New mask= 0x%02x\n",core_id , dlbu->pp_cores_mask));
+
+       mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, dlbu->pp_cores_mask);
+}
+
+/* Remove a group from the DLBU */
+void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group)
+{
+       struct mali_pp_core *pp_core;
+       u32 core_id;
+
+       MALI_DEBUG_ASSERT_POINTER( dlbu );
+       MALI_DEBUG_ASSERT_POINTER( group );
+
+       pp_core = mali_group_get_pp_core(group);
+       core_id = mali_pp_core_get_id(pp_core);
+
+       dlbu->pp_cores_mask &= ~(0x1 << core_id);
+               MALI_DEBUG_PRINT(3, ("Mali DLBU: Removing core[%d] New mask= 0x%02x\n", core_id, dlbu->pp_cores_mask));
+
+       mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, dlbu->pp_cores_mask);
+}
+
+/* Configure the DLBU for \a job. This needs to be done before the job is started on the groups in the DLBU. */
+void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job)
+{
+       u32 *registers;
+       MALI_DEBUG_ASSERT(job);
+       registers = mali_pp_job_get_dlbu_registers(job);
+       MALI_DEBUG_PRINT(4, ("Mali DLBU: Starting job\n"));
+
+       /* Writing 4 registers:
+        * DLBU registers except the first two (written once at DLBU initialisation / reset) and the PP_ENABLE_MASK register */
+       mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_VBASEADDR, registers, 4);
+
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_dlbu.h b/drivers/gpu/arm/mali400/mali/common/mali_dlbu.h
new file mode 100644 (file)
index 0000000..dfb86d5
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_DLBU_H__
+#define __MALI_DLBU_H__
+
+#define MALI_DLBU_VIRT_ADDR 0xFFF00000 /* master tile virtual address fixed at this value and mapped into every session */
+
+#include "mali_osk.h"
+
+struct mali_pp_job;
+struct mali_group;
+
+/* Physical address backing the DLBU master tile list.
+ * NOTE(review): presumably set during mali_dlbu_initialize() -- confirm in mali_dlbu.c. */
+extern u32 mali_dlbu_phys_addr;
+
+struct mali_dlbu_core;
+
+/* One-time setup / teardown of the DLBU subsystem. */
+_mali_osk_errcode_t mali_dlbu_initialize(void);
+void mali_dlbu_terminate(void);
+
+/* Create / destroy a DLBU core object mapped onto the given HW resource. */
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t * resource);
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu);
+
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu);
+
+/* Add / remove a PP group to / from the set of cores load-balanced by the DLBU. */
+void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group);
+void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group);
+
+/* Configure the DLBU for \a job; must be called before the job is started on the groups. */
+void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job);
+
+#endif /* __MALI_DLBU_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_gp.c b/drivers/gpu/arm/mali400/mali/common/mali_gp.c
new file mode 100644 (file)
index 0000000..92ede3f
--- /dev/null
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_gp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "regs/mali_gp_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+static struct mali_gp_core *mali_global_gp_core = NULL;
+
+/* Interrupt handlers */
+static void mali_gp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data);
+
+/**
+ * Create the (single) GP core object: map its register space, reset it,
+ * attach it to \a group and install the IRQ handlers. On success the core is
+ * published via the mali_global_gp_core static. Returns NULL on any failure,
+ * releasing partially acquired resources in reverse order of acquisition.
+ */
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t * resource, struct mali_group *group)
+{
+       struct mali_gp_core* core = NULL;
+
+       /* Only one GP core is supported; it must not already exist. */
+       MALI_DEBUG_ASSERT(NULL == mali_global_gp_core);
+       MALI_DEBUG_PRINT(2, ("Mali GP: Creating Mali GP core: %s\n", resource->description));
+
+       core = _mali_osk_malloc(sizeof(struct mali_gp_core));
+       if (NULL != core)
+       {
+               /* No performance counters selected until a job configures them. */
+               core->counter_src0_used = MALI_HW_CORE_NO_COUNTER;
+               core->counter_src1_used = MALI_HW_CORE_NO_COUNTER;
+               if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALIGP2_REGISTER_ADDRESS_SPACE_SIZE))
+               {
+                       _mali_osk_errcode_t ret;
+
+                       ret = mali_gp_reset(core);
+
+                       if (_MALI_OSK_ERR_OK == ret)
+                       {
+                               ret = mali_group_add_gp_core(group, core);
+                               if (_MALI_OSK_ERR_OK == ret)
+                               {
+                                       /* Setup IRQ handlers (which will do IRQ probing if needed) */
+                                       core->irq = _mali_osk_irq_init(resource->irq,
+                                                                      mali_group_upper_half_gp,
+                                                                      group,
+                                                                      mali_gp_irq_probe_trigger,
+                                                                      mali_gp_irq_probe_ack,
+                                                                      core,
+                                                                      "mali_gp_irq_handlers");
+                                       if (NULL != core->irq)
+                                       {
+                                               MALI_DEBUG_PRINT(4, ("Mali GP: set global gp core from 0x%08X to 0x%08X\n", mali_global_gp_core, core));
+                                               mali_global_gp_core = core;
+
+                                               return core;
+                                       }
+                                       else
+                                       {
+                                               MALI_PRINT_ERROR(("Mali GP: Failed to setup interrupt handlers for GP core %s\n", core->hw_core.description));
+                                       }
+                                       /* IRQ setup failed: detach the core from the group again. */
+                                       mali_group_remove_gp_core(group);
+                               }
+                               else
+                               {
+                                       MALI_PRINT_ERROR(("Mali GP: Failed to add core %s to group\n", core->hw_core.description));
+                               }
+                       }
+                       /* Reset or group attach failed: unmap the register space. */
+                       mali_hw_core_delete(&core->hw_core);
+               }
+
+               _mali_osk_free(core);
+       }
+       else
+       {
+               MALI_PRINT_ERROR(("Failed to allocate memory for GP core\n"));
+       }
+
+       return NULL;
+}
+
+/* Tear down a GP core created by mali_gp_create(): release the IRQ handlers
+ * and the register mapping, clear the global core pointer, free the object. */
+void mali_gp_delete(struct mali_gp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       _mali_osk_irq_term(core->irq);
+       mali_hw_core_delete(&core->hw_core);
+       mali_global_gp_core = NULL;
+       _mali_osk_free(core);
+}
+
+/* Request the GP core to stop its bus. Asynchronous: returns immediately;
+ * use mali_gp_stop_bus_wait() to wait for the stop to take effect. */
+void mali_gp_stop_bus(struct mali_gp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+}
+
+/* Issue a bus-stop command and busy-poll the status register until the
+ * BUS_STOPPED bit is observed, bounded by MALI_REG_POLL_COUNT_FAST reads.
+ * Returns _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT on timeout. */
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core)
+{
+       int attempt = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       /* Send the stop bus command. */
+       mali_gp_stop_bus(core);
+
+       /* Poll until the hardware reports the bus as stopped. */
+       while (attempt < MALI_REG_POLL_COUNT_FAST)
+       {
+               u32 status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+               if (status & MALIGP2_REG_VAL_STATUS_BUS_STOPPED)
+               {
+                       return _MALI_OSK_ERR_OK;
+               }
+               attempt++;
+       }
+
+       MALI_PRINT_ERROR(("Mali GP: Failed to stop bus on %s\n", core->hw_core.description));
+       return _MALI_OSK_ERR_FAULT;
+}
+
+/* Hard-reset the GP core. The WRITE_BOUND_LOW register is used as a scratch
+ * register to detect when the reset has taken effect: an invalid value is
+ * written before the reset command; afterwards a check value is repeatedly
+ * written and read back -- once it sticks, the core has come out of reset.
+ * The statement order below is part of the hardware protocol; do not reorder. */
+void mali_gp_hard_reset(struct mali_gp_core *core)
+{
+       const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW;
+       const u32 reset_invalid_value = 0xC0FFE000;
+       const u32 reset_check_value = 0xC01A0000;
+       const u32 reset_default_value = 0;
+       int i;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+       MALI_DEBUG_PRINT(4, ("Mali GP: Hard reset of core %s\n", core->hw_core.description));
+
+       mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_invalid_value);
+
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_RESET);
+
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++)
+       {
+               mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value);
+               if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register))
+               {
+                       break;
+               }
+       }
+
+       if (MALI_REG_POLL_COUNT_FAST == i)
+       {
+               MALI_PRINT_ERROR(("Mali GP: The hard reset loop didn't work, unable to recover\n"));
+       }
+
+       mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_default_value); /* set it back to the default */
+       /* Re-enable interrupts */
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+
+}
+
+/* Kick off a soft reset without waiting for completion: mask all IRQs, clear
+ * any stale reset-completed flag, then issue SOFT_RESET. Pair with
+ * mali_gp_reset_wait() to wait for the reset to finish. */
+void mali_gp_reset_async(struct mali_gp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       MALI_DEBUG_PRINT(4, ("Mali GP: Reset of core %s\n", core->hw_core.description));
+
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALI400GP_REG_VAL_IRQ_RESET_COMPLETED);
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALI400GP_REG_VAL_CMD_SOFT_RESET);
+
+}
+
+/* Wait for a soft reset started by mali_gp_reset_async() to complete by
+ * polling RAWSTAT for the reset-completed bit (at most MALI_REG_POLL_COUNT_FAST
+ * reads). On success the interrupts are re-enabled and _MALI_OSK_ERR_OK is
+ * returned; on timeout _MALI_OSK_ERR_FAULT. */
+_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core)
+{
+       u32 rawstat = 0;
+       int attempt;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       for (attempt = 0; attempt < MALI_REG_POLL_COUNT_FAST; attempt++)
+       {
+               rawstat = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+               if (rawstat & MALI400GP_REG_VAL_IRQ_RESET_COMPLETED)
+               {
+                       /* Reset done: clear all pending IRQs and unmask the ones in use. */
+                       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+                       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+                       return _MALI_OSK_ERR_OK;
+               }
+       }
+
+       MALI_PRINT_ERROR(("Mali GP: Failed to reset core %s, rawstat: 0x%08x\n",
+                        core->hw_core.description, rawstat));
+       return _MALI_OSK_ERR_FAULT;
+}
+
+/* Synchronous soft reset: trigger the reset, then wait for it to complete. */
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core)
+{
+       mali_gp_reset_async(core);
+       return mali_gp_reset_wait(core);
+}
+
+/**
+ * Program the GP core with \a job's frame registers and performance-counter
+ * selection, then start the job (VS and/or PLBU passes, depending on what the
+ * job contains). The job must contain at least one of the two passes.
+ */
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job)
+{
+       u32 startcmd = 0;
+       u32 *frame_registers;
+
+       /* Validate pointers before any dereference. The original asserted core
+        * only after core/job had already been dereferenced above the assert. */
+       MALI_DEBUG_ASSERT_POINTER(core);
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       frame_registers = mali_gp_job_get_frame_registers(job);
+       core->counter_src0_used = mali_gp_job_get_perf_counter_src0(job);
+       core->counter_src1_used = mali_gp_job_get_perf_counter_src1(job);
+
+       if (mali_gp_job_has_vs_job(job))
+       {
+               startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
+       }
+
+       if (mali_gp_job_has_plbu_job(job))
+       {
+               startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
+       }
+
+       /* A job with neither a VS nor a PLBU pass must never reach this point. */
+       MALI_DEBUG_ASSERT(0 != startcmd);
+
+       mali_hw_core_register_write_array_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR, frame_registers, MALIGP2_NUM_REGS_FRAME);
+
+       /* Select and enable the HW performance counters requested by the job. */
+       if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
+       {
+               mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
+               mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+       }
+       if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+       {
+               mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
+               mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+       }
+
+       MALI_DEBUG_PRINT(3, ("Mali GP: Starting job (0x%08x) on core %s with command 0x%08X\n", job, core->hw_core.description, startcmd));
+
+       /* Barrier to make sure the previous register write is finished */
+       _mali_osk_write_mem_barrier();
+
+       /* This is the command that starts the core. */
+       mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd);
+
+       /* Barrier to make sure the previous register write is finished */
+       _mali_osk_write_mem_barrier();
+}
+
+/* Resume a GP job that stalled with PLBU out-of-memory by giving it a new
+ * heap region [start_addr, end_addr). Only acts if the out-of-memory raw
+ * status bit is still set; otherwise the core was reset in the meantime and
+ * the response is silently dropped (see comment at the bottom). */
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr)
+{
+       u32 irq_readout;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+
+       if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM)
+       {
+               mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | MALIGP2_REG_VAL_IRQ_HANG));
+               mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); /* re-enable interrupts */
+               mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR, start_addr);
+               mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR, end_addr);
+
+               MALI_DEBUG_PRINT(3, ("Mali GP: Resuming job\n"));
+
+               mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+               _mali_osk_write_mem_barrier();
+       }
+       /*
+        * else: core has been reset between PLBU_OUT_OF_MEM interrupt and this new heap response.
+        * A timeout or a page fault on Mali-200 PP core can cause this behaviour.
+        */
+}
+
+/* Read the GP core's hardware version register. */
+u32 mali_gp_core_get_version(struct mali_gp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+       return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VERSION);
+}
+
+/* Return the single global GP core object, or NULL if none has been created. */
+struct mali_gp_core *mali_gp_get_global_gp_core(void)
+{
+       return mali_global_gp_core;
+}
+
+/* ------------- interrupt handling below ------------------ */
+/* IRQ-probe trigger callback: unmask the used interrupts and write the
+ * force-hang bit into RAWSTAT so the core raises an interrupt on demand
+ * (used by _mali_osk_irq_init() to discover/verify the IRQ line). */
+static void mali_gp_irq_probe_trigger(void *data)
+{
+       struct mali_gp_core *core = (struct mali_gp_core *)data;
+
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_CMD_FORCE_HANG);
+       _mali_osk_mem_barrier();
+}
+
+/* IRQ-probe acknowledge callback: report _MALI_OSK_ERR_OK only when the
+ * force-hang interrupt raised by mali_gp_irq_probe_trigger() is pending,
+ * clearing it in that case; otherwise the probe did not hit this core. */
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data)
+{
+       struct mali_gp_core *core = (struct mali_gp_core *)data;
+       u32 irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+
+       if (0 == (MALIGP2_REG_VAL_IRQ_FORCE_HANG & irq_readout))
+       {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_FORCE_HANG);
+       _mali_osk_mem_barrier();
+       return _MALI_OSK_ERR_OK;
+}
+
+/* ------ local helper functions below --------- */
+#if MALI_STATE_TRACKING
+/* Append a one-line description of this GP core to buf (state-tracking
+ * debug dump); returns the number of characters written. */
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size)
+{
+       int n = 0;
+
+       n += _mali_osk_snprintf(buf + n, size - n, "\tGP: %s\n", core->hw_core.description);
+
+       return n;
+}
+#endif
+
+/* Read the HW performance counter values for the counters selected at job
+ * start and store them on the job (and report them to the profiling layer
+ * when CONFIG_MALI400_PROFILING is enabled).
+ * NOTE(review): the 'suspend' parameter is unused in this function -- confirm
+ * whether it is needed for API symmetry with the PP variant. */
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job, mali_bool suspend)
+{
+       u32 val0 = 0;
+       u32 val1 = 0;
+
+       if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
+       {
+               val0 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+               mali_gp_job_set_perf_counter_value0(job, val0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+               _mali_osk_profiling_report_hw_counter(COUNTER_VP_C0, val0);
+#endif
+
+       }
+
+       if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+       {
+               val1 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+               mali_gp_job_set_perf_counter_value1(job, val1);
+
+#if defined(CONFIG_MALI400_PROFILING)
+               _mali_osk_profiling_report_hw_counter(COUNTER_VP_C1, val1);
+#endif
+       }
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_gp.h b/drivers/gpu/arm/mali400/mali/common/mali_gp.h
new file mode 100644 (file)
index 0000000..aa246fd
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_H__
+#define __MALI_GP_H__
+
+#include "mali_osk.h"
+#include "mali_gp_job.h"
+#include "mali_hw_core.h"
+#include "regs/mali_gp_regs.h"
+
+struct mali_group;
+
+/**
+ * Definition of the GP core struct
+ * Used to track a GP core in the system.
+ */
+struct mali_gp_core
+{
+       struct mali_hw_core  hw_core;           /**< Common for all HW cores */
+       _mali_osk_irq_t     *irq;               /**< IRQ handler */
+       u32                  counter_src0_used; /**< The selected performance counter 0 when a job is running (MALI_HW_CORE_NO_COUNTER when none) */
+       u32                  counter_src1_used; /**< The selected performance counter 1 when a job is running (MALI_HW_CORE_NO_COUNTER when none) */
+};
+
+_mali_osk_errcode_t mali_gp_initialize(void);
+void mali_gp_terminate(void);
+
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t * resource, struct mali_group *group);
+void mali_gp_delete(struct mali_gp_core *core);
+
+void mali_gp_stop_bus(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core);
+void mali_gp_reset_async(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core);
+void mali_gp_hard_reset(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core);
+
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job);
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr);
+
+u32 mali_gp_core_get_version(struct mali_gp_core *core);
+
+struct mali_gp_core *mali_gp_get_global_gp_core(void);
+
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size);
+
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job, mali_bool suspend);
+
+/*** Accessor functions ***/
+
+/* Human-readable description of the core (from the HW resource). */
+MALI_STATIC_INLINE const char *mali_gp_get_hw_core_desc(struct mali_gp_core *core)
+{
+       return core->hw_core.description;
+}
+
+/*** Register reading/writing functions ***/
+
+/* Read the masked interrupt status register. */
+MALI_STATIC_INLINE u32 mali_gp_get_int_stat(struct mali_gp_core *core)
+{
+       return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+}
+
+/* Mask (disable) all interrupts from this core. */
+MALI_STATIC_INLINE void mali_gp_mask_all_interrupts(struct mali_gp_core *core)
+{
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE);
+}
+
+/* Read the raw interrupt status, filtered down to the interrupts we use. */
+MALI_STATIC_INLINE u32 mali_gp_read_rawstat(struct mali_gp_core *core)
+{
+       return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;
+}
+
+/* Read the core status register. */
+MALI_STATIC_INLINE u32 mali_gp_read_core_status(struct mali_gp_core *core)
+{
+       return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+}
+
+MALI_STATIC_INLINE void mali_gp_enable_interrupts(struct mali_gp_core *core, u32 irq_exceptions)
+{
+       /* Enable all interrupts, except those specified in irq_exceptions */
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK,
+                                   MALIGP2_REG_VAL_IRQ_MASK_USED & ~irq_exceptions);
+}
+
+/* Read the current PLBU heap allocation start address. */
+MALI_STATIC_INLINE u32 mali_gp_read_plbu_alloc_start_addr(struct mali_gp_core *core)
+{
+       return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR);
+}
+
+#endif /* __MALI_GP_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_gp_job.c b/drivers/gpu/arm/mali400/mali/common/mali_gp_job.c
new file mode 100644 (file)
index 0000000..7136526
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_gp_job.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+
+static u32 gp_counter_src0 = MALI_HW_CORE_NO_COUNTER;      /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 gp_counter_src1 = MALI_HW_CORE_NO_COUNTER;          /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+
+/**
+ * Allocate and initialise a GP job from the user-space start-job arguments.
+ * The user->kernel completion/OOM notifications are pre-allocated here so
+ * that reporting back to user space can never fail on OOM later.
+ * Returns the new job, or NULL on allocation/copy failure (nothing leaked).
+ */
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id)
+{
+       struct mali_gp_job *job;
+       u32 perf_counter_flag;
+
+       job = _mali_osk_malloc(sizeof(struct mali_gp_job));
+       if (NULL == job)
+       {
+               return NULL;
+       }
+
+       job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s));
+       if (NULL == job->finished_notification)
+       {
+               goto fail_free_job;
+       }
+
+       job->oom_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
+       if (NULL == job->oom_notification)
+       {
+               goto fail_delete_finished;
+       }
+
+       if (0 != copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_gp_start_job_s)))
+       {
+               goto fail_delete_oom;
+       }
+
+       perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);
+
+       /* case when no counters came from user space
+        * so pass the debugfs / DS-5 provided global ones to the job object */
+       if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
+                       (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)))
+       {
+               mali_gp_job_set_perf_counter_src0(job, mali_gp_job_get_gp_counter_src0());
+               mali_gp_job_set_perf_counter_src1(job, mali_gp_job_get_gp_counter_src1());
+       }
+
+       _mali_osk_list_init(&job->list);
+       job->session = session;
+       job->id = id;
+       job->heap_current_addr = job->uargs.frame_registers[4];
+       job->perf_counter_value0 = 0;
+       job->perf_counter_value1 = 0;
+       job->pid = _mali_osk_get_pid();
+       job->tid = _mali_osk_get_tid();
+
+       return job;
+
+fail_delete_oom:
+       _mali_osk_notification_delete(job->oom_notification);
+fail_delete_finished:
+       _mali_osk_notification_delete(job->finished_notification);
+fail_free_job:
+       _mali_osk_free(job);
+       return NULL;
+}
+
+/* Free a GP job and any notifications that were never sent to user space.
+ * Notification pointers are NULLed before free as a defence against
+ * double-delete. */
+void mali_gp_job_delete(struct mali_gp_job *job)
+{
+       /* Assert the pointer for consistency with the other entry points in
+        * this driver (the original dereferenced job without checking). */
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       /* de-allocate the pre-allocated oom notifications */
+       if (NULL != job->oom_notification)
+       {
+               _mali_osk_notification_delete(job->oom_notification);
+               job->oom_notification = NULL;
+       }
+       if (NULL != job->finished_notification)
+       {
+               _mali_osk_notification_delete(job->finished_notification);
+               job->finished_notification = NULL;
+       }
+
+       _mali_osk_free(job);
+}
+
+/* Get the global (debugfs / DS-5 provided) performance counter 0 source,
+ * used for jobs that did not select counters themselves. */
+u32 mali_gp_job_get_gp_counter_src0(void)
+{
+       return gp_counter_src0;
+}
+
+/* Set the global performance counter 0 source. Always succeeds. */
+mali_bool mali_gp_job_set_gp_counter_src0(u32 counter)
+{
+       gp_counter_src0 = counter;
+
+       return MALI_TRUE;
+}
+
+/* Get the global performance counter 1 source. */
+u32 mali_gp_job_get_gp_counter_src1(void)
+{
+       return gp_counter_src1;
+}
+
+/* Set the global performance counter 1 source. Always succeeds. */
+mali_bool mali_gp_job_set_gp_counter_src1(u32 counter)
+{
+       gp_counter_src1 = counter;
+
+       return MALI_TRUE;
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_gp_job.h b/drivers/gpu/arm/mali400/mali/common/mali_gp_job.h
new file mode 100644 (file)
index 0000000..13258c5
--- /dev/null
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_JOB_H__
+#define __MALI_GP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+
+/**
+ * The structure represents a GP job, including all sub-jobs
+ * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
+ * mechanism works)
+ *
+ * Ownership: created by mali_gp_job_create(), freed by mali_gp_job_delete().
+ * The two notification objects are pre-allocated at creation time.
+ */
+struct mali_gp_job
+{
+       _mali_osk_list_t list;                             /**< Used to link jobs together in the scheduler queue */
+       struct mali_session_data *session;                 /**< Session which submitted this job */
+       _mali_uk_gp_start_job_s uargs;                     /**< Arguments from user space */
+       u32 id;                                            /**< identifier for this job in kernel space (sequential numbering) */
+       u32 heap_current_addr;                             /**< Holds the current HEAP address when the job has completed */
+       u32 perf_counter_value0;                           /**< Value of performance counter 0 (to be returned to user space) */
+       u32 perf_counter_value1;                           /**< Value of performance counter 1 (to be returned to user space) */
+       u32 pid;                                           /**< Process ID of submitting process */
+       u32 tid;                                           /**< Thread ID of submitting thread */
+       _mali_osk_notification_t *finished_notification;   /**< Notification sent back to userspace on job complete */
+       _mali_osk_notification_t *oom_notification;        /**< Notification sent back to userspace on OOM */
+};
+
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id);
+void mali_gp_job_delete(struct mali_gp_job *job);
+
+u32 mali_gp_job_get_gp_counter_src0(void);
+mali_bool mali_gp_job_set_gp_counter_src0(u32 counter);
+u32 mali_gp_job_get_gp_counter_src1(void);
+mali_bool mali_gp_job_set_gp_counter_src1(u32 counter);
+
+/*** Inline accessors for struct mali_gp_job ***/
+
+/* Kernel-space job id; NULL-safe (returns 0 for a NULL job). */
+MALI_STATIC_INLINE u32 mali_gp_job_get_id(struct mali_gp_job *job)
+{
+       return (NULL == job) ? 0 : job->id;
+}
+
+/* User-space job pointer, used to identify the job back to user space. */
+MALI_STATIC_INLINE u32 mali_gp_job_get_user_id(struct mali_gp_job *job)
+{
+       return job->uargs.user_job_ptr;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_frame_builder_id(struct mali_gp_job *job)
+{
+       return job->uargs.frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_flush_id(struct mali_gp_job *job)
+{
+       return job->uargs.flush_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_pid(struct mali_gp_job *job)
+{
+       return job->pid;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_tid(struct mali_gp_job *job)
+{
+       return job->tid;
+}
+
+/* Frame register block copied from user space; written to the core at job start. */
+MALI_STATIC_INLINE u32* mali_gp_job_get_frame_registers(struct mali_gp_job *job)
+{
+       return job->uargs.frame_registers;
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_gp_job_get_session(struct mali_gp_job *job)
+{
+       return job->session;
+}
+
+/* NOTE(review): non-equal frame_registers[0]/[1] is taken to mean a VS pass
+ * exists -- presumably start/end command list addresses; confirm against the
+ * user-space job format. */
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_vs_job(struct mali_gp_job *job)
+{
+       return (job->uargs.frame_registers[0] != job->uargs.frame_registers[1]) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* NOTE(review): same convention as above, for the PLBU pass (registers [2]/[3]). */
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_plbu_job(struct mali_gp_job *job)
+{
+       return (job->uargs.frame_registers[2] != job->uargs.frame_registers[3]) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_current_heap_addr(struct mali_gp_job *job)
+{
+       return job->heap_current_addr;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_current_heap_addr(struct mali_gp_job *job, u32 heap_addr)
+{
+       job->heap_current_addr = heap_addr;
+}
+
+/* Performance-counter selection/results, as requested by / reported to user space. */
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_flag(struct mali_gp_job *job)
+{
+       return job->uargs.perf_counter_flag;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src0(struct mali_gp_job *job)
+{
+       return job->uargs.perf_counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src1(struct mali_gp_job *job)
+{
+       return job->uargs.perf_counter_src1;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value0(struct mali_gp_job *job)
+{
+       return job->perf_counter_value0;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value1(struct mali_gp_job *job)
+{
+       return job->perf_counter_value1;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src0(struct mali_gp_job *job, u32 src)
+{
+       job->uargs.perf_counter_src0 = src;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src1(struct mali_gp_job *job, u32 src)
+{
+       job->uargs.perf_counter_src1 = src;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value0(struct mali_gp_job *job, u32 value)
+{
+       job->perf_counter_value0 = value;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value1(struct mali_gp_job *job, u32 value)
+{
+       job->perf_counter_value1 = value;
+}
+
+#endif /* __MALI_GP_JOB_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_gp_scheduler.c b/drivers/gpu/arm/mali400/mali/common/mali_gp_scheduler.c
new file mode 100644 (file)
index 0000000..81e9447
--- /dev/null
@@ -0,0 +1,474 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_gp_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_scheduler.h"
+#include "mali_gp.h"
+#include "mali_gp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+
+/* Scheduling state of the (single) GP render slot. */
+enum mali_gp_slot_state
+{
+       MALI_GP_SLOT_STATE_IDLE,    /* No GP job currently running on the slot */
+       MALI_GP_SLOT_STATE_WORKING, /* A GP job has been started on the slot's group */
+};
+
+/* A render slot is an entity which jobs can be scheduled onto */
+struct mali_gp_slot
+{
+       struct mali_group *group; /* The group containing the single GP core, set by mali_gp_scheduler_initialize() */
+       /*
+        * We keep track of the state here as well as in the group object
+        * so we don't need to take the group lock so often (and also avoid clutter with the working lock)
+        */
+       enum mali_gp_slot_state state;
+       u32 returned_cookie; /* Id of the last job reported to user space as suspended (OOM) */
+};
+
+static u32 gp_version = 0;                              /* HW version of the GP core, cached at init time */
+static _MALI_OSK_LIST_HEAD(job_queue);                          /* List of jobs with some unscheduled work */
+static struct mali_gp_slot slot;                        /* The single GP render slot */
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *gp_scheduler_working_wait_queue = NULL;
+static u32 pause_count = 0;                             /* >0 means scheduling is paused; protected by gp_scheduler_lock */
+
+static mali_bool mali_gp_scheduler_is_suspended(void);
+
+static _mali_osk_lock_t *gp_scheduler_lock = NULL;      /* Protects job_queue, slot.state and pause_count */
+/* Contains tid of thread that locked the scheduler or 0, if not locked */
+
+/**
+ * Set up the GP scheduler: the job queue, the scheduler spinlock, the pause
+ * wait queue and the single GP render slot.
+ *
+ * Only one GP core is supported; the first group found with a GP core is
+ * claimed and the search stops there.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM if the lock or
+ *         wait queue could not be created.
+ */
+_mali_osk_errcode_t mali_gp_scheduler_initialize(void)
+{
+       u32 num_groups;
+       u32 i;
+
+       _MALI_OSK_INIT_LIST_HEAD(&job_queue);
+
+       gp_scheduler_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+       if (NULL == gp_scheduler_lock)
+       {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       gp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
+       if (NULL == gp_scheduler_working_wait_queue)
+       {
+               _mali_osk_lock_term(gp_scheduler_lock);
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       /* Find all the available GP cores */
+       num_groups = mali_group_get_glob_num_groups();
+       for (i = 0; i < num_groups; i++)
+       {
+               struct mali_group *group = mali_group_get_glob_group(i);
+
+               struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+               if (NULL != gp_core)
+               {
+                       if (0 == gp_version)
+                       {
+                               /* Retrieve GP version */
+                               gp_version = mali_gp_core_get_version(gp_core);
+                       }
+                       slot.group = group;
+                       slot.state = MALI_GP_SLOT_STATE_IDLE;
+                       break; /* There is only one GP, no point in looking for more */
+               }
+       }
+
+       /* NOTE(review): if no group has a GP core, slot.group stays NULL but we
+        * still return OK -- presumably guaranteed not to happen by the probe
+        * code; confirm against the platform configuration. */
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Tear down the GP scheduler. The slot must be idle; the owned group and the
+ * resources created by mali_gp_scheduler_initialize() are released.
+ */
+void mali_gp_scheduler_terminate(void)
+{
+       MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_IDLE == slot.state);
+       MALI_DEBUG_ASSERT_POINTER(slot.group);
+       mali_group_delete(slot.group);
+
+       _mali_osk_wait_queue_term(gp_scheduler_working_wait_queue);
+       _mali_osk_lock_term(gp_scheduler_lock);
+}
+
+/* Take the scheduler spinlock; the lock is non-interruptable so failure is fatal. */
+MALI_STATIC_INLINE void mali_gp_scheduler_lock(void)
+{
+       if(_MALI_OSK_ERR_OK != _mali_osk_lock_wait(gp_scheduler_lock, _MALI_OSK_LOCKMODE_RW))
+       {
+               /* Non-interruptable lock failed: this should never happen. */
+               MALI_DEBUG_ASSERT(0);
+       }
+       MALI_DEBUG_PRINT(5, ("Mali GP scheduler: GP scheduler lock taken\n"));
+}
+
+/* Release the scheduler spinlock taken by mali_gp_scheduler_lock(). */
+MALI_STATIC_INLINE void mali_gp_scheduler_unlock(void)
+{
+       MALI_DEBUG_PRINT(5, ("Mali GP scheduler: Releasing GP scheduler lock\n"));
+       _mali_osk_lock_signal(gp_scheduler_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+#ifdef DEBUG
+/* Debug-build helper asserting that the current thread holds the scheduler lock. */
+MALI_STATIC_INLINE void mali_gp_scheduler_assert_locked(void)
+{
+       MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
+}
+#define MALI_ASSERT_GP_SCHEDULER_LOCKED() mali_gp_scheduler_assert_locked()
+#else
+#define MALI_ASSERT_GP_SCHEDULER_LOCKED()
+#endif
+
+/**
+ * Dequeue the next GP job (if any) and start it on the slot's group.
+ *
+ * Takes and releases the scheduler lock itself. Early-outs if the scheduler
+ * is paused, the slot is busy, or the queue is empty. The scheduler lock is
+ * dropped BEFORE taking the group lock to respect the lock ordering
+ * (scheduler lock before group lock, never held together here).
+ */
+static void mali_gp_scheduler_schedule(void)
+{
+       struct mali_gp_job *job;
+
+       mali_gp_scheduler_lock();
+
+       if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state || _mali_osk_list_empty(&job_queue))
+       {
+               MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
+                                    pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0));
+               mali_gp_scheduler_unlock();
+               return; /* Nothing to do, so early out */
+       }
+
+       /* Get (and remove) next job in queue */
+       job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_gp_job, list);
+       _mali_osk_list_del(&job->list);
+
+       /* Mark slot as busy */
+       slot.state = MALI_GP_SLOT_STATE_WORKING;
+
+       mali_gp_scheduler_unlock();
+
+       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Starting job %u (0x%08X)\n", mali_gp_job_get_id(job), job));
+
+       mali_group_lock(slot.group);
+
+       if (_MALI_OSK_ERR_OK != mali_group_start_gp_job(slot.group, job))
+       {
+               MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Failed to start GP job\n"));
+               MALI_DEBUG_ASSERT(0); /* @@@@ todo: this cant fail on Mali-300+, no need to implement put back of job */
+       }
+
+       mali_group_unlock(slot.group);
+}
+
+/* @@@@ todo: pass the job in as a param to this function, so that we don't have to take the scheduler lock again */
+/**
+ * Variant of mali_gp_scheduler_schedule() for use when the caller already
+ * holds BOTH the group lock and the scheduler lock.
+ *
+ * NOTE: this function releases the scheduler lock on every path (early-out
+ * and after dequeuing); the group lock remains held by the caller.
+ */
+static void mali_gp_scheduler_schedule_on_group(struct mali_group *group)
+{
+       struct mali_gp_job *job;
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+       MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
+
+       if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state || _mali_osk_list_empty(&job_queue))
+       {
+               mali_gp_scheduler_unlock();
+               MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
+                                    pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0));
+               return; /* Nothing to do, so early out */
+       }
+
+       /* Get (and remove) next job in queue */
+       job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_gp_job, list);
+       _mali_osk_list_del(&job->list);
+
+       /* Mark slot as busy */
+       slot.state = MALI_GP_SLOT_STATE_WORKING;
+
+       mali_gp_scheduler_unlock();
+
+       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Starting job %u (0x%08X)\n", mali_gp_job_get_id(job), job));
+
+       if (_MALI_OSK_ERR_OK != mali_group_start_gp_job(slot.group, job))
+       {
+               MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Failed to start GP job\n"));
+               MALI_DEBUG_ASSERT(0); /* @@@@ todo: this cant fail on Mali-300+, no need to implement put back of job */
+       }
+}
+
+/**
+ * Fill in the job-finished result buffer, send the finished notification to
+ * the owning session and delete the job object.
+ *
+ * Ownership of job->finished_notification is transferred to the session
+ * (pointer is cleared before the job is deleted).
+ *
+ * @param job     Completed job; consumed (deleted) by this function.
+ * @param success MALI_TRUE reports _MALI_UK_JOB_STATUS_END_SUCCESS,
+ *                otherwise _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR.
+ */
+static void mali_gp_scheduler_return_job_to_user(struct mali_gp_job *job, mali_bool success)
+{
+       _mali_uk_gp_job_finished_s *jobres = job->finished_notification->result_buffer;
+       _mali_osk_memset(jobres, 0, sizeof(_mali_uk_gp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
+       jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+       if (MALI_TRUE == success)
+       {
+               jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+       }
+       else
+       {
+               jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+       }
+
+       jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job);
+       jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job);
+       jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job);
+
+       mali_session_send_notification(mali_gp_job_get_session(job), job->finished_notification);
+       job->finished_notification = NULL;
+
+       mali_gp_job_delete(job);
+}
+
+/**
+ * Completion callback: return the finished job to user space, mark the slot
+ * idle, wake any thread blocked in mali_gp_scheduler_suspend(), and try to
+ * schedule the next queued job on the same group.
+ *
+ * Called with the group lock held (required by
+ * mali_gp_scheduler_schedule_on_group(), which also takes and releases the
+ * scheduler lock).
+ */
+void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success)
+{
+       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) completed (%s)\n", mali_gp_job_get_id(job), job, success ? "success" : "failure"));
+
+       mali_gp_scheduler_return_job_to_user(job, success);
+
+       mali_gp_scheduler_lock();
+
+       /* Mark slot as idle again */
+       slot.state = MALI_GP_SLOT_STATE_IDLE;
+
+       /* If paused, then this was the last job, so wake up sleeping workers */
+       if (pause_count > 0)
+       {
+               _mali_osk_wait_queue_wake_up(gp_scheduler_working_wait_queue);
+       }
+
+       mali_gp_scheduler_schedule_on_group(group);
+
+       /* It is ok to do this after schedule, since START/STOP is simply ++ and -- anyways */
+       mali_pm_core_event(MALI_CORE_EVENT_GP_STOP);
+}
+
+/**
+ * Out-of-memory callback: notify user space that the job is suspended so it
+ * can supply a new PLBU heap (see _mali_ukk_gp_suspend_response()).
+ *
+ * Takes ownership of job->oom_notification and hands it to the session;
+ * slot.returned_cookie records the job id the response must match.
+ */
+void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job)
+{
+       _mali_uk_gp_job_suspended_s * jobres;
+       _mali_osk_notification_t * notification;
+
+       mali_gp_scheduler_lock();
+
+       notification = job->oom_notification;
+       job->oom_notification = NULL;
+       slot.returned_cookie = mali_gp_job_get_id(job);
+
+       jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer;
+       jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+       jobres->cookie = mali_gp_job_get_id(job);
+
+       mali_gp_scheduler_unlock();
+
+       /* Written outside the scheduler lock; the notification is private to this thread here. */
+       jobres->reason = _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY;
+
+       mali_session_send_notification(mali_gp_job_get_session(job), notification);
+
+       /*
+       * No error handling is done if sending the notification fails: the job
+       * timer will eventually return the job to user space anyway, and this
+       * is not a common case.
+       */
+}
+
+/**
+ * Pause the GP scheduler and block until any running job has finished.
+ * Each call must be balanced with a call to mali_gp_scheduler_resume().
+ */
+void mali_gp_scheduler_suspend(void)
+{
+       mali_gp_scheduler_lock();
+       pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
+       mali_gp_scheduler_unlock();
+
+       _mali_osk_wait_queue_wait_event(gp_scheduler_working_wait_queue, mali_gp_scheduler_is_suspended);
+}
+
+/**
+ * Undo one mali_gp_scheduler_suspend() call and kick the scheduler if this
+ * was the last outstanding suspend.
+ *
+ * Fix: pause_count was previously re-read AFTER releasing the scheduler
+ * lock, racing with concurrent suspend()/resume() callers; sample the
+ * decremented value while the lock is still held instead.
+ */
+void mali_gp_scheduler_resume(void)
+{
+       mali_bool schedule_needed;
+
+       mali_gp_scheduler_lock();
+       pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+       schedule_needed = (0 == pause_count) ? MALI_TRUE : MALI_FALSE;
+       mali_gp_scheduler_unlock();
+
+       if (MALI_TRUE == schedule_needed)
+       {
+               mali_gp_scheduler_schedule();
+       }
+}
+
+/**
+ * User-kernel entry point: create a GP job from user-space arguments, queue
+ * it at the tail of the job queue and try to schedule it.
+ *
+ * @param ctx   Session context (struct mali_session_data *), must be non-NULL.
+ * @param uargs Job arguments copied from user space, must be non-NULL.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM if the job object
+ *         could not be allocated.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *uargs)
+{
+       struct mali_session_data *session;
+       struct mali_gp_job *job;
+
+       MALI_DEBUG_ASSERT_POINTER(uargs);
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+
+       session = (struct mali_session_data*)ctx;
+
+       job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id());
+       if (NULL == job)
+       {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+#if PROFILING_SKIP_PP_AND_GP_JOBS
+#warning GP jobs will not be executed
+       /* Profiling build: complete the job immediately without touching the HW. */
+       mali_gp_scheduler_return_job_to_user(job, MALI_TRUE);
+       return _MALI_OSK_ERR_OK;
+#endif
+
+       /* Balanced by MALI_CORE_EVENT_GP_STOP in mali_gp_scheduler_job_done()/abort paths. */
+       mali_pm_core_event(MALI_CORE_EVENT_GP_START);
+
+       mali_gp_scheduler_lock();
+       _mali_osk_list_addtail(&job->list, &job_queue);
+       mali_gp_scheduler_unlock();
+
+       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n", mali_gp_job_get_id(job), job));
+
+       mali_gp_scheduler_schedule();
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/** Report the number of GP cores to user space; this driver supports exactly one. */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       args->number_of_cores = 1;
+       return _MALI_OSK_ERR_OK;
+}
+
+/** Report the GP core HW version cached at scheduler initialization. */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       args->version = gp_version;
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * User-space response to a GP OOM suspension (see mali_gp_scheduler_oom()).
+ *
+ * If the response code is _MALIGP_JOB_RESUME_WITH_NEW_HEAP, a replacement
+ * OOM notification is allocated up front (before the group lock is taken, so
+ * the allocation can sleep/fail safely) and the job is resumed with the new
+ * heap bounds in args->arguments[0..1]. Any other code aborts the job.
+ *
+ * @return _MALI_OSK_ERR_OK on resume/abort, _MALI_OSK_ERR_INVALID_ARGS or
+ *         _MALI_OSK_ERR_FAULT on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
+{
+       struct mali_session_data *session;
+       struct mali_gp_job *resumed_job;
+       _mali_osk_notification_t *new_notification = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+
+       if (NULL == args->ctx)
+       {
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       }
+
+       /* NOTE(review): this second NULL check is redundant with the one above
+        * (the cast cannot turn a non-NULL pointer into NULL). */
+       session = (struct mali_session_data*)args->ctx;
+       if (NULL == session)
+       {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code)
+       {
+               new_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
+
+               if (NULL == new_notification)
+               {
+                       MALI_PRINT_ERROR(("Mali GP scheduler: Failed to allocate notification object. Will abort GP job.\n"));
+                       mali_group_lock(slot.group);
+                       mali_group_abort_gp_job(slot.group, args->cookie);
+                       mali_group_unlock(slot.group);
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+
+       mali_group_lock(slot.group);
+
+       if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code)
+       {
+               MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Resuming job %u with new heap; 0x%08X - 0x%08X\n", args->cookie, args->arguments[0], args->arguments[1]));
+
+               resumed_job = mali_group_resume_gp_with_new_heap(slot.group, args->cookie, args->arguments[0], args->arguments[1]);
+               if (NULL != resumed_job)
+               {
+                       /* @@@@ todo: move this and other notification handling into the job object itself */
+                       resumed_job->oom_notification = new_notification;
+                       mali_group_unlock(slot.group);
+                       return _MALI_OSK_ERR_OK;
+               }
+               else
+               {
+                       /* Resume failed (e.g. stale cookie); free the unused notification. */
+                       mali_group_unlock(slot.group);
+                       _mali_osk_notification_delete(new_notification);
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+
+       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Aborting job %u, no new heap provided\n", args->cookie));
+       mali_group_abort_gp_job(slot.group, args->cookie);
+       mali_group_unlock(slot.group);
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Abort all GP work belonging to @a session: queued jobs are removed from the
+ * job queue and deleted (with a matching GP_STOP power event for each), and
+ * any job running on the group is aborted via mali_group_abort_session().
+ */
+void mali_gp_scheduler_abort_session(struct mali_session_data *session)
+{
+       struct mali_gp_job *job, *tmp;
+
+       mali_gp_scheduler_lock();
+       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Aborting all jobs from session 0x%08x\n", session));
+
+       /* Check queue for jobs and remove */
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue, struct mali_gp_job, list)
+       {
+               if (mali_gp_job_get_session(job) == session)
+               {
+                       MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Removing GP job 0x%08x from queue\n", job));
+                       _mali_osk_list_del(&(job->list));
+                       mali_gp_job_delete(job);
+
+                       /* Balances the GP_START event issued when the job was queued. */
+                       mali_pm_core_event(MALI_CORE_EVENT_GP_STOP);
+               }
+       }
+
+       mali_gp_scheduler_unlock();
+
+       mali_group_abort_session(slot.group, session);
+}
+
+/* Wait-queue predicate for mali_gp_scheduler_suspend(): true when the
+ * scheduler is paused and no job is running on the slot. */
+static mali_bool mali_gp_scheduler_is_suspended(void)
+{
+       mali_bool ret;
+
+       mali_gp_scheduler_lock();
+       ret = pause_count > 0 && slot.state == MALI_GP_SLOT_STATE_IDLE;
+       mali_gp_scheduler_unlock();
+
+       return ret;
+}
+
+
+#if MALI_STATE_TRACKING
+/**
+ * Write a human-readable dump of the GP scheduler state into @a buf.
+ *
+ * @param buf  Destination buffer.
+ * @param size Size of @a buf in bytes.
+ * @return Number of bytes written.
+ */
+u32 mali_gp_scheduler_dump_state(char *buf, u32 size)
+{
+       int n = 0;
+
+       n += _mali_osk_snprintf(buf + n, size - n, "GP\n");
+       n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue) ? "empty" : "not empty");
+
+       n += mali_group_dump_state(slot.group, buf + n, size - n);
+       n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+       return n;
+}
+#endif
+
+/* Reset the GP group (if one exists); called after the HW has been powered on. */
+void mali_gp_scheduler_reset_all_groups(void)
+{
+       if (NULL != slot.group)
+       {
+               mali_group_reset(slot.group);
+       }
+}
+
+/* Zap the TLB for @a session on the GP group, if a group exists. */
+void mali_gp_scheduler_zap_all_active(struct mali_session_data *session)
+{
+       if (NULL != slot.group)
+       {
+               mali_group_zap_session(slot.group, session);
+       }
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_gp_scheduler.h b/drivers/gpu/arm/mali400/mali/common/mali_gp_scheduler.h
new file mode 100644 (file)
index 0000000..dccf9f3
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_SCHEDULER_H__
+#define __MALI_GP_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_gp_job.h"
+#include "mali_group.h"
+
+/** Initialize the GP scheduler; must succeed before any other call here. */
+_mali_osk_errcode_t mali_gp_scheduler_initialize(void);
+/** Tear down the GP scheduler; the render slot must be idle. */
+void mali_gp_scheduler_terminate(void);
+
+/** Completion callback invoked (with the group lock held) when a GP job finishes. */
+void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success);
+/** Callback invoked when a running GP job runs out of PLBU heap memory. */
+void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job);
+/** Abort all queued and running GP jobs belonging to @a session. */
+void mali_gp_scheduler_abort_session(struct mali_session_data *session);
+/** Dump GP scheduler state into @a buf; returns the number of bytes written. */
+u32 mali_gp_scheduler_dump_state(char *buf, u32 size);
+
+/** Pause scheduling and block until the running job (if any) completes. */
+void mali_gp_scheduler_suspend(void);
+/** Balance one suspend call; restarts scheduling when the pause count hits zero. */
+void mali_gp_scheduler_resume(void);
+
+/**
+ * @brief Reset all groups
+ *
+ * This function resets all groups known by the GP scheduler. This must be
+ * called after the Mali HW has been powered on in order to reset the HW.
+ */
+void mali_gp_scheduler_reset_all_groups(void);
+
+/**
+ * @brief Zap TLB on all groups with \a session active
+ *
+ * The scheduler will zap the session on all groups it owns.
+ */
+void mali_gp_scheduler_zap_all_active(struct mali_session_data *session);
+
+#endif /* __MALI_GP_SCHEDULER_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_group.c b/drivers/gpu/arm/mali400/mali/common/mali_group.c
new file mode 100644 (file)
index 0000000..0034f9b
--- /dev/null
@@ -0,0 +1,1803 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_mmu.h"
+#include "mali_dlbu.h"
+#include "mali_broadcast.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_kernel_core.h"
+#include "mali_osk_profiling.h"
+
+static void mali_group_bottom_half_mmu(void *data);
+static void mali_group_bottom_half_gp(void *data);
+static void mali_group_bottom_half_pp(void *data);
+
+static void mali_group_timeout(void *data);
+static void mali_group_reset_pp(struct mali_group *group);
+
+#if defined(CONFIG_MALI400_PROFILING)
+static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num);
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+/*
+ * The group object is the most important object in the device driver,
+ * and acts as the center of many HW operations.
+ * The reason for this is that operations on the MMU will affect all
+ * cores connected to this MMU (a group is defined by the MMU and the
+ * cores which are connected to this).
+ * The group lock is thus the most important lock, followed by the
+ * GP and PP scheduler locks. They must be taken in the following
+ * order:
+ * GP/PP lock first, then group lock(s).
+ */
+
+/* Registry of all created groups; compacted on delete (see mali_group_delete()). */
+static struct mali_group *mali_global_groups[MALI_MAX_NUMBER_OF_GROUPS];
+static u32 mali_global_num_groups = 0;
+
+/* Result of mali_group_activate_page_directory(). */
+enum mali_group_activate_pd_status
+{
+       MALI_GROUP_ACTIVATE_PD_STATUS_FAILED,      /* MMU page directory could not be activated */
+       MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD,  /* Session's page directory was already active */
+       MALI_GROUP_ACTIVATE_PD_STATUS_OK_SWITCHED_PD, /* A different page directory was switched in */
+};
+
+/* local helper functions */
+static enum mali_group_activate_pd_status mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_deactivate_page_directory(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_recovery_reset(struct mali_group *group);
+static void mali_group_mmu_page_fault(struct mali_group *group);
+
+static void mali_group_post_process_job_pp(struct mali_group *group);
+static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend);
+
+/* Take @a group's lock; the lock is non-interruptable so failure is fatal. */
+void mali_group_lock(struct mali_group *group)
+{
+       if(_MALI_OSK_ERR_OK != _mali_osk_lock_wait(group->lock, _MALI_OSK_LOCKMODE_RW))
+       {
+               /* Non-interruptable lock failed: this should never happen. */
+               MALI_DEBUG_ASSERT(0);
+       }
+       MALI_DEBUG_PRINT(5, ("Mali group: Group lock taken 0x%08X\n", group));
+}
+
+/* Release @a group's lock taken by mali_group_lock(). */
+void mali_group_unlock(struct mali_group *group)
+{
+       MALI_DEBUG_PRINT(5, ("Mali group: Releasing group lock 0x%08X\n", group));
+       _mali_osk_lock_signal(group->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+#ifdef DEBUG
+/* Debug-build helper asserting that the current thread holds @a group's lock. */
+void mali_group_assert_locked(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+}
+#endif
+
+
+/**
+ * Allocate and initialise a group object and register it in the global group
+ * array.
+ *
+ * @param core  L2 cache core stored in the group's slot 0.
+ * @param dlbu  DLBU core for a virtual group, or NULL for a physical group
+ *              (also selects the lock order used for the group lock).
+ * @param bcast Broadcast unit for a virtual group, or NULL otherwise.
+ * @return New group, or NULL if allocation fails or
+ *         MALI_MAX_NUMBER_OF_GROUPS groups already exist.
+ */
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core, struct mali_dlbu_core *dlbu, struct mali_bcast_unit *bcast)
+{
+       struct mali_group *group = NULL;
+       _mali_osk_lock_flags_t lock_flags;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
+#else
+       lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
+#endif
+
+       if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS)
+       {
+               MALI_PRINT_ERROR(("Mali group: Too many group objects created\n"));
+               return NULL;
+       }
+
+       group = _mali_osk_calloc(1, sizeof(struct mali_group));
+       if (NULL != group)
+       {
+               group->timeout_timer = _mali_osk_timer_init();
+
+               if (NULL != group->timeout_timer)
+               {
+                       _mali_osk_lock_order_t order;
+                       _mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group);
+
+                       /* Virtual groups (with a DLBU) use a dedicated lock order. */
+                       if (NULL != dlbu)
+                       {
+                               order = _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL;
+                       }
+                       else
+                       {
+                               order = _MALI_OSK_LOCK_ORDER_GROUP;
+                       }
+
+                       group->lock = _mali_osk_lock_init(lock_flags, 0, order);
+                       if (NULL != group->lock)
+                       {
+                               group->l2_cache_core[0] = core;
+                               group->session = NULL;
+                               group->page_dir_ref_count = 0;
+                               group->power_is_on = MALI_TRUE;
+                               group->state = MALI_GROUP_STATE_IDLE;
+                               _mali_osk_list_init(&group->group_list);
+                               _mali_osk_list_init(&group->pp_scheduler_list);
+                               group->parent_group = NULL;
+                               group->l2_cache_core_ref_count[0] = 0;
+                               group->l2_cache_core_ref_count[1] = 0;
+                               group->bcast_core = bcast;
+                               group->dlbu_core = dlbu;
+
+                               mali_global_groups[mali_global_num_groups] = group;
+                               mali_global_num_groups++;
+
+                               return group;
+                       }
+                       /* Lock creation failed: undo the timer before freeing the group. */
+                       _mali_osk_timer_term(group->timeout_timer);
+               }
+               _mali_osk_free(group);
+       }
+
+       return NULL;
+}
+
+/**
+ * Attach an MMU core to the group (the group takes ownership) and create the
+ * bottom-half work item for its interrupts.
+ *
+ * @return _MALI_OSK_ERR_OK, or _MALI_OSK_ERR_FAULT if the work item could not
+ *         be created (group->mmu is still set in that case).
+ */
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core* mmu_core)
+{
+       /* This group object now owns the MMU core object */
+       group->mmu= mmu_core;
+       group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group);
+       if (NULL == group->bottom_half_work_mmu)
+       {
+               return _MALI_OSK_ERR_FAULT;
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Detach the MMU core from the group and delete its bottom-half work item.
+ *
+ * Fix: the work-item pointer is now cleared after deletion; previously it was
+ * left dangling, and mali_group_delete() would delete the same work item a
+ * second time if called after this function.
+ */
+void mali_group_remove_mmu_core(struct mali_group *group)
+{
+       /* This group object no longer owns the MMU core object */
+       group->mmu = NULL;
+       if (NULL != group->bottom_half_work_mmu)
+       {
+               _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+               group->bottom_half_work_mmu = NULL;
+       }
+}
+
+/**
+ * Attach a GP core to the group (the group takes ownership) and create the
+ * bottom-half work item for its interrupts.
+ *
+ * @return _MALI_OSK_ERR_OK, or _MALI_OSK_ERR_FAULT if the work item could not
+ *         be created (group->gp_core is still set in that case).
+ */
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core* gp_core)
+{
+       /* This group object now owns the GP core object */
+       group->gp_core = gp_core;
+       group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group);
+       if (NULL == group->bottom_half_work_gp)
+       {
+               return _MALI_OSK_ERR_FAULT;
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Detach the GP core from the group and delete its bottom-half work item.
+ *
+ * Fix: the work-item pointer is now cleared after deletion; previously it was
+ * left dangling, and mali_group_delete() would delete the same work item a
+ * second time if called after this function.
+ */
+void mali_group_remove_gp_core(struct mali_group *group)
+{
+       /* This group object no longer owns the GP core object */
+       group->gp_core = NULL;
+       if (NULL != group->bottom_half_work_gp)
+       {
+               _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+               group->bottom_half_work_gp = NULL;
+       }
+}
+
+/**
+ * Attach a PP core to the group (the group takes ownership) and create the
+ * bottom-half work item for its interrupts.
+ *
+ * @return _MALI_OSK_ERR_OK, or _MALI_OSK_ERR_FAULT if the work item could not
+ *         be created (group->pp_core is still set in that case).
+ */
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core* pp_core)
+{
+       /* This group object now owns the PP core object */
+       group->pp_core = pp_core;
+       group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group);
+       if (NULL == group->bottom_half_work_pp)
+       {
+               return _MALI_OSK_ERR_FAULT;
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Detach the PP core from the group and delete its bottom-half work item.
+ *
+ * Fix: the work-item pointer is now cleared after deletion; previously it was
+ * left dangling, and mali_group_delete() would delete the same work item a
+ * second time if called after this function.
+ */
+void mali_group_remove_pp_core(struct mali_group *group)
+{
+       /* This group object no longer owns the PP core object */
+       group->pp_core = NULL;
+       if (NULL != group->bottom_half_work_pp)
+       {
+               _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+               group->bottom_half_work_pp = NULL;
+       }
+}
+
+/**
+ * Destroy a group and every resource it owns: attached GP/PP/MMU cores, and
+ * for virtual groups all child groups plus the DLBU and broadcast unit. The
+ * group is removed from the global registry (array compacted), its timeout
+ * timer, bottom-half work items and lock are released, and the object freed.
+ *
+ * The group must not be part of a virtual parent (asserted below).
+ */
+void mali_group_delete(struct mali_group *group)
+{
+       u32 i;
+
+       MALI_DEBUG_PRINT(4, ("Deleting group %p\n", group));
+
+       MALI_DEBUG_ASSERT(NULL == group->parent_group);
+
+       /* Delete the resources that this group owns */
+       if (NULL != group->gp_core)
+       {
+               mali_gp_delete(group->gp_core);
+       }
+
+       if (NULL != group->pp_core)
+       {
+               mali_pp_delete(group->pp_core);
+       }
+
+       if (NULL != group->mmu)
+       {
+               mali_mmu_delete(group->mmu);
+       }
+
+       if (mali_group_is_virtual(group))
+       {
+               /* Remove all groups from virtual group */
+               struct mali_group *child;
+               struct mali_group *temp;
+
+               _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list)
+               {
+                       /* Detach first so the child's own delete passes the parent assert. */
+                       child->parent_group = NULL;
+                       mali_group_delete(child);
+               }
+
+               mali_dlbu_delete(group->dlbu_core);
+
+               if (NULL != group->bcast_core)
+               {
+                       mali_bcast_unit_delete(group->bcast_core);
+               }
+       }
+
+       /* Remove ourselves from the global registry. */
+       for (i = 0; i < MALI_MAX_NUMBER_OF_GROUPS; i++)
+       {
+               if (mali_global_groups[i] == group)
+               {
+                       mali_global_groups[i] = NULL;
+                       mali_global_num_groups--;
+
+                       if (i != mali_global_num_groups)
+                       {
+                               /* We removed a group from the middle of the array -- move the last
+                                * group to the current position to close the gap */
+                               mali_global_groups[i] = mali_global_groups[mali_global_num_groups];
+                               mali_global_groups[mali_global_num_groups] = NULL;
+                       }
+
+                       break;
+               }
+       }
+
+       if (NULL != group->timeout_timer)
+       {
+               _mali_osk_timer_del(group->timeout_timer);
+               _mali_osk_timer_term(group->timeout_timer);
+       }
+
+       if (NULL != group->bottom_half_work_mmu)
+       {
+               _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+       }
+
+       if (NULL != group->bottom_half_work_gp)
+       {
+               _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+       }
+
+       if (NULL != group->bottom_half_work_pp)
+       {
+               _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+       }
+
+       _mali_osk_lock_term(group->lock);
+
+       _mali_osk_free(group);
+}
+
+/* Debug-build only: print a virtual group's L2 cache refs and child groups. */
+MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup)
+{
+       u32 i;
+       struct mali_group *group;
+       struct mali_group *temp;
+
+       MALI_DEBUG_PRINT(4, ("Virtual group %p\n", vgroup));
+       MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0]));
+       MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1]));
+
+       i = 0;
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list)
+       {
+               MALI_DEBUG_PRINT(4, ("[%d] %p, l2_cache_core[0] = %p\n", i, group, group->l2_cache_core[0]));
+               i++;
+       }
+})
+
+/**
+ * @brief Add child group to virtual group parent
+ *
+ * Before calling this function, child must have its state set to JOINING_VIRTUAL
+ * to ensure it's not touched during the transition period. When this function returns,
+ * child's state will be IN_VIRTUAL.
+ *
+ * Caller must hold the parent group lock. The child's L2 cache is ref-counted
+ * into the parent, the broadcast unit/DLBU are updated, the child's MMU is
+ * switched to the parent's page directory, and — if the parent is currently
+ * running a PP job — the child joins that job immediately.
+ */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child)
+{
+       mali_bool found;
+       u32 i;
+
+       MALI_DEBUG_PRINT(3, ("Adding group %p to virtual group %p\n", child, parent));
+
+       MALI_ASSERT_GROUP_LOCKED(parent);
+
+       MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+       MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+       MALI_DEBUG_ASSERT(NULL == child->parent_group);
+       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_JOINING_VIRTUAL == child->state);
+
+       _mali_osk_list_addtail(&child->group_list, &parent->group_list);
+
+       child->state = MALI_GROUP_STATE_IN_VIRTUAL;
+       child->parent_group = parent;
+
+       MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]);
+
+       MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1]));
+       MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1]));
+
+       /* Keep track of the L2 cache cores of child groups */
+       found = MALI_FALSE;
+       for (i = 0; i < 2; i++)
+       {
+               if (parent->l2_cache_core[i] == child->l2_cache_core[0])
+               {
+                       MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0);
+                       parent->l2_cache_core_ref_count[i]++;
+                       found = MALI_TRUE;
+               }
+       }
+
+       if (!found)
+       {
+               /* First time we see this L2 cache, add it to our list */
+               i = (NULL == parent->l2_cache_core[0]) ? 0 : 1;
+
+               MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i]));
+
+               MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]);
+
+               parent->l2_cache_core[i] = child->l2_cache_core[0];
+               parent->l2_cache_core_ref_count[i]++;
+       }
+
+       /* Update Broadcast Unit and DLBU */
+       mali_bcast_add_group(parent->bcast_core, child);
+       mali_dlbu_add_group(parent->dlbu_core, child);
+
+       /* Update MMU */
+       MALI_DEBUG_ASSERT(0 == child->page_dir_ref_count);
+       if (parent->session == child->session)
+       {
+               /* Same session: page directory already correct, just flush TLB. */
+               mali_mmu_zap_tlb(child->mmu);
+       }
+       else
+       {
+               /* Different session: switch the child's MMU over to the parent's
+                * page directory (or the empty one if the parent has no session).
+                * The child's stale session pointer is cleared unconditionally
+                * below, after the switch. */
+               if (NULL == parent->session)
+               {
+                       mali_mmu_activate_empty_page_directory(child->mmu);
+               }
+               else
+               {
+                       mali_bool activate_success = mali_mmu_activate_page_directory(child->mmu,
+                               mali_session_get_page_directory(parent->session));
+                       MALI_DEBUG_ASSERT(activate_success);
+                       MALI_IGNORE(activate_success);
+               }
+       }
+       /* Child is now tracked by the parent; it keeps no session of its own. */
+       child->session = NULL;
+
+       /* Start job on child when parent is active */
+       if (NULL != parent->pp_running_job)
+       {
+               struct mali_pp_job *job = parent->pp_running_job;
+               MALI_DEBUG_PRINT(3, ("Group %p joining running job %d on virtual group %p\n",
+                                    child, mali_pp_job_get_id(job), parent));
+               MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING == parent->state);
+               mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE);
+
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
+                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
+                                             MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+                                             mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|
+                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+                                             mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+       }
+
+       MALI_DEBUG_CODE(mali_group_print_virtual(parent);)
+}
+
+/**
+ * @brief Remove child group from virtual group parent
+ *
+ * After the child is removed, it's state will be LEAVING_VIRTUAL and must be set
+ * to IDLE before it can be used.
+ *
+ * Caller must hold the parent group lock; the parent must be IDLE (removal
+ * while a job is running is not supported).
+ */
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child)
+{
+       u32 i;
+
+       MALI_ASSERT_GROUP_LOCKED(parent);
+
+       MALI_DEBUG_PRINT(3, ("Removing group %p from virtual group %p\n", child, parent));
+
+       MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+       MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+       MALI_DEBUG_ASSERT(parent == child->parent_group);
+       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IN_VIRTUAL == child->state);
+       /* Removing groups while running is not yet supported. */
+       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == parent->state);
+
+       mali_group_lock(child);
+
+       /* Update Broadcast Unit and DLBU */
+       mali_bcast_remove_group(parent->bcast_core, child);
+       mali_dlbu_remove_group(parent->dlbu_core, child);
+
+       _mali_osk_list_delinit(&child->group_list);
+
+       /* Child inherits the parent's current session (its MMU already points
+        * at that page directory from when it joined). */
+       child->session = parent->session;
+       child->parent_group = NULL;
+       child->state = MALI_GROUP_STATE_LEAVING_VIRTUAL;
+
+       /* Keep track of the L2 cache cores of child groups */
+       i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1;
+
+       MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]);
+
+       /* Drop the parent's reference on the child's L2; free the slot when the
+        * last child using that cache has left. */
+       parent->l2_cache_core_ref_count[i]--;
+
+       if (parent->l2_cache_core_ref_count[i] == 0)
+       {
+               parent->l2_cache_core[i] = NULL;
+       }
+
+       MALI_DEBUG_CODE(mali_group_print_virtual(parent));
+
+       mali_group_unlock(child);
+}
+
+/* Detach and return one physical member (the tail of the member list) from a
+ * virtual group.  Caller must hold the parent lock and the list must be
+ * non-empty; the returned group is in state LEAVING_VIRTUAL. */
+struct mali_group *mali_group_acquire_group(struct mali_group *parent)
+{
+       struct mali_group *member;
+
+       MALI_ASSERT_GROUP_LOCKED(parent);
+       MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+       MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&parent->group_list));
+
+       member = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);
+       mali_group_remove_group(parent, member);
+
+       return member;
+}
+
+/* Reset every core owned by the group (MMU, then GP, then PP) and forget the
+ * current session.  Must only be called when no job is running on the group. */
+void mali_group_reset(struct mali_group *group)
+{
+       /*
+        * This function should not be used to abort jobs,
+        * currently only called during insmod and PM resume
+        */
+       MALI_DEBUG_ASSERT(NULL == group->gp_running_job);
+       MALI_DEBUG_ASSERT(NULL == group->pp_running_job);
+
+       mali_group_lock(group);
+
+       group->session = NULL;
+
+       if (NULL != group->mmu)
+       {
+               mali_mmu_reset(group->mmu);
+       }
+
+       if (NULL != group->gp_core)
+       {
+               mali_gp_reset(group->gp_core);
+       }
+
+       if (NULL != group->pp_core)
+       {
+               /* PP reset handles virtual groups (resets all children). */
+               mali_group_reset_pp(group);
+       }
+
+       mali_group_unlock(group);
+}
+
+/** @brief Return the group's GP core, or NULL if the group has none. */
+struct mali_gp_core* mali_group_get_gp_core(struct mali_group *group)
+{
+       return group->gp_core;
+}
+
+/** @brief Return the group's PP core, or NULL if the group has none. */
+struct mali_pp_core* mali_group_get_pp_core(struct mali_group *group)
+{
+       return group->pp_core;
+}
+
+/* Start a GP job on an idle, locked group: invalidate L2 (conditional on job
+ * id), activate the job session's page directory, kick the core, emit
+ * profiling events and arm the watchdog timer.
+ * Returns _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT when the page
+ * directory could not be activated (another session holds the MMU). */
+_mali_osk_errcode_t mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job)
+{
+       struct mali_session_data *session;
+       enum mali_group_activate_pd_status activate_status;
+
+       MALI_ASSERT_GROUP_LOCKED(group);
+       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);
+
+       session = mali_gp_job_get_session(job);
+
+       if (NULL != group->l2_cache_core[0])
+       {
+               mali_l2_cache_invalidate_all_conditional(group->l2_cache_core[0], mali_gp_job_get_id(job));
+       }
+
+       activate_status = mali_group_activate_page_directory(group, session);
+       if (MALI_GROUP_ACTIVATE_PD_STATUS_FAILED != activate_status)
+       {
+               /* if session is NOT kept Zapping is done as part of session switch */
+               if (MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD == activate_status)
+               {
+                       mali_mmu_zap_tlb_without_stall(group->mmu);
+               }
+               mali_gp_job_start(group->gp_core, job);
+
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                               MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) |
+                               MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+                       mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0);
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                               MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+                               mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0);
+#if defined(CONFIG_MALI400_PROFILING)
+               /* Report L2 counters only when both counter sources are enabled. */
+               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                               (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+                       mali_group_report_l2_cache_counters_per_core(group, 0);
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+               group->gp_running_job = job;
+               group->state = MALI_GROUP_STATE_WORKING;
+
+               /* Setup the timeout timer value and save the job id for the job running on the gp core */
+               _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+
+               return _MALI_OSK_ERR_OK;
+       }
+
+       return _MALI_OSK_ERR_FAULT;
+}
+
+/* Start PP sub-job @sub_job of @job on an idle, locked group.  For a virtual
+ * group this configures the DLBU, writes the stack address into every child
+ * core and emits per-child profiling events; for a physical group only the
+ * single core is started.  Arms the watchdog timer on success.
+ * Returns _MALI_OSK_ERR_OK, or _MALI_OSK_ERR_FAULT when the page directory
+ * could not be activated for the job's session. */
+_mali_osk_errcode_t mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job)
+{
+       struct mali_session_data *session;
+       enum mali_group_activate_pd_status activate_status;
+
+       MALI_ASSERT_GROUP_LOCKED(group);
+       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);
+
+       session = mali_pp_job_get_session(job);
+
+       /* Invalidate both L2 caches the group may span (skipped per-cache if
+        * the cache already saw this job id). */
+       if (NULL != group->l2_cache_core[0])
+       {
+               mali_l2_cache_invalidate_all_conditional(group->l2_cache_core[0], mali_pp_job_get_id(job));
+       }
+
+       if (NULL != group->l2_cache_core[1])
+       {
+               mali_l2_cache_invalidate_all_conditional(group->l2_cache_core[1], mali_pp_job_get_id(job));
+       }
+
+       activate_status = mali_group_activate_page_directory(group, session);
+       if (MALI_GROUP_ACTIVATE_PD_STATUS_FAILED != activate_status)
+       {
+               /* if session is NOT kept Zapping is done as part of session switch */
+               if (MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD == activate_status)
+               {
+                       MALI_DEBUG_PRINT(3, ("PP starting job PD_Switch 0 Flush 1 Zap 1\n"));
+                       mali_mmu_zap_tlb_without_stall(group->mmu);
+               }
+
+               if (mali_group_is_virtual(group))
+               {
+                       struct mali_group *child;
+                       struct mali_group *temp;
+                       u32 core_num = 0;
+
+                       /* Configure DLBU for the job */
+                       mali_dlbu_config_job(group->dlbu_core, job);
+
+                       /* Write stack address for each child group */
+                       _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list)
+                       {
+                               mali_pp_write_addr_stack(child->pp_core, job);
+                               core_num++;
+                       }
+               }
+
+               /* For a virtual group this kick is broadcast to all children. */
+               mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
+
+               /* if the group is virtual, loop through physical groups which belong to this group
+                * and call profiling events for its cores as virtual */
+               if (MALI_TRUE == mali_group_is_virtual(group))
+               {
+                       struct mali_group *child;
+                       struct mali_group *temp;
+
+                       _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list)
+                       {
+                               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
+                                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
+                                                             MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+                                                             mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+                               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|
+                                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
+                                                             MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+                                                             mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+                       }
+#if defined(CONFIG_MALI400_PROFILING)
+                       /* Report counters for each L2 slot that is in use and has
+                        * both counter sources enabled. */
+                       if (0 != group->l2_cache_core_ref_count[0])
+                       {
+                               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                                                                       (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+                               {
+                                       mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+                               }
+                       }
+                       if (0 != group->l2_cache_core_ref_count[1])
+                       {
+                               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+                                                                       (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1])))
+                               {
+                                       mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+                               }
+                       }
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+               }
+               else /* group is physical - call profiling events for physical cores */
+               {
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
+                                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core))|
+                                                     MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+                                                     mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|
+                                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core))|
+                                                     MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+                                                     mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+#if defined(CONFIG_MALI400_PROFILING)
+                       if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                                       (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+                       {
+                               mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+                       }
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+               }
+               group->pp_running_job = job;
+               group->pp_running_sub_job = sub_job;
+               group->state = MALI_GROUP_STATE_WORKING;
+
+               /* Setup the timeout timer value and save the job id for the job running on the pp core */
+               _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+
+               return _MALI_OSK_ERR_OK;
+       }
+
+       return _MALI_OSK_ERR_FAULT;
+}
+
+/* Resume a GP job that stopped with an out-of-memory interrupt, giving it a
+ * new heap [start_addr, end_addr).  Returns the running job on success, or
+ * NULL when the group is no longer in OOM state or the job id no longer
+ * matches (job was aborted in the meantime). */
+struct mali_gp_job *mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
+{
+       MALI_ASSERT_GROUP_LOCKED(group);
+
+       if (group->state != MALI_GROUP_STATE_OOM ||
+           mali_gp_job_get_id(group->gp_running_job) != job_id)
+       {
+               return NULL; /* Illegal request or job has already been aborted */
+       }
+
+       /* Force-invalidate the L2 so the core sees the freshly mapped heap. */
+       if (NULL != group->l2_cache_core[0])
+       {
+               mali_l2_cache_invalidate_all_force(group->l2_cache_core[0]);
+       }
+
+       mali_mmu_zap_tlb_without_stall(group->mmu);
+
+       mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);
+
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), 0, 0, 0, 0, 0);
+
+       group->state = MALI_GROUP_STATE_WORKING;
+
+       return group->gp_running_job;
+}
+
+/* Reset the group's PP core(s): kick off an asynchronous reset, then wait for
+ * completion — either on the single/broadcast core or on every child of a
+ * virtual group. */
+static void mali_group_reset_pp(struct mali_group *group)
+{
+       struct mali_group *child;
+       struct mali_group *temp;
+
+       /* TODO: If we *know* that the group is idle, this could be faster. */
+
+       mali_pp_reset_async(group->pp_core);
+
+//     if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) //LWJ
+/* MALI_SEC */
+       /* NOTE(review): the MALI_SEC change flipped this to "NULL != pp_running_job",
+        * which contradicts the comment below ("idle virtual group") and the
+        * original condition preserved above; the per-child wait branch now runs
+        * for an *idle* virtual group instead of a busy one — presumably
+        * intentional for this vendor tree, but confirm against upstream. */
+       if (!mali_group_is_virtual(group) || NULL != group->pp_running_job)
+       {
+               /* This is a physical group or an idle virtual group -- simply wait for
+                * the reset to complete. */
+               mali_pp_reset_wait(group->pp_core);
+       }
+       else /* virtual group */
+       {
+               /* Loop through all members of this virtual group and wait until they
+                * are done resetting.
+                */
+               _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list)
+               {
+                       mali_pp_reset_wait(child->pp_core);
+               }
+       }
+}
+
+/* Finish the PP job currently on the group: post-process it, reset the core
+ * asynchronously while bookkeeping runs, drop the page-directory reference,
+ * then hand the job back to the PP scheduler with @success.  Falls back to a
+ * full group recovery reset if the core reset fails. */
+static void mali_group_complete_pp(struct mali_group *group, mali_bool success)
+{
+       struct mali_pp_job *pp_job_to_return;
+       u32 pp_sub_job_to_return;
+
+       MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+       MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
+       MALI_ASSERT_GROUP_LOCKED(group);
+
+       mali_group_post_process_job_pp(group);
+
+       /* Start the reset now; overlap it with the bookkeeping below. */
+       mali_pp_reset_async(group->pp_core);
+
+       /* Snapshot job state before clearing it on the group. */
+       pp_job_to_return = group->pp_running_job;
+       pp_sub_job_to_return = group->pp_running_sub_job;
+       group->state = MALI_GROUP_STATE_IDLE;
+       group->pp_running_job = NULL;
+
+       mali_group_deactivate_page_directory(group, group->session);
+
+       if (_MALI_OSK_ERR_OK != mali_pp_reset_wait(group->pp_core))
+       {
+               MALI_DEBUG_PRINT(3, ("Mali group: Failed to reset PP, need to reset entire group\n"));
+
+               mali_group_recovery_reset(group);
+       }
+
+       mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, success);
+}
+
+/* Finish the GP job currently on the group: post-process it, reset the core
+ * asynchronously while bookkeeping runs, drop the page-directory reference,
+ * then hand the job back to the GP scheduler with @success.  Falls back to a
+ * full group recovery reset if the core reset fails. */
+static void mali_group_complete_gp(struct mali_group *group, mali_bool success)
+{
+       struct mali_gp_job *gp_job_to_return;
+
+       MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+       MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
+       MALI_ASSERT_GROUP_LOCKED(group);
+
+       mali_group_post_process_job_gp(group, MALI_FALSE);
+
+       /* Start the reset now; overlap it with the bookkeeping below. */
+       mali_gp_reset_async(group->gp_core);
+
+       gp_job_to_return = group->gp_running_job;
+       group->state = MALI_GROUP_STATE_IDLE;
+       group->gp_running_job = NULL;
+
+       mali_group_deactivate_page_directory(group, group->session);
+
+       if (_MALI_OSK_ERR_OK != mali_gp_reset_wait(group->gp_core))
+       {
+               MALI_DEBUG_PRINT(3, ("Mali group: Failed to reset GP, need to reset entire group\n"));
+
+               mali_group_recovery_reset(group);
+       }
+
+       mali_gp_scheduler_job_done(group, gp_job_to_return, success);
+}
+
+/* Abort the GP job with id @job_id if — and only if — it is still the one
+ * running on this group.  A no-op when the group is idle or the job has
+ * already completed/been aborted.  Caller holds the group lock. */
+void mali_group_abort_gp_job(struct mali_group *group, u32 job_id)
+{
+       MALI_ASSERT_GROUP_LOCKED(group);
+
+       if (MALI_GROUP_STATE_IDLE != group->state &&
+           mali_gp_job_get_id(group->gp_running_job) == job_id)
+       {
+               mali_group_complete_gp(group, MALI_FALSE);
+       }
+}
+
+/* Abort the PP job with id @job_id if — and only if — it is still the one
+ * running on this group.  A no-op when the group is idle or the job has
+ * already completed/been aborted.  Caller holds the group lock. */
+static void mali_group_abort_pp_job(struct mali_group *group, u32 job_id)
+{
+       MALI_ASSERT_GROUP_LOCKED(group);
+
+       if (MALI_GROUP_STATE_IDLE != group->state &&
+           mali_pp_job_get_id(group->pp_running_job) == job_id)
+       {
+               mali_group_complete_pp(group, MALI_FALSE);
+       }
+}
+
+/* Abort any GP and/or PP job on this group that belongs to @session, then
+ * release the session from the group if it is no longer referenced.  Groups
+ * that are members of a virtual group are skipped — the parent handles them. */
+void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session)
+{
+       struct mali_gp_job *running_gp;
+       struct mali_pp_job *running_pp;
+       u32 running_gp_id = 0;
+       u32 running_pp_id = 0;
+       mali_bool kill_gp = MALI_FALSE;
+       mali_bool kill_pp = MALI_FALSE;
+
+       mali_group_lock(group);
+
+       if (mali_group_is_in_virtual(group))
+       {
+               /* Group is member of a virtual group, don't touch it! */
+               mali_group_unlock(group);
+               return;
+       }
+
+       running_gp = group->gp_running_job;
+       running_pp = group->pp_running_job;
+
+       /* Record which jobs belong to the dying session before aborting, since
+        * completing one job may change the group state. */
+       if ((NULL != running_gp) && (mali_gp_job_get_session(running_gp) == session))
+       {
+               MALI_DEBUG_PRINT(4, ("Aborting GP job 0x%08x from session 0x%08x\n", running_gp, session));
+
+               running_gp_id = mali_gp_job_get_id(running_gp);
+               kill_gp = MALI_TRUE;
+       }
+
+       if ((NULL != running_pp) && (mali_pp_job_get_session(running_pp) == session))
+       {
+               MALI_DEBUG_PRINT(4, ("Mali group: Aborting PP job 0x%08x from session 0x%08x\n", running_pp, session));
+
+               running_pp_id = mali_pp_job_get_id(running_pp);
+               kill_pp = MALI_TRUE;
+       }
+
+       if (kill_gp)
+       {
+               mali_group_abort_gp_job(group, running_gp_id);
+       }
+
+       if (kill_pp)
+       {
+               mali_group_abort_pp_job(group, running_pp_id);
+       }
+
+       mali_group_remove_session_if_unused(group, session);
+
+       mali_group_unlock(group);
+}
+
+/* Return the global group at @index, or NULL when the index is out of range. */
+struct mali_group *mali_group_get_glob_group(u32 index)
+{
+       if (index >= mali_global_num_groups)
+       {
+               return NULL;
+       }
+
+       return mali_global_groups[index];
+}
+
+/** @brief Return the number of groups registered in the global group table. */
+u32 mali_group_get_glob_num_groups(void)
+{
+       return mali_global_num_groups;
+}
+
+/* Take a reference on @session's page directory for this group.
+ * - If another session already holds references: FAILED.
+ * - If this session already holds references, or is already programmed into
+ *   the MMU: OK_KEPT_PD (caller must zap the TLB itself).
+ * - Otherwise the MMU is switched to the new page directory: OK_SWITCHED_PD.
+ * On any OK result group->page_dir_ref_count is incremented. */
+static enum mali_group_activate_pd_status mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session)
+{
+       enum mali_group_activate_pd_status retval;
+       MALI_ASSERT_GROUP_LOCKED(group);
+
+       MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group 0x%08X\n", mali_session_get_page_directory(session), session, group));
+       MALI_DEBUG_ASSERT(0 <= group->page_dir_ref_count);
+
+       if (0 != group->page_dir_ref_count)
+       {
+               if (group->session != session)
+               {
+                       MALI_DEBUG_PRINT(4, ("Mali group: Activating session FAILED: 0x%08x on group 0x%08X. Existing session: 0x%08x\n", session, group, group->session));
+                       return MALI_GROUP_ACTIVATE_PD_STATUS_FAILED;
+               }
+               else
+               {
+                       MALI_DEBUG_PRINT(4, ("Mali group: Activating session already activated: 0x%08x on group 0x%08X. New Ref: %d\n", session, group, 1+group->page_dir_ref_count));
+                       retval = MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD;
+
+               }
+       }
+       else
+       {
+               /* There might be another session here, but it is ok to overwrite it since group->page_dir_ref_count==0 */
+               if (group->session != session)
+               {
+                       mali_bool activate_success;
+                       MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group 0x%08X. Ref: %d\n", session, group->session, group, 1+group->page_dir_ref_count));
+
+                       activate_success = mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session));
+                       MALI_DEBUG_ASSERT(activate_success);
+                       /* Release builds (assert compiled out) must still bail out. */
+                       if ( MALI_FALSE== activate_success ) return MALI_GROUP_ACTIVATE_PD_STATUS_FAILED;
+                       group->session = session;
+                       retval = MALI_GROUP_ACTIVATE_PD_STATUS_OK_SWITCHED_PD;
+               }
+               else
+               {
+                       /* Same session left over from before (see the optimization
+                        * note in mali_group_deactivate_page_directory). */
+                       MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group 0x%08X. Ref: %d\n", session->page_directory, group, 1+group->page_dir_ref_count));
+                       retval = MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD;
+               }
+       }
+
+       group->page_dir_ref_count++;
+       return retval;
+}
+
+/* Drop one reference on the page directory held for @session.  @session is
+ * only used to assert it matches the group's current session. */
+static void mali_group_deactivate_page_directory(struct mali_group *group, struct mali_session_data *session)
+{
+       MALI_ASSERT_GROUP_LOCKED(group);
+
+       MALI_DEBUG_ASSERT(0 < group->page_dir_ref_count);
+       MALI_DEBUG_ASSERT(session == group->session);
+
+       group->page_dir_ref_count--;
+
+       /* As an optimization, the MMU still points to the group->session even if (0 == group->page_dir_ref_count),
+          and we do not call mali_mmu_activate_empty_page_directory(group->mmu); */
+       MALI_DEBUG_ASSERT(0 <= group->page_dir_ref_count);
+}
+
+/* If @session is the group's current session and no page-directory references
+ * remain, point the MMU at the empty page directory and forget the session
+ * (undoing the keep-the-PD optimization in deactivate_page_directory). */
+static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session)
+{
+       MALI_ASSERT_GROUP_LOCKED(group);
+
+       if (0 != group->page_dir_ref_count)
+       {
+               return; /* Session still referenced; leave the MMU alone. */
+       }
+
+       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING != group->state);
+
+       if (group->session != session)
+       {
+               return; /* A different session owns the MMU mapping. */
+       }
+
+       MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+       MALI_DEBUG_PRINT(3, ("Mali group: Deactivating unused session 0x%08X on group %08X\n", session, group));
+       mali_mmu_activate_empty_page_directory(group->mmu);
+       group->session = NULL;
+}
+
+/* Mark every global group (and its L2 caches) as powered on.  All groups must
+ * be idle; each group's lock is taken while its flags are updated. */
+void mali_group_power_on(void)
+{
+       int i;
+
+       for (i = 0; i < mali_global_num_groups; i++)
+       {
+               struct mali_group *group = mali_global_groups[i];
+               u32 cache;
+
+               mali_group_lock(group);
+               MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);
+               group->power_is_on = MALI_TRUE;
+
+               for (cache = 0; cache < 2; cache++)
+               {
+                       if (NULL != group->l2_cache_core[cache])
+                       {
+                               mali_l2_cache_power_is_enabled_set(group->l2_cache_core[cache], MALI_TRUE);
+                       }
+               }
+
+               mali_group_unlock(group);
+       }
+       MALI_DEBUG_PRINT(4,("group: POWER ON\n"));
+}
+
+/** @brief Return whether the group is currently powered; caller holds the group lock. */
+mali_bool mali_group_power_is_on(struct mali_group *group)
+{
+       MALI_ASSERT_GROUP_LOCKED(group);
+       return group->power_is_on;
+}
+
+/* Mark every global group (and its L2 caches) as powered off.  All groups
+ * must be idle; each group's lock is taken while its flags are updated. */
+void mali_group_power_off(void)
+{
+       int i;
+       /* It is necessary to set group->session = NULL; so that the powered off MMU is not written to on map /unmap */
+       /* It is necessary to set group->power_is_on=MALI_FALSE so that pending bottom_halves does not access powered off cores. */
+       for (i = 0; i < mali_global_num_groups; i++)
+       {
+               struct mali_group *group = mali_global_groups[i];
+               u32 cache;
+
+               mali_group_lock(group);
+               MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);
+               group->session = NULL;
+               group->power_is_on = MALI_FALSE;
+
+               for (cache = 0; cache < 2; cache++)
+               {
+                       if (NULL != group->l2_cache_core[cache])
+                       {
+                               mali_l2_cache_power_is_enabled_set(group->l2_cache_core[cache], MALI_FALSE);
+                       }
+               }
+
+               mali_group_unlock(group);
+       }
+       MALI_DEBUG_PRINT(4,("group: POWER OFF\n"));
+}
+
+
+/* Hard-recover a wedged group: stop the core bus(es), flush the MMU and clear
+ * any pending page fault, hard-reset each core (every child for a virtual
+ * group), then reset the MMU and drop the session.  Caller holds the lock. */
+static void mali_group_recovery_reset(struct mali_group *group)
+{
+       MALI_ASSERT_GROUP_LOCKED(group);
+
+       /* Stop cores, bus stop */
+       if (NULL != group->pp_core)
+       {
+               mali_pp_stop_bus(group->pp_core);
+       }
+       else
+       {
+               mali_gp_stop_bus(group->gp_core);
+       }
+
+       /* Flush MMU and clear page fault (if any) */
+       mali_mmu_activate_fault_flush_page_directory(group->mmu);
+       mali_mmu_page_fault_done(group->mmu);
+
+       /* Wait for cores to stop bus, then do a hard reset on them */
+       if (NULL != group->pp_core)
+       {
+               if (mali_group_is_virtual(group))
+               {
+                       struct mali_group *child, *temp;
+
+                       /* Reset each physical member individually. */
+                       _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list)
+                       {
+                               mali_pp_stop_bus_wait(child->pp_core);
+                               mali_pp_hard_reset(child->pp_core);
+                       }
+               }
+               else
+               {
+                       mali_pp_stop_bus_wait(group->pp_core);
+                       mali_pp_hard_reset(group->pp_core);
+               }
+       }
+       else
+       {
+               mali_gp_stop_bus_wait(group->gp_core);
+               mali_gp_hard_reset(group->gp_core);
+       }
+
+       /* Reset MMU */
+       mali_mmu_reset(group->mmu);
+       group->session = NULL;
+}
+
+#if MALI_STATE_TRACKING
+/* Append a human-readable description of the group's state (and any running
+ * GP/PP job) to @buf; returns the number of characters written.
+ * NOTE(review): n is a signed int while size is u32; if output ever reached
+ * size, "size - n" would wrap to a huge value — presumably _mali_osk_snprintf
+ * bounds this internally, but confirm. */
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size)
+{
+       int n = 0;
+
+       n += _mali_osk_snprintf(buf + n, size - n, "Group: %p\n", group);
+       n += _mali_osk_snprintf(buf + n, size - n, "\tstate: %d\n", group->state);
+       if (group->gp_core)
+       {
+               n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
+               n += _mali_osk_snprintf(buf + n, size - n, "\tGP job: %p\n", group->gp_running_job);
+       }
+       if (group->pp_core)
+       {
+               n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
+               n += _mali_osk_snprintf(buf + n, size - n, "\tPP job: %p, subjob %d \n",
+                                       group->pp_running_job, group->pp_running_sub_job);
+       }
+
+       return n;
+}
+#endif
+
+/* Handle an MMU page fault on the group: fail the running job (PP if the
+ * group has a PP core, otherwise GP), perform a full recovery reset (which
+ * also clears the fault), and report the job to its scheduler as failed. */
+static void mali_group_mmu_page_fault(struct mali_group *group)
+{
+       MALI_ASSERT_GROUP_LOCKED(group);
+
+       if (NULL != group->pp_core)
+       {
+               struct mali_pp_job *pp_job_to_return;
+               u32 pp_sub_job_to_return;
+
+               MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
+
+               mali_group_post_process_job_pp(group);
+
+               /* Snapshot the job before clearing the group's running state. */
+               pp_job_to_return = group->pp_running_job;
+               pp_sub_job_to_return = group->pp_running_sub_job;
+               group->state = MALI_GROUP_STATE_IDLE;
+               group->pp_running_job = NULL;
+
+               mali_group_deactivate_page_directory(group, group->session);
+
+               mali_group_recovery_reset(group); /* This will also clear the page fault itself */
+
+               mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, MALI_FALSE);
+       }
+       else
+       {
+               struct mali_gp_job *gp_job_to_return;
+
+               MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
+
+               mali_group_post_process_job_gp(group, MALI_FALSE);
+
+               gp_job_to_return = group->gp_running_job;
+               group->state = MALI_GROUP_STATE_IDLE;
+               group->gp_running_job = NULL;
+
+               mali_group_deactivate_page_directory(group, group->session);
+
+               mali_group_recovery_reset(group); /* This will also clear the page fault itself */
+
+               mali_gp_scheduler_job_done(group, gp_job_to_return, MALI_FALSE);
+       }
+}
+
+/* Upper half (IRQ context) handler for MMU interrupts.
+ *
+ * Returns _MALI_OSK_ERR_OK when this MMU raised the interrupt and the
+ * bottom half has been scheduled, _MALI_OSK_ERR_FAULT when the interrupt
+ * came from another device on a shared IRQ line.
+ */
+_mali_osk_errcode_t mali_group_upper_half_mmu(void * data)
+{
+       struct mali_group *group = (struct mali_group *)data;
+       struct mali_mmu_core *mmu = group->mmu;
+       struct mali_group *parent;
+
+       MALI_DEBUG_ASSERT_POINTER(mmu);
+
+       /* The IRQ line may be shared; bail out if this MMU did not raise it */
+       if (0 == mali_mmu_get_int_status(mmu))
+       {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* page fault or bus error, we thread them both in the same way */
+       mali_mmu_mask_all_interrupts(mmu);
+
+       /* Members of a virtual group defer to the parent's bottom half */
+       parent = group->parent_group;
+       _mali_osk_wq_schedule_work((NULL != parent) ?
+                                  parent->bottom_half_work_mmu :
+                                  group->bottom_half_work_mmu);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Bottom half (work queue) handler for MMU interrupts.
+ *
+ * Takes the group lock, re-reads the raw interrupt and status registers,
+ * and on a page fault / read bus error invokes the page fault recovery
+ * path (mali_group_mmu_page_fault), which aborts the running job.
+ */
+static void mali_group_bottom_half_mmu(void * data)
+{
+       struct mali_group *group = (struct mali_group *)data;
+       struct mali_mmu_core *mmu = group->mmu;
+       u32 rawstat;
+       u32 status;
+
+       MALI_DEBUG_ASSERT_POINTER(mmu);
+
+       mali_group_lock(group);
+
+       /* TODO: Remove some of these asserts? Will we ever end up in
+        * "physical" bottom half for a member of the virtual group? */
+       MALI_DEBUG_ASSERT(NULL == group->parent_group);
+       MALI_DEBUG_ASSERT(!mali_group_is_in_virtual(group));
+
+       if ( MALI_FALSE == mali_group_power_is_on(group) )
+       {
+               MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mmu->hw_core.description));
+               mali_group_unlock(group);
+               return;
+       }
+
+       rawstat = mali_mmu_get_rawstat(mmu);
+       status = mali_mmu_get_status(mmu);
+
+       MALI_DEBUG_PRINT(4, ("Mali MMU: Bottom half, interrupt 0x%08X, status 0x%08X\n", rawstat, status));
+
+       if (rawstat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR))
+       {
+               /* An actual page fault has occurred. */
+               u32 fault_address = mali_mmu_get_page_fault_addr(mmu);
+               /* fault_address is a u32, so print it with %08X; the previous
+                * (void*) cast paired with %x is a printf format/type mismatch
+                * (undefined behavior where pointers are wider than int). */
+               MALI_DEBUG_PRINT(2,("Mali MMU: Page fault detected at 0x%08X from bus id %d of type %s on %s\n",
+                                fault_address,
+                                (status >> 6) & 0x1F,
+                                (status & 32) ? "write" : "read",
+                                mmu->hw_core.description));
+               MALI_IGNORE(fault_address); /* silence unused warning in non-debug builds */
+
+               mali_group_mmu_page_fault(group);
+       }
+
+       mali_group_unlock(group);
+}
+
+/* Upper half (IRQ context) handler for GP core interrupts.
+ *
+ * Returns _MALI_OSK_ERR_OK when this core raised the interrupt (the
+ * bottom half is then scheduled), _MALI_OSK_ERR_FAULT otherwise.
+ */
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data)
+{
+       struct mali_group *group = (struct mali_group *)data;
+       struct mali_gp_core *core = group->gp_core;
+       u32 irq_readout = mali_gp_get_int_stat(core);
+
+       /* Shared IRQ line: nothing pending on this core means it was not ours */
+       if (MALIGP2_REG_VAL_IRQ_MASK_NONE == irq_readout)
+       {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Mask out all IRQs from this core until IRQ is handled */
+       mali_gp_mask_all_interrupts(core);
+
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0)|MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT, irq_readout, 0, 0, 0, 0);
+
+       /* We do need to handle this in a bottom half */
+       _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Bottom half (work queue) handler for GP core interrupts.
+ *
+ * Takes the group lock, decodes the raw interrupt status and takes one of
+ * four exits: job completed, job aborted (unknown IRQ or SW timeout),
+ * PLBU out-of-memory (job suspended, scheduler asked for more heap), or
+ * partial completion (only one of the two END_CMD_LST interrupts seen;
+ * re-enable the missing one and keep running).  Every exit path releases
+ * the group lock and emits the matching STOP profiling event.
+ */
+static void mali_group_bottom_half_gp(void *data)
+{
+       struct mali_group *group = (struct mali_group *)data;
+       u32 irq_readout;
+       u32 irq_errors;
+
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), 0, 0);
+
+       mali_group_lock(group);
+
+       if ( MALI_FALSE == mali_group_power_is_on(group) )
+       {
+               MALI_PRINT_ERROR(("Mali group: Interrupt bottom half of %s when core is OFF.", mali_gp_get_hw_core_desc(group->gp_core)));
+               mali_group_unlock(group);
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
+               return;
+       }
+
+       irq_readout = mali_gp_read_rawstat(group->gp_core);
+
+       MALI_DEBUG_PRINT(4, ("Mali group: GP bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
+
+       /* Completion requires an END_CMD_LST interrupt AND the core no longer
+        * being active (both VS and PLBU must have finished). */
+       if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST))
+       {
+               u32 core_status = mali_gp_read_core_status(group->gp_core);
+               if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE))
+               {
+                       MALI_DEBUG_PRINT(4, ("Mali group: GP job completed, calling group handler\n"));
+                       group->core_timed_out = MALI_FALSE;
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                                     0, _mali_osk_get_tid(), 0, 0, 0);
+                       mali_group_complete_gp(group, MALI_TRUE);
+                       mali_group_unlock(group);
+                       return;
+               }
+       }
+
+       /*
+        * Now lets look at the possible error cases (IRQ indicating error or timeout)
+        * END_CMD_LST, HANG and PLBU_OOM interrupts are not considered error.
+        */
+       irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST|MALIGP2_REG_VAL_IRQ_HANG|MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
+       if (0 != irq_errors)
+       {
+               MALI_PRINT_ERROR(("Mali group: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
+               group->core_timed_out = MALI_FALSE;
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), 0, 0, 0);
+               mali_group_complete_gp(group, MALI_FALSE);
+               mali_group_unlock(group);
+               return;
+       }
+       else if (group->core_timed_out) /* SW timeout */
+       {
+               group->core_timed_out = MALI_FALSE;
+               /* Only abort if the timer really expired (not re-armed) and a job is still running */
+               if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->gp_running_job)
+               {
+                       MALI_PRINT(("Mali group: Job %d timed out\n", mali_gp_job_get_id(group->gp_running_job)));
+                       mali_group_complete_gp(group, MALI_FALSE);
+                       mali_group_unlock(group);
+                       return;
+               }
+       }
+       else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM)
+       {
+               /* GP wants more memory in order to continue. */
+               MALI_DEBUG_PRINT(3, ("Mali group: PLBU needs more heap memory\n"));
+
+               group->state = MALI_GROUP_STATE_OOM;
+               mali_group_unlock(group); /* Nothing to do on the HW side, so just release group lock right away */
+               mali_gp_scheduler_oom(group, group->gp_running_job);
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
+               return;
+       }
+
+       /*
+        * The only way to get here is if we only got one of two needed END_CMD_LST
+        * interrupts. Enable all but not the complete interrupt that has been
+        * received and continue to run.
+        */
+       mali_gp_enable_interrupts(group->gp_core, irq_readout & (MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST|MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST));
+       mali_group_unlock(group);
+
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
+}
+
+/* Common post-processing for a GP job (suspend and completion paths).
+ *
+ * Stops the timeout timer, samples the GP performance counters, emits the
+ * matching profiling event (SUSPEND or STOP, depending on @suspend) and
+ * records the current PLBU heap allocation address on the job.
+ * No-op (apart from stopping the timer) when no GP job is running.
+ */
+static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend)
+{
+       /* Stop the timeout timer. */
+       _mali_osk_timer_del_async(group->timeout_timer);
+
+       if (NULL == group->gp_running_job)
+       {
+               /* Nothing to do */
+               return;
+       }
+
+       mali_gp_update_performance_counters(group->gp_core, group->gp_running_job, suspend);
+
+#if defined(CONFIG_MALI400_PROFILING)
+       if (suspend)
+       {
+               /* @@@@ todo: test this case and see if it is still working*/
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+                                             mali_gp_job_get_perf_counter_value0(group->gp_running_job),
+                                             mali_gp_job_get_perf_counter_value1(group->gp_running_job),
+                                             mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
+                                             0, 0);
+       }
+       else
+       {
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+                                             mali_gp_job_get_perf_counter_value0(group->gp_running_job),
+                                             mali_gp_job_get_perf_counter_value1(group->gp_running_job),
+                                             mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
+                                             0, 0);
+
+               /* L2 counters are only reported when both sources are configured */
+               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                               (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+                       mali_group_report_l2_cache_counters_per_core(group, 0);
+       }
+#endif
+
+       /* Remember how far the PLBU heap got, so the job can be resumed/inspected */
+       mali_gp_job_set_current_heap_addr(group->gp_running_job,
+                                         mali_gp_read_plbu_alloc_start_addr(group->gp_core));
+}
+
+/* Upper half (IRQ context) handler for PP core interrupts.
+ *
+ * Returns _MALI_OSK_ERR_OK when the interrupt was handled (or scheduled
+ * for the bottom half), _MALI_OSK_ERR_FAULT when it was not raised by
+ * this core (shared IRQ line) or should be handled by the virtual group.
+ * When MALI_UPPER_HALF_SCHEDULING is defined, END_OF_FRAME completion is
+ * handled directly here under the group lock instead of being deferred.
+ */
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data)
+{
+       struct mali_group *group = (struct mali_group *)data;
+       struct mali_pp_core *core = group->pp_core;
+       u32 irq_readout;
+
+       /*
+        * For Mali-450 there is one particular case we need to watch out for:
+        *
+        * Criteria 1) this function call can be due to a shared interrupt,
+        * and not necessary because this core signaled an interrupt.
+        * Criteria 2) this core is a part of a virtual group, and thus it should
+        * not do any post processing.
+        * Criteria 3) this core has actually indicated that is has completed by
+        * having set raw_stat/int_stat registers to != 0
+        *
+        * If all this criteria is meet, then we could incorrectly start post
+        * processing on the wrong group object (this should only happen on the
+        * parent group)
+        */
+#if !defined(MALI_UPPER_HALF_SCHEDULING)
+       if (mali_group_is_in_virtual(group))
+       {
+               /*
+                * This check is done without the group lock held, which could lead to
+                * a potential race. This is however ok, since we will safely re-check
+                * this with the group lock held at a later stage. This is just an
+                * early out which will strongly benefit shared IRQ systems.
+                */
+               return _MALI_OSK_ERR_OK;
+       }
+#endif
+
+       irq_readout = mali_pp_get_int_stat(core);
+       if (MALI200_REG_VAL_IRQ_MASK_NONE != irq_readout)
+       {
+               /* Mask out all IRQs from this core until IRQ is handled */
+               mali_pp_mask_all_interrupts(core);
+
+#if defined(CONFIG_MALI400_PROFILING)
+               /* Currently no support for this interrupt event for the virtual PP core */
+               if (!mali_group_is_virtual(group))
+               {
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id) |
+                                                     MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT,
+                                                     irq_readout, 0, 0, 0, 0);
+               }
+#endif
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+               if (irq_readout & MALI200_REG_VAL_IRQ_END_OF_FRAME)
+               {
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                                     0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+
+                       MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler from upper half\n"));
+
+                       mali_group_lock(group);
+
+                       /* Read int stat again: the state may have changed between the
+                        * unlocked read above and taking the lock */
+                       irq_readout = mali_pp_read_rawstat(core);
+                       if (!(irq_readout & MALI200_REG_VAL_IRQ_END_OF_FRAME))
+                       {
+                               /* There was nothing to do */
+                               mali_pp_enable_interrupts(core);
+                               mali_group_unlock(group);
+                               return _MALI_OSK_ERR_OK;
+                       }
+
+                       /* Re-check virtual group membership, now safely under the lock */
+                       if (mali_group_is_in_virtual(group))
+                       {
+                               /* We're member of a virtual group, so interrupt should be handled by the virtual group */
+                               mali_pp_enable_interrupts(core);
+                               mali_group_unlock(group);
+                               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                                             0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                                     0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+
+                       mali_group_complete_pp(group, MALI_TRUE);
+                       /* No need to enable interrupts again, since the core will be reset while completing the job */
+
+                       mali_group_unlock(group);
+
+                       return _MALI_OSK_ERR_OK;
+               }
+#endif
+
+               /* We do need to handle this in a bottom half */
+               _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
+               return _MALI_OSK_ERR_OK;
+       }
+
+       return _MALI_OSK_ERR_FAULT;
+}
+
+/* Bottom half (work queue) handler for PP core interrupts.
+ *
+ * Takes the group lock, re-reads the raw interrupt status and either:
+ * completes the job (END_OF_FRAME), aborts it (unknown IRQ or SW
+ * timeout), or -- as a should-not-happen fallback -- logs the state and
+ * re-enables interrupts.  Every exit path releases the group lock and
+ * emits the matching STOP profiling event.
+ */
+static void mali_group_bottom_half_pp(void *data)
+{
+       struct mali_group *group = (struct mali_group *)data;
+       struct mali_pp_core *core = group->pp_core;
+       u32 irq_readout;
+       u32 irq_errors;
+
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                     0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+
+       mali_group_lock(group);
+
+       if (mali_group_is_in_virtual(group))
+       {
+               /* We're member of a virtual group, so interrupt should be handled by the virtual group */
+               mali_pp_enable_interrupts(core);
+               mali_group_unlock(group);
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), 0, 0, 0);
+               return;
+       }
+
+       if ( MALI_FALSE == mali_group_power_is_on(group) )
+       {
+               MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mali_pp_get_hw_core_desc(core)));
+               mali_group_unlock(group);
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), 0, 0, 0);
+               return;
+       }
+
+       irq_readout = mali_pp_read_rawstat(group->pp_core);
+
+       MALI_DEBUG_PRINT(4, ("Mali PP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
+
+       if (irq_readout & MALI200_REG_VAL_IRQ_END_OF_FRAME)
+       {
+               MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler\n"));
+               group->core_timed_out = MALI_FALSE;
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), 0, 0, 0);
+               mali_group_complete_pp(group, MALI_TRUE);
+               mali_group_unlock(group);
+               return;
+       }
+
+       /*
+        * Now lets look at the possible error cases (IRQ indicating error or timeout)
+        * END_OF_FRAME and HANG interrupts are not considered error.
+        */
+       irq_errors = irq_readout & ~(MALI200_REG_VAL_IRQ_END_OF_FRAME|MALI200_REG_VAL_IRQ_HANG);
+       if (0 != irq_errors)
+       {
+               MALI_PRINT_ERROR(("Mali PP: Unknown interrupt 0x%08X from core %s, aborting job\n",
+                                 irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
+               group->core_timed_out = MALI_FALSE;
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), 0, 0, 0);
+               mali_group_complete_pp(group, MALI_FALSE);
+               mali_group_unlock(group);
+               return;
+       }
+       else if (group->core_timed_out) /* SW timeout */
+       {
+               group->core_timed_out = MALI_FALSE;
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), 0, 0, 0);
+               /* Only abort if the timer really expired (not re-armed) and a job is still running */
+               if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->pp_running_job)
+               {
+                       MALI_PRINT(("Mali PP: Job %d timed out on core %s\n",
+                                   mali_pp_job_get_id(group->pp_running_job), mali_pp_get_hw_core_desc(core)));
+                       mali_group_complete_pp(group, MALI_FALSE);
+                       mali_group_unlock(group);
+               }
+               else
+               {
+                       mali_group_unlock(group);
+               }
+               return;
+       }
+
+       /*
+        * We should never get here, re-enable interrupts and continue
+        */
+       if (0 == irq_readout)
+       {
+               MALI_DEBUG_PRINT(3, ("Mali group: No interrupt found on core %s\n",
+                                   mali_pp_get_hw_core_desc(group->pp_core)));
+       }
+       else
+       {
+               MALI_PRINT_ERROR(("Mali group: Unhandled PP interrupt 0x%08X on %s\n", irq_readout,
+                                   mali_pp_get_hw_core_desc(group->pp_core)));
+       }
+       mali_pp_enable_interrupts(core);
+       mali_group_unlock(group);
+
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                     0, _mali_osk_get_tid(), 0, 0, 0);
+}
+
+/* Common post-processing for a PP job.
+ *
+ * Must be called with the group lock held (asserted below).  Stops the
+ * timeout timer, then -- if a PP job is running -- samples performance
+ * counters and emits profiling events: per physical child core for a
+ * virtual group, or for the group's own PP core otherwise.  L2 cache
+ * counters are reported only when both counter sources are configured.
+ */
+static void mali_group_post_process_job_pp(struct mali_group *group)
+{
+       MALI_ASSERT_GROUP_LOCKED(group);
+
+       /* Stop the timeout timer. */
+       _mali_osk_timer_del_async(group->timeout_timer);
+
+       /*todo add stop SW counters profiling*/
+
+       if (NULL != group->pp_running_job)
+       {
+               if (MALI_TRUE == mali_group_is_virtual(group))
+               {
+                       struct mali_group *child;
+                       struct mali_group *temp;
+
+                       /* update performance counters from each physical pp core within this virtual group */
+                       _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list)
+                       {
+                               mali_pp_update_performance_counters(child->pp_core, group->pp_running_job, group->pp_running_sub_job);
+                       }
+
+#if defined(CONFIG_MALI400_PROFILING)
+                       /* send profiling data per physical core */
+                       _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list)
+                       {
+                               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|
+                                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
+                                                             MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+                                                             mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+                                                             mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+                                                             mali_pp_job_get_perf_counter_src0(group->pp_running_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job) << 8),
+                                                             0, 0);
+                       }
+                       /* report L2 counters for each referenced L2 cache (virtual group may span two) */
+                       if (0 != group->l2_cache_core_ref_count[0])
+                       {
+                               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                                                                       (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+                               {
+                                       mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+                               }
+                       }
+                       if (0 != group->l2_cache_core_ref_count[1])
+                       {
+                               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+                                                                       (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1])))
+                               {
+                                       mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+                               }
+                       }
+
+#endif
+               }
+               else
+               {
+                       /* update performance counters for a physical group's pp core */
+                       mali_pp_update_performance_counters(group->pp_core, group->pp_running_job, group->pp_running_sub_job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|
+                                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core))|
+                                                     MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+                                                     mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
+                                                     mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
+                                                     mali_pp_job_get_perf_counter_src0(group->pp_running_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job) << 8),
+                                                     0, 0);
+                       if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                                       (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+                       {
+                               mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+                       }
+#endif
+               }
+       }
+}
+
+/* Timeout timer callback: flag the timeout and kick the relevant bottom
+ * half (GP if the group has a GP core, PP otherwise), which performs the
+ * actual job abort under the group lock.
+ */
+static void mali_group_timeout(void *data)
+{
+       struct mali_group *group = (struct mali_group *)data;
+
+       group->core_timed_out = MALI_TRUE;
+
+       if (NULL == group->gp_core)
+       {
+               MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_pp_get_hw_core_desc(group->pp_core)));
+               _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
+       }
+       else
+       {
+               MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_gp_get_hw_core_desc(group->gp_core)));
+               _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+       }
+}
+
+/* Zap all MMU mappings belonging to @session on this group.
+ *
+ * If the session is still in use after the unused-session check, the TLB
+ * is zapped; on zap failure we fall back to the page fault recovery path.
+ */
+void mali_group_zap_session(struct mali_group *group, struct mali_session_data *session)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       /* Early out - safe even if mutex is not held */
+       if (session != group->session)
+       {
+               return;
+       }
+
+       mali_group_lock(group);
+
+       mali_group_remove_session_if_unused(group, session);
+
+       if (session == group->session)
+       {
+               /* The Zap also does the stall and disable_stall */
+               if (MALI_TRUE != mali_mmu_zap_tlb(group->mmu))
+               {
+                       MALI_DEBUG_PRINT(2, ("Mali memory unmap failed. Doing pagefault handling.\n"));
+                       mali_group_mmu_page_fault(group);
+               }
+       }
+
+       mali_group_unlock(group);
+}
+
+#if defined(CONFIG_MALI400_PROFILING)
+/* Emit a profiling event carrying the counter values of L2 cache
+ * @core_num (0, 1 or 2).  Counters read as zero when the cache is not
+ * found; unknown core numbers are reported on the L20 channel.
+ */
+static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num)
+{
+       u32 source0 = 0;
+       u32 value0 = 0;
+       u32 source1 = 0;
+       u32 value1 = 0;
+       u32 profiling_channel;
+
+       switch (core_num)
+       {
+               case 1:
+                       profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                           MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                           MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS;
+                       break;
+               case 2:
+                       profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                           MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                           MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS;
+                       break;
+               case 0:
+               default:
+                       profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                           MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                           MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
+                       break;
+       }
+
+       if (0 == core_num)
+       {
+               /* L2 cache 0 is always at index 0 */
+               mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+       }
+       else if (1 == core_num || 2 == core_num)
+       {
+               /* L2 caches 1 and 2 may sit at either index; match on core id */
+               u32 i;
+               for (i = 0; i < 2; i++)
+               {
+                       if (core_num == mali_l2_cache_get_id(group->l2_cache_core[i]))
+                       {
+                               mali_l2_cache_core_get_counter_values(group->l2_cache_core[i], &source0, &value0, &source1, &value1);
+                               break;
+                       }
+               }
+       }
+
+       _mali_osk_profiling_add_event(profiling_channel, source1 << 8 | source0, value0, value1, 0, 0);
+}
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_group.h b/drivers/gpu/arm/mali400/mali/common/mali_group.h
new file mode 100644 (file)
index 0000000..7a40da2
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_GROUP_H__
+#define __MALI_GROUP_H__
+
+#include "linux/jiffies.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_mmu.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_session.h"
+
+/* max runtime [ms] for a core job - used by timeout timers  */
+#define MAX_RUNTIME 5000
+/** @brief A mali group object represents a MMU and a PP and/or a GP core.
+ *
+ */
+#define MALI_MAX_NUMBER_OF_GROUPS 10
+
+enum mali_group_core_state
+{
+       MALI_GROUP_STATE_IDLE,
+       MALI_GROUP_STATE_WORKING,
+       MALI_GROUP_STATE_OOM,
+       MALI_GROUP_STATE_IN_VIRTUAL,
+       MALI_GROUP_STATE_JOINING_VIRTUAL,
+       MALI_GROUP_STATE_LEAVING_VIRTUAL,
+};
+
+/**
+ * The structure represents a render group
+ * A render group is defined by all the cores that share the same Mali MMU
+ */
+
+struct mali_group
+{
+       struct mali_mmu_core        *mmu;
+       struct mali_session_data    *session;
+       int                         page_dir_ref_count;
+
+       mali_bool                   power_is_on;
+       enum mali_group_core_state  state; /* @@@@ TODO: include power_is_on in this state? */
+
+       struct mali_gp_core         *gp_core;
+       struct mali_gp_job          *gp_running_job;
+
+       struct mali_pp_core         *pp_core;
+       struct mali_pp_job          *pp_running_job;
+       u32                         pp_running_sub_job;
+
+       struct mali_l2_cache_core   *l2_cache_core[2];
+       u32                         l2_cache_core_ref_count[2];
+
+       struct mali_dlbu_core       *dlbu_core;
+       struct mali_bcast_unit      *bcast_core;
+
+       _mali_osk_lock_t            *lock;
+
+       _mali_osk_list_t            pp_scheduler_list;
+
+       /* List used for virtual groups. For a virtual group, the list represents the
+        * head element. */
+       _mali_osk_list_t            group_list;
+
+       /* Parent virtual group (if any) */
+       struct mali_group           *parent_group;
+
+       _mali_osk_wq_work_t         *bottom_half_work_mmu;
+       _mali_osk_wq_work_t         *bottom_half_work_gp;
+       _mali_osk_wq_work_t         *bottom_half_work_pp;
+
+       _mali_osk_timer_t           *timeout_timer;
+       mali_bool                   core_timed_out;
+};
+
+/** @brief Create a new Mali group object
+ *
+ * @param cluster Pointer to the cluster to which the group is connected.
+ * @param mmu Pointer to the MMU that defines this group
+ * @return A pointer to a new group object
+ */
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
+                                     struct mali_dlbu_core *dlbu,
+                                    struct mali_bcast_unit *bcast);
+
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core* mmu_core);
+void mali_group_remove_mmu_core(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core* gp_core);
+void mali_group_remove_gp_core(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core* pp_core);
+void mali_group_remove_pp_core(struct mali_group *group);
+
+void mali_group_delete(struct mali_group *group);
+
+/** @brief Virtual groups */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child);
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child);
+struct mali_group *mali_group_acquire_group(struct mali_group *parent);
+
+MALI_STATIC_INLINE mali_bool mali_group_is_virtual(struct mali_group *group)
+{
+       return (NULL != group->dlbu_core);
+}
+
+/** @brief Check if a group is considered as part of a virtual group
+ *
+ * @note A group is considered to be "part of" a virtual group also during the transition
+ *       in to / out of the virtual group.
+ */
+MALI_STATIC_INLINE mali_bool mali_group_is_in_virtual(struct mali_group *group)
+{
+       return (MALI_GROUP_STATE_IN_VIRTUAL == group->state ||
+               MALI_GROUP_STATE_JOINING_VIRTUAL == group->state ||
+               MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state);
+}
+
+/** @brief Reset group
+ *
+ * This function will reset the entire group, including all the cores present in the group.
+ *
+ * @param group Pointer to the group to reset
+ */
+void mali_group_reset(struct mali_group *group);
+
+/** @brief Zap MMU TLB on all groups
+ *
+ * Zap TLB on group if \a session is active.
+ */
+void mali_group_zap_session(struct mali_group* group, struct mali_session_data *session);
+
+/** @brief Get pointer to GP core object
+ */
+struct mali_gp_core* mali_group_get_gp_core(struct mali_group *group);
+
+/** @brief Get pointer to PP core object
+ */
+struct mali_pp_core* mali_group_get_pp_core(struct mali_group *group);
+
+/** @brief Lock group object
+ *
+ * Most group functions will lock the group object themselves. The exception is
+ * the group_bottom_half which requires the group to be locked on entry.
+ *
+ * @param group Pointer to group to lock
+ */
+void mali_group_lock(struct mali_group *group);
+
+/** @brief Unlock group object
+ *
+ * @param group Pointer to group to unlock
+ */
+void mali_group_unlock(struct mali_group *group);
+#ifdef DEBUG
+void mali_group_assert_locked(struct mali_group *group);
+#define MALI_ASSERT_GROUP_LOCKED(group) mali_group_assert_locked(group)
+#else
+#define MALI_ASSERT_GROUP_LOCKED(group)
+#endif
+
+/** @brief Start GP job
+ */
+_mali_osk_errcode_t mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job);
+/** @brief Start fragment of PP job
+ */
+_mali_osk_errcode_t mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job);
+
+/** @brief Resume GP job that suspended waiting for more heap memory
+ */
+struct mali_gp_job *mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr);
+/** @brief Abort GP job
+ *
+ * Used to abort suspended OOM jobs when user space failed to allocate more memory.
+ */
+void mali_group_abort_gp_job(struct mali_group *group, u32 job_id);
+/** @brief Abort all GP jobs from \a session
+ *
+ * Used on session close when terminating all running and queued jobs from \a session.
+ */
+void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session);
+
+void mali_group_power_on(void);
+void mali_group_power_off(void);
+mali_bool mali_group_power_is_on(struct mali_group *group);
+
+struct mali_group *mali_group_get_glob_group(u32 index);
+u32 mali_group_get_glob_num_groups(void);
+
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size);
+
+/* MMU-related functions */
+_mali_osk_errcode_t mali_group_upper_half_mmu(void * data);
+
+/* GP-related functions */
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data);
+
+/* PP-related functions */
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data);
+
+#endif /* __MALI_GROUP_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_hw_core.c b/drivers/gpu/arm/mali400/mali/common/mali_hw_core.c
new file mode 100644 (file)
index 0000000..264f3ae
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_hw_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size) /* request and map the core's register region; _MALI_OSK_ERR_OK on success */
+{
+       core->phys_addr = resource->base; /* physical base address of the register bank */
+       core->description = resource->description;
+       core->size = reg_size;
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_mem_reqregion(core->phys_addr, core->size, core->description))
+       {
+               core->mapped_registers = _mali_osk_mem_mapioregion(core->phys_addr, core->size, core->description);
+               if (NULL != core->mapped_registers)
+               {
+                       return _MALI_OSK_ERR_OK;
+               }
+               else
+               {
+                       MALI_PRINT_ERROR(("Failed to map memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+               }
+               _mali_osk_mem_unreqregion(core->phys_addr, core->size); /* mapping failed: release the region requested above */
+       }
+       else
+       {
+               MALI_PRINT_ERROR(("Failed to request memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+       }
+
+       return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_hw_core_delete(struct mali_hw_core *core) /* unmap and release the region acquired by mali_hw_core_create() */
+{
+       _mali_osk_mem_unmapioregion(core->phys_addr, core->size, core->mapped_registers);
+       core->mapped_registers = NULL; /* guard against accidental use after unmap */
+       _mali_osk_mem_unreqregion(core->phys_addr, core->size);
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_hw_core.h b/drivers/gpu/arm/mali400/mali/common/mali_hw_core.h
new file mode 100644 (file)
index 0000000..bea4266
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_HW_CORE_H__
+#define __MALI_HW_CORE_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * The common parts for all Mali HW cores (GP, PP, MMU, L2 and PMU)
+ * This struct is embedded inside all core specific structs.
+ */
+struct mali_hw_core
+{
+       u32 phys_addr;                    /**< Physical address of the registers */
+       u32 size;                         /**< Size of registers */
+       mali_io_address mapped_registers; /**< Virtual mapping of the registers */
+       const char* description;          /**< Name of unit (as specified in device configuration) */
+};
+
+#define MALI_REG_POLL_COUNT_FAST 1000
+#define MALI_REG_POLL_COUNT_SLOW 1000000
+
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size);
+void mali_hw_core_delete(struct mali_hw_core *core);
+
+MALI_STATIC_INLINE u32 mali_hw_core_register_read(struct mali_hw_core *core, u32 relative_address)
+{
+       u32 read_val;
+       read_val = _mali_osk_mem_ioread32(core->mapped_registers, relative_address);
+       MALI_DEBUG_PRINT(6, ("register_read for core %s, relative addr=0x%04X, val=0x%08X\n",
+                            core->description, relative_address, read_val));
+       return read_val;
+}
+
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+       MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n",
+                             core->description, relative_address, new_val));
+       _mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+}
+
+/* Conditionally write a register.
+ * The register is only written if the new value differs from old_val.
+ * old_val is passed by value and is NOT updated; the caller maintains its own shadow copy. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 new_val, const u32 old_val)
+{
+       MALI_DEBUG_PRINT(6, ("register_write_relaxed_conditional for core %s, relative addr=0x%04X, val=0x%08X\n",
+                             core->description, relative_address, new_val));
+       if(old_val != new_val)
+       {
+               _mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+       }
+}
+
+
+MALI_STATIC_INLINE void mali_hw_core_register_write(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+       MALI_DEBUG_PRINT(6, ("register_write for core %s, relative addr=0x%04X, val=0x%08X\n",
+                             core->description, relative_address, new_val));
+       _mali_osk_mem_iowrite32(core->mapped_registers, relative_address, new_val);
+}
+
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs)
+{
+       u32 i;
+       MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+                            core->description,relative_address, nr_of_regs));
+
+       /* Do not use burst writes against the registers */
+       for (i = 0; i< nr_of_regs; i++)
+       {
+               mali_hw_core_register_write_relaxed(core, relative_address + i*4, write_array[i]);
+       }
+}
+
+/* Conditionally write a set of registers.
+ * Each register is only written if its new value differs from the matching
+ * entry in old_array; old_array itself is read-only and is not updated here. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs, const u32* old_array)
+{
+       u32 i;
+       MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+                            core->description,relative_address, nr_of_regs));
+
+       /* Do not use burst writes against the registers */
+       for (i = 0; i< nr_of_regs; i++)
+       {
+               if(old_array[i] != write_array[i])
+               {
+                       mali_hw_core_register_write_relaxed(core, relative_address + i*4, write_array[i]);
+               }
+       }
+}
+
+#endif /* __MALI_HW_CORE_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_kernel_common.h b/drivers/gpu/arm/mali400/mali/common/mali_kernel_common.h
new file mode 100644 (file)
index 0000000..6d60d11
--- /dev/null
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_COMMON_H__
+#define __MALI_KERNEL_COMMON_H__
+
+#include "mali_osk.h"
+
+/* Make sure debug is defined when it should be */
+#ifndef DEBUG
+       #if defined(_DEBUG)
+               #define DEBUG
+       #endif
+#endif
+
+/* MALI_SEC */
+/* Macro for generating a kernel panic.
+ * Turned on off by compile-time Makefile settings
+ */
+#if defined(USING_KERNEL_PANIC)
+#include <linux/kernel.h>
+       #define MALI_PANIC(fmt, args...) panic( fmt, ## args );
+#else
+       #define MALI_PANIC(fmt, args...) 
+#endif
+
+/* The file include several useful macros for error checking, debugging and printing.
+ * - MALI_PRINTF(...)           Do not use this function: Will be included in Release builds.
+ * - MALI_DEBUG_PRINT(nr, (X) ) Prints the second argument if nr<=MALI_DEBUG_LEVEL.
+ * - MALI_DEBUG_ERROR( (X) )    Prints an errortext, a source trace, and the given error message.
+ * - MALI_DEBUG_ASSERT(exp,(X)) If the asserted expr is false, the program will exit.
+ * - MALI_DEBUG_ASSERT_POINTER(pointer)  Triggers if the pointer is a zero pointer.
+ * - MALI_DEBUG_CODE( X )       The code inside the macro is only compiled in Debug builds.
+ *
+ * The (X) means that you must add an extra parenthesis around the argumentlist.
+ *
+ * The  printf function: MALI_PRINTF(...) is routed to _mali_osk_debugmsg
+ *
+ * Suggested range for the DEBUG-LEVEL is [1:6] where
+ * [1:2] Is messages with highest priority, indicate possible errors.
+ * [3:4] Is messages with medium priority, output important variables.
+ * [5:6] Is messages with low priority, used during extensive debugging.
+ */
+
+ /**
+ *  Fundamental error macro. Reports an error code. This is abstracted to allow us to
+ *  easily switch to a different error reporting method if we want, and also to allow
+ *  us to search for error returns easily.
+ *
+ *  Note no closing semicolon - this is supplied in typical usage:
+ *
+ *  MALI_ERROR(MALI_ERROR_OUT_OF_MEMORY);
+ */
+#define MALI_ERROR(error_code) return (error_code)
+
+/**
+ *  Basic error macro, to indicate success.
+ *  Note no closing semicolon - this is supplied in typical usage:
+ *
+ *  MALI_SUCCESS;
+ */
+#define MALI_SUCCESS MALI_ERROR(_MALI_OSK_ERR_OK)
+
+/**
+ *     Basic error macro. This checks whether the given condition is true, and if not returns
+ *     from this function with the supplied error code. This is a macro so that we can override it
+ *     for stress testing.
+ *
+ *     Note that this uses the do-while-0 wrapping to ensure that we don't get problems with dangling
+ *     else clauses. Note also no closing semicolon - this is supplied in typical usage:
+ *
+ *     MALI_CHECK((p!=NULL), ERROR_NO_OBJECT);
+ */
+#define MALI_CHECK(condition, error_code) do { if(!(condition)) MALI_ERROR(error_code); } while(0)
+
+/**
+ *     Error propagation macro. If the expression given is anything other than _MALI_OSK_NO_ERROR,
+ *     then the value is returned from the enclosing function as an error code. This effectively
+ *     acts as a guard clause, and propagates error values up the call stack. This uses a
+ *     temporary value to ensure that the error expression is not evaluated twice.
+ *  If the counter for forcing a failure has been set using _mali_force_error, this error will be
+ *  returned without evaluating the expression in MALI_CHECK_NO_ERROR
+ */
+#define MALI_CHECK_NO_ERROR(expression) \
+    do { _mali_osk_errcode_t _check_no_error_result=(expression); \
+         if(_check_no_error_result != _MALI_OSK_ERR_OK) \
+         MALI_ERROR(_check_no_error_result); \
+    } while(0)
+
+/**
+ *  Pointer check macro. Checks non-null pointer.
+ */
+#define MALI_CHECK_NON_NULL(pointer, error_code) MALI_CHECK( ((pointer)!=NULL), (error_code) )
+
+/**
+ *     Error macro with goto. This checks whether the given condition is true, and if not jumps
+ *     to the specified label using a goto. The label must therefore be local to the function in
+ *     which this macro appears. This is most usually used to execute some clean-up code before
+ *     exiting with a call to ERROR.
+ *
+ *     Like the other macros, this is a macro to allow us to override the condition if we wish,
+ *     e.g. to force an error during stress testing.
+ */
+#define MALI_CHECK_GOTO(condition, label) do { if(!(condition)) goto label; } while(0)
+
+/**
+ *  Explicitly ignore a parameter passed into a function, to suppress compiler warnings.
+ *  Should only be used with parameter names.
+ */
+#define MALI_IGNORE(x) x=x
+
+#define MALI_PRINTF(args) _mali_osk_dbgmsg args;
+
+#define MALI_PRINT_ERROR(args) do{ \
+       MALI_PRINTF(("Mali: ERR: %s\n" ,__FILE__)); \
+       MALI_PRINTF(("           %s()%4d\n           ", __FUNCTION__, __LINE__)) ; \
+       MALI_PRINTF(args); \
+       MALI_PRINTF(("\n")); \
+       } while(0)
+
+#define MALI_PRINT(args) do{ \
+       MALI_PRINTF(("Mali: ")); \
+       MALI_PRINTF(args); \
+       } while (0)
+
+#ifdef DEBUG
+#ifndef mali_debug_level
+extern int mali_debug_level;
+#endif
+
+#define MALI_DEBUG_CODE(code) code
+#define MALI_DEBUG_PRINT(level, args)  do { \
+       if((level) <=  mali_debug_level)\
+        {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } \
+       } while (0)
+
+#define MALI_DEBUG_PRINT_ERROR(args) MALI_PRINT_ERROR(args)
+
+#define MALI_DEBUG_PRINT_IF(level,condition,args)  \
+       if((condition)&&((level) <=  mali_debug_level))\
+        {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+#define MALI_DEBUG_PRINT_ELSE(level, args)\
+       else if((level) <=  mali_debug_level)\
+    { MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+/**
+ * @note these variants of DEBUG ASSERTS will cause a debugger breakpoint
+ * to be entered (see _mali_osk_break() ). An alternative would be to call
+ * _mali_osk_abort(), on OSs that support it.
+ */
+#define MALI_DEBUG_PRINT_ASSERT(condition, args) do  {if( !(condition)) { MALI_PRINT_ERROR(args); _mali_osk_break(); } } while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do  {if( (pointer)== NULL) {MALI_PRINT_ERROR(("NULL pointer " #pointer)); _mali_osk_break();} } while(0)
+#define MALI_DEBUG_ASSERT(condition) do  {if( !(condition)) {MALI_PRINT_ERROR(("ASSERT failed: " #condition )); _mali_osk_break();} } while(0)
+
+#else /* DEBUG */
+
+#define MALI_DEBUG_CODE(code)
+#define MALI_DEBUG_PRINT(level,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ERROR(args) do {} while(0)
+#define MALI_DEBUG_PRINT_IF(level,condition,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ELSE(level,args) do {} while(0) /* arity fixed to match the DEBUG variant: (level, args) */
+#define MALI_DEBUG_PRINT_ASSERT(condition,args) do {} while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#define MALI_DEBUG_ASSERT(condition) do {} while(0)
+
+#endif /* DEBUG */
+
+/**
+ * variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows the GCC compiler to generate a warning. Other compilers may
+ * not support this so we define it here as an empty macro if the compiler doesn't
+ * define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __MALI_KERNEL_COMMON_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_kernel_core.c b/drivers/gpu/arm/mali400/mali/common/mali_kernel_core.c
new file mode 100644 (file)
index 0000000..731d122
--- /dev/null
@@ -0,0 +1,1150 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_kernel_core.h"
+#include "mali_memory.h"
+#include "mali_mem_validation.h"
+#include "mali_mmu.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_dlbu.h"
+#include "mali_broadcast.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_pmu.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_utilization.h"
+#include "mali_l2_cache.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include "mali_profiling_internal.h"
+#endif
+
+
+/* Mali GPU memory. Real values come from module parameter or from device specific data */
+int mali_dedicated_mem_start = 0;
+int mali_dedicated_mem_size = 0;
+int mali_shared_mem_size = 0;
+
+/* Frame buffer memory to be accessible by Mali GPU */
+int mali_fb_start = 0;
+int mali_fb_size = 0;
+
+/** Start profiling from module load? */
+int mali_boot_profiling = 0;
+
+/** Limits for the number of PP cores behind each L2 cache. */
+int mali_max_pp_cores_group_1 = 0xFF;
+int mali_max_pp_cores_group_2 = 0xFF;
+
+int mali_inited_pp_cores_group_1 = 0;
+int mali_inited_pp_cores_group_2 = 0;
+
+static _mali_product_id_t global_product_id = _MALI_PRODUCT_ID_UNKNOWN;
+static u32 global_gpu_base_address = 0;
+static u32 global_gpu_major_version = 0;
+static u32 global_gpu_minor_version = 0;
+
+/* MALI_SEC */
+static u32 first_pp_offset = 0;
+
+#define HANG_CHECK_MSECS_DEFAULT 500 /* 500 ms */
+#define WATCHDOG_MSECS_DEFAULT 4000 /* 4 s */
+
+/* timer related */
+int mali_max_job_runtime = WATCHDOG_MSECS_DEFAULT;
+int mali_hang_check_interval = HANG_CHECK_MSECS_DEFAULT;
+
+static _mali_osk_errcode_t mali_parse_product_info(void)
+{
+       /*
+        * Mali-200 has the PP core first, while Mali-300, Mali-400 and Mali-450 have the GP core first.
+        * Look at the version register for the first PP core in order to determine the GPU HW revision.
+        */
+
+       u32 first_pp_offset; /* NOTE(review): shadows the file-scope static 'first_pp_offset' above -- confirm the global is still needed */
+       _mali_osk_resource_t first_pp_resource;
+
+       global_gpu_base_address = _mali_osk_resource_base_address();
+       if (0 == global_gpu_base_address)
+       {
+               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       }
+
+       /* Find out where the first PP core is located */
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x8000, NULL))
+       {
+               /* Mali-300/400/450 */
+               first_pp_offset = 0x8000;
+       }
+       else
+       {
+               /* Mali-200 */
+               first_pp_offset = 0x0000;
+       }
+
+       /* Find the first PP core resource (again) */
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + first_pp_offset, &first_pp_resource))
+       {
+               /* Create a dummy PP object for this core so that we can read the version register */
+               struct mali_group *group = mali_group_create(NULL, NULL, NULL);
+               if (NULL != group)
+               {
+                       struct mali_pp_core *pp_core = mali_pp_create(&first_pp_resource, group, MALI_FALSE);
+                       if (NULL != pp_core)
+                       {
+                               u32 pp_version = mali_pp_core_get_version(pp_core);
+                               mali_group_delete(group); /* dummy group no longer needed once the version is read */
+
+                               global_gpu_major_version = (pp_version >> 8) & 0xFF;
+                               global_gpu_minor_version = pp_version & 0xFF;
+
+                               switch (pp_version >> 16)
+                               {
+                                       case MALI200_PP_PRODUCT_ID:
+                                               global_product_id = _MALI_PRODUCT_ID_MALI200;
+                                               MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-200 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+                                               break;
+                                       case MALI300_PP_PRODUCT_ID:
+                                               global_product_id = _MALI_PRODUCT_ID_MALI300;
+                                               MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-300 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+                                               break;
+                                       case MALI400_PP_PRODUCT_ID:
+                                               global_product_id = _MALI_PRODUCT_ID_MALI400;
+                                               MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-400 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+                                               break;
+                                       case MALI450_PP_PRODUCT_ID:
+                                               global_product_id = _MALI_PRODUCT_ID_MALI450;
+                                               MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-450 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+                                               break;
+                                       default:
+                                               MALI_DEBUG_PRINT(2, ("Found unknown Mali GPU (r%up%u)\n", global_gpu_major_version, global_gpu_minor_version));
+                                               return _MALI_OSK_ERR_FAULT;
+                               }
+
+                               return _MALI_OSK_ERR_OK;
+                       }
+                       else
+                       {
+                               MALI_PRINT_ERROR(("Failed to create initial PP object\n"));
+                       }
+               }
+               else
+               {
+                       MALI_PRINT_ERROR(("Failed to create initial group object\n"));
+               }
+       }
+       else
+       {
+               MALI_PRINT_ERROR(("First PP core not specified in config file\n"));
+       }
+
+       return _MALI_OSK_ERR_FAULT;
+}
+
+
+void mali_resource_count(u32 *pp_count, u32 *l2_count)
+{
+       *pp_count = 0;
+       *l2_count = 0;
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x08000, NULL))
+       {
+               ++(*pp_count);
+       }
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0A000, NULL))
+       {
+               ++(*pp_count);
+       }
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0C000, NULL))
+       {
+               ++(*pp_count);
+       }
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0E000, NULL))
+       {
+               ++(*pp_count);
+       }
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x28000, NULL))
+       {
+               ++(*pp_count);
+       }
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2A000, NULL))
+       {
+               ++(*pp_count);
+       }
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2C000, NULL))
+       {
+               ++(*pp_count);
+       }
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2E000, NULL))
+       {
+               ++(*pp_count);
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, NULL))
+       {
+               ++(*l2_count);
+       }
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, NULL))
+       {
+               ++(*l2_count);
+       }
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, NULL))
+       {
+               ++(*l2_count);
+       }
+}
+
+static void mali_delete_l2_cache_cores(void)
+{
+       u32 i;
+       u32 number_of_l2_cores = mali_l2_cache_core_get_glob_num_l2_cores(); /* renamed: was misspelled 'number_of_l2_ccores' */
+
+       for (i = 0; i < number_of_l2_cores; i++)
+       {
+               mali_l2_cache_delete(mali_l2_cache_core_get_glob_l2_core(i));
+       }
+}
+
+static _mali_osk_errcode_t mali_create_l2_cache_core(_mali_osk_resource_t *resource) /* NULL resource is allowed: no core is created (dummy/Mali-200 path) */
+{
+       if (NULL != resource)
+       {
+               struct mali_l2_cache_core *l2_cache;
+
+               MALI_DEBUG_PRINT(3, ("Found L2 cache %s\n", resource->description));
+
+               l2_cache = mali_l2_cache_create(resource);
+               if (NULL == l2_cache)
+               {
+                       MALI_PRINT_ERROR(("Failed to create L2 cache object\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+       MALI_DEBUG_PRINT(3, ("Created L2 cache core object\n")) ; /* NOTE(review): also printed on the NULL/dummy path -- confirm intended */
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Probe and create the L2 cache core(s) appropriate for the detected GPU
+ * (global_product_id must already be set by mali_parse_product_info()).
+ *
+ * - Mali-200: no real L2; creates a dummy via a NULL resource.
+ * - Mali-300/400: single L2 at base + 0x1000 (mandatory).
+ * - Mali-450: GP L2 at +0x10000 and PP-group-0 L2 at +0x1000 (both
+ *   mandatory), plus an optional PP-group-1 L2 at +0x11000.
+ *
+ * Returns _MALI_OSK_ERR_FAULT for a missing mandatory L2 or an unknown
+ * product id. */
+static _mali_osk_errcode_t mali_parse_config_l2_cache(void)
+{
+       if (_MALI_PRODUCT_ID_MALI200 == global_product_id)
+       {
+               /* Create dummy L2 cache - nothing happens here!!! */
+               return mali_create_l2_cache_core(NULL);
+       }
+       else if (_MALI_PRODUCT_ID_MALI300 == global_product_id || _MALI_PRODUCT_ID_MALI400 == global_product_id)
+       {
+               _mali_osk_resource_t l2_resource;
+               if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(global_gpu_base_address + 0x1000, &l2_resource))
+               {
+                       MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache in config file\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
+
+               return mali_create_l2_cache_core(&l2_resource);
+       }
+       else if (_MALI_PRODUCT_ID_MALI450 == global_product_id)
+       {
+               /*
+                * L2 for GP    at 0x10000
+                * L2 for PP0-3 at 0x01000
+                * L2 for PP4-7 at 0x11000 (optional)
+                */
+
+               _mali_osk_resource_t l2_gp_resource;
+               _mali_osk_resource_t l2_pp_grp0_resource;
+               _mali_osk_resource_t l2_pp_grp1_resource;
+
+               /* Make cluster for GP's L2 */
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, &l2_gp_resource))
+               {
+                       _mali_osk_errcode_t ret;
+                       MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for GP\n"));
+                       ret = mali_create_l2_cache_core(&l2_gp_resource);
+                       if (_MALI_OSK_ERR_OK != ret)
+                       {
+                               return ret;
+                       }
+               }
+               else
+               {
+                       MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for GP in config file\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
+
+               /* Make cluster for first PP core group */
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, &l2_pp_grp0_resource))
+               {
+                       _mali_osk_errcode_t ret;
+                       MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 0\n"));
+                       ret = mali_create_l2_cache_core(&l2_pp_grp0_resource);
+                       if (_MALI_OSK_ERR_OK != ret)
+                       {
+                               return ret;
+                       }
+               }
+               else
+               {
+                       MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for PP group 0 in config file\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
+
+               /* Second PP core group is optional, don't fail if we don't find it */
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, &l2_pp_grp1_resource))
+               {
+                       _mali_osk_errcode_t ret;
+                       MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 1\n"));
+                       ret = mali_create_l2_cache_core(&l2_pp_grp1_resource);
+                       if (_MALI_OSK_ERR_OK != ret)
+                       {
+                               return ret;
+                       }
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Create one physical core group attached to @cache: a group object, its
+ * MMU (@resource_mmu, mandatory), and optionally a GP core (@resource_gp)
+ * and/or a PP core (@resource_pp) — pass NULL to omit either.
+ *
+ * On any sub-object failure the group is deleted; per the inline comments
+ * below, mali_group_delete() is relied upon to also free everything
+ * already linked into the group (e.g. the MMU) — TODO confirm.
+ *
+ * Returns _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT otherwise. */
+static _mali_osk_errcode_t mali_create_group(struct mali_l2_cache_core *cache,
+                                             _mali_osk_resource_t *resource_mmu,
+                                             _mali_osk_resource_t *resource_gp,
+                                             _mali_osk_resource_t *resource_pp)
+{
+       struct mali_mmu_core *mmu;
+       struct mali_group *group;
+
+       MALI_DEBUG_PRINT(3, ("Starting new group for MMU %s\n", resource_mmu->description));
+
+       /* Create the group object */
+       group = mali_group_create(cache, NULL, NULL);
+       if (NULL == group)
+       {
+               MALI_PRINT_ERROR(("Failed to create group object for MMU %s\n", resource_mmu->description));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Create the MMU object inside group */
+       mmu = mali_mmu_create(resource_mmu, group, MALI_FALSE);
+       if (NULL == mmu)
+       {
+               MALI_PRINT_ERROR(("Failed to create MMU object\n"));
+               mali_group_delete(group);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       if (NULL != resource_gp)
+       {
+               /* Create the GP core object inside this group */
+               struct mali_gp_core *gp_core = mali_gp_create(resource_gp, group);
+               if (NULL == gp_core)
+               {
+                       /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+                       MALI_PRINT_ERROR(("Failed to create GP object\n"));
+                       mali_group_delete(group);
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+
+       if (NULL != resource_pp)
+       {
+               struct mali_pp_core *pp_core;
+
+               /* Create the PP core object inside this group */
+               pp_core = mali_pp_create(resource_pp, group, MALI_FALSE);
+               if (NULL == pp_core)
+               {
+                       /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+                       MALI_PRINT_ERROR(("Failed to create PP object\n"));
+                       mali_group_delete(group);
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+
+       /* Reset group */
+       mali_group_reset(group);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Create the Mali-450 "virtual" group: DLBU + broadcast unit + a group
+ * holding the broadcast MMU and broadcast PP core. All four resources are
+ * mandatory (caller verifies they were found before calling).
+ *
+ * Cleanup on failure is staged: objects created before the group are
+ * deleted individually; once the group owns them, mali_group_delete() is
+ * relied on to free everything linked in (per the inline comment below).
+ *
+ * Returns _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT otherwise. */
+static _mali_osk_errcode_t mali_create_virtual_group(_mali_osk_resource_t *resource_mmu_pp_bcast,
+                                                    _mali_osk_resource_t *resource_pp_bcast,
+                                                    _mali_osk_resource_t *resource_dlbu,
+                                                    _mali_osk_resource_t *resource_bcast)
+{
+       struct mali_mmu_core *mmu_pp_bcast_core;
+       struct mali_pp_core *pp_bcast_core;
+       struct mali_dlbu_core *dlbu_core;
+       struct mali_bcast_unit *bcast_core;
+       struct mali_group *group;
+
+       MALI_DEBUG_PRINT(2, ("Starting new virtual group for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
+
+       /* Create the DLBU core object */
+       dlbu_core = mali_dlbu_create(resource_dlbu);
+       if (NULL == dlbu_core)
+       {
+               MALI_PRINT_ERROR(("Failed to create DLBU object \n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Create the Broadcast unit core */
+       bcast_core = mali_bcast_unit_create(resource_bcast);
+       if (NULL == bcast_core)
+       {
+               MALI_PRINT_ERROR(("Failed to create Broadcast unit object!\n"));
+               mali_dlbu_delete(dlbu_core);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Create the group object */
+       group = mali_group_create(NULL, dlbu_core, bcast_core);
+       if (NULL == group)
+       {
+               MALI_PRINT_ERROR(("Failed to create group object for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
+               mali_bcast_unit_delete(bcast_core);
+               mali_dlbu_delete(dlbu_core);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Create the MMU object inside group */
+       mmu_pp_bcast_core = mali_mmu_create(resource_mmu_pp_bcast, group, MALI_TRUE);
+       if (NULL == mmu_pp_bcast_core)
+       {
+               MALI_PRINT_ERROR(("Failed to create MMU PP broadcast object\n"));
+               mali_group_delete(group);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Create the PP core object inside this group */
+       pp_bcast_core = mali_pp_create(resource_pp_bcast, group, MALI_TRUE);
+       if (NULL == pp_bcast_core)
+       {
+               /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+               MALI_PRINT_ERROR(("Failed to create PP object\n"));
+               mali_group_delete(group);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Discover GP/PP/MMU resources for the detected product and build the
+ * core groups. Must run after mali_parse_config_l2_cache(), since groups
+ * are attached to the already-created L2 cores by global index.
+ *
+ * Updates mali_inited_pp_cores_group_{1,2} and clamps
+ * mali_max_pp_cores_group_{1,2} to what was actually initialized.
+ *
+ * Returns _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_FAULT for missing
+ * mandatory resources or an unknown product id. */
+static _mali_osk_errcode_t mali_parse_config_groups(void)
+{
+       if (_MALI_PRODUCT_ID_MALI200 == global_product_id)
+       {
+               _mali_osk_errcode_t err;
+               _mali_osk_resource_t resource_gp;
+               _mali_osk_resource_t resource_pp;
+               _mali_osk_resource_t resource_mmu;
+
+               MALI_DEBUG_ASSERT(1 == mali_l2_cache_core_get_glob_num_l2_cores());
+
+               /* Mali-200 layout: PP at +0x0, GP at +0x2000, MMU at +0x3000. */
+               if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(global_gpu_base_address + 0x02000, &resource_gp) ||
+                   _MALI_OSK_ERR_OK != _mali_osk_resource_find(global_gpu_base_address + 0x00000, &resource_pp) ||
+                   _MALI_OSK_ERR_OK != _mali_osk_resource_find(global_gpu_base_address + 0x03000, &resource_mmu))
+               {
+                       /* Missing mandatory core(s) */
+                       return _MALI_OSK_ERR_FAULT;
+               }
+
+               err = mali_create_group(mali_l2_cache_core_get_glob_l2_core(0), &resource_mmu, &resource_gp, &resource_pp);
+               if (err == _MALI_OSK_ERR_OK)
+               {
+                       mali_inited_pp_cores_group_1++;
+                       mali_max_pp_cores_group_1 = mali_inited_pp_cores_group_1; /* always 1 */
+                       mali_max_pp_cores_group_2 = mali_inited_pp_cores_group_2; /* always zero */
+               }
+
+               return err;
+       }
+       else if (_MALI_PRODUCT_ID_MALI300 == global_product_id ||
+                _MALI_PRODUCT_ID_MALI400 == global_product_id ||
+                _MALI_PRODUCT_ID_MALI450 == global_product_id)
+       {
+               _mali_osk_errcode_t err;
+               /* L2 indices into the global L2 list: on Mali-300/400 there is
+                * a single L2 (index 0 for everything); Mali-450 overrides the
+                * PP-group indices below. */
+               int cluster_id_gp = 0;
+               int cluster_id_pp_grp0 = 0;
+               int cluster_id_pp_grp1 = 0;
+               int i;
+
+               _mali_osk_resource_t resource_gp;
+               _mali_osk_resource_t resource_gp_mmu;
+               _mali_osk_resource_t resource_pp[8];
+               _mali_osk_resource_t resource_pp_mmu[8];
+               _mali_osk_resource_t resource_pp_mmu_bcast;
+               _mali_osk_resource_t resource_pp_bcast;
+               _mali_osk_resource_t resource_dlbu;
+               _mali_osk_resource_t resource_bcast;
+               _mali_osk_errcode_t resource_gp_found;
+               _mali_osk_errcode_t resource_gp_mmu_found;
+               _mali_osk_errcode_t resource_pp_found[8];
+               _mali_osk_errcode_t resource_pp_mmu_found[8];
+               /* The four *_bcast/dlbu "found" flags are only assigned and
+                * only read inside the Mali-450 branches below. */
+               _mali_osk_errcode_t resource_pp_mmu_bcast_found;
+               _mali_osk_errcode_t resource_pp_bcast_found;
+               _mali_osk_errcode_t resource_dlbu_found;
+               _mali_osk_errcode_t resource_bcast_found;
+
+               if (_MALI_PRODUCT_ID_MALI450 == global_product_id)
+               {
+                       /* Mali-450 have separate L2s for GP, and PP core group(s) */
+                       cluster_id_pp_grp0 = 1;
+                       cluster_id_pp_grp1 = 2;
+               }
+
+               /* Probe every possible core at its fixed register offset;
+                * absence is recorded, not fatal (mandatory-ness is checked
+                * afterwards). */
+               resource_gp_found = _mali_osk_resource_find(global_gpu_base_address + 0x00000, &resource_gp);
+               resource_gp_mmu_found = _mali_osk_resource_find(global_gpu_base_address + 0x03000, &resource_gp_mmu);
+               resource_pp_found[0] = _mali_osk_resource_find(global_gpu_base_address + 0x08000, &(resource_pp[0]));
+               resource_pp_found[1] = _mali_osk_resource_find(global_gpu_base_address + 0x0A000, &(resource_pp[1]));
+               resource_pp_found[2] = _mali_osk_resource_find(global_gpu_base_address + 0x0C000, &(resource_pp[2]));
+               resource_pp_found[3] = _mali_osk_resource_find(global_gpu_base_address + 0x0E000, &(resource_pp[3]));
+               resource_pp_found[4] = _mali_osk_resource_find(global_gpu_base_address + 0x28000, &(resource_pp[4]));
+               resource_pp_found[5] = _mali_osk_resource_find(global_gpu_base_address + 0x2A000, &(resource_pp[5]));
+               resource_pp_found[6] = _mali_osk_resource_find(global_gpu_base_address + 0x2C000, &(resource_pp[6]));
+               resource_pp_found[7] = _mali_osk_resource_find(global_gpu_base_address + 0x2E000, &(resource_pp[7]));
+               resource_pp_mmu_found[0] = _mali_osk_resource_find(global_gpu_base_address + 0x04000, &(resource_pp_mmu[0]));
+               resource_pp_mmu_found[1] = _mali_osk_resource_find(global_gpu_base_address + 0x05000, &(resource_pp_mmu[1]));
+               resource_pp_mmu_found[2] = _mali_osk_resource_find(global_gpu_base_address + 0x06000, &(resource_pp_mmu[2]));
+               resource_pp_mmu_found[3] = _mali_osk_resource_find(global_gpu_base_address + 0x07000, &(resource_pp_mmu[3]));
+               resource_pp_mmu_found[4] = _mali_osk_resource_find(global_gpu_base_address + 0x1C000, &(resource_pp_mmu[4]));
+               resource_pp_mmu_found[5] = _mali_osk_resource_find(global_gpu_base_address + 0x1D000, &(resource_pp_mmu[5]));
+               resource_pp_mmu_found[6] = _mali_osk_resource_find(global_gpu_base_address + 0x1E000, &(resource_pp_mmu[6]));
+               resource_pp_mmu_found[7] = _mali_osk_resource_find(global_gpu_base_address + 0x1F000, &(resource_pp_mmu[7]));
+
+
+               if (_MALI_PRODUCT_ID_MALI450 == global_product_id)
+               {
+                       /* Mali-450 virtual-group units: all four mandatory. */
+                       resource_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x13000, &resource_bcast);
+                       resource_dlbu_found = _mali_osk_resource_find(global_gpu_base_address + 0x14000, &resource_dlbu);
+                       resource_pp_mmu_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x15000, &resource_pp_mmu_bcast);
+                       resource_pp_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x16000, &resource_pp_bcast);
+
+                       if (_MALI_OSK_ERR_OK != resource_bcast_found ||
+                           _MALI_OSK_ERR_OK != resource_dlbu_found ||
+                           _MALI_OSK_ERR_OK != resource_pp_mmu_bcast_found ||
+                           _MALI_OSK_ERR_OK != resource_pp_bcast_found)
+                       {
+                               /* Missing mandatory core(s) for Mali-450 */
+                               MALI_DEBUG_PRINT(2, ("Missing mandatory resources, Mali-450 needs DLBU, Broadcast unit, virtual PP core and virtual MMU\n"));
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+               }
+
+               if (_MALI_OSK_ERR_OK != resource_gp_found ||
+                   _MALI_OSK_ERR_OK != resource_gp_mmu_found ||
+                   _MALI_OSK_ERR_OK != resource_pp_found[0] ||
+                   _MALI_OSK_ERR_OK != resource_pp_mmu_found[0])
+               {
+                       /* Missing mandatory core(s) */
+                       MALI_DEBUG_PRINT(2, ("Missing mandatory resource, need at least one GP and one PP, both with a separate MMU\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
+
+               MALI_DEBUG_ASSERT(1 <= mali_l2_cache_core_get_glob_num_l2_cores());
+               err = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_gp), &resource_gp_mmu, &resource_gp, NULL);
+               if (err != _MALI_OSK_ERR_OK)
+               {
+                       return err;
+               }
+
+               /* Create group for first (and mandatory) PP core */
+               MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= (cluster_id_pp_grp0 + 1)); /* >= 1 on Mali-300 and Mali-400, >= 2 on Mali-450 */
+               err = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[0], NULL, &resource_pp[0]);
+               if (err != _MALI_OSK_ERR_OK)
+               {
+                       return err;
+               }
+
+               mali_inited_pp_cores_group_1++;
+
+               /* Create groups for rest of the cores in the first PP core group */
+               for (i = 1; i < 4; i++) /* First half of the PP cores belong to first core group */
+               {
+                       if (mali_inited_pp_cores_group_1 < mali_max_pp_cores_group_1)
+                       {
+                               /* Only build a group when both the PP core and its MMU were found. */
+                               if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i])
+                               {
+                                       err = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[i], NULL, &resource_pp[i]);
+                                       if (err != _MALI_OSK_ERR_OK)
+                                       {
+                                               return err;
+                                       }
+                                       mali_inited_pp_cores_group_1++;
+                               }
+                       }
+               }
+
+               /* Create groups for cores in the second PP core group */
+               for (i = 4; i < 8; i++) /* Second half of the PP cores belong to second core group */
+               {
+                       if (mali_inited_pp_cores_group_2 < mali_max_pp_cores_group_2)
+                       {
+                               if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i])
+                               {
+                                       MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= 2); /* Only Mali-450 have a second core group */
+                                       err = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp1), &resource_pp_mmu[i], NULL, &resource_pp[i]);
+                                       if (err != _MALI_OSK_ERR_OK)
+                                       {
+                                               return err;
+                                       }
+                                       mali_inited_pp_cores_group_2++;
+                               }
+                       }
+               }
+
+               if(_MALI_PRODUCT_ID_MALI450 == global_product_id)
+               {
+                       err = mali_create_virtual_group(&resource_pp_mmu_bcast, &resource_pp_bcast, &resource_dlbu, &resource_bcast);
+                       if (_MALI_OSK_ERR_OK != err)
+                       {
+                               return err;
+                       }
+               }
+
+               /* Clamp the "max" counts to what actually came up. */
+               mali_max_pp_cores_group_1 = mali_inited_pp_cores_group_1;
+               mali_max_pp_cores_group_2 = mali_inited_pp_cores_group_2;
+               MALI_DEBUG_PRINT(2, ("%d+%d PP cores initialized\n", mali_inited_pp_cores_group_1, mali_inited_pp_cores_group_2));
+
+               return _MALI_OSK_ERR_OK;
+       }
+
+       /* No known HW core */
+       return _MALI_OSK_ERR_FAULT;
+}
+
+/* Create the PMU object if a resource exists at base + 0x02000.
+ * The PMU is told how many PP cores and L2 caches exist (counted from the
+ * resource table) so it can manage their power domains.
+ *
+ * NOTE(review): 0x02000 is also the Mali-200 GP offset (see
+ * mali_parse_config_groups()); presumably the platform resource table
+ * only lists a PMU here on products that have one — confirm.
+ *
+ * A missing PMU is not an error; only PMU-object creation failure is. */
+static _mali_osk_errcode_t mali_parse_config_pmu(void)
+{
+       _mali_osk_resource_t resource_pmu;
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x02000, &resource_pmu))
+       {
+               u32 number_of_pp_cores = 0;
+               u32 number_of_l2_caches = 0;
+
+               mali_resource_count(&number_of_pp_cores, &number_of_l2_caches);
+
+               if (NULL == mali_pmu_create(&resource_pmu, number_of_pp_cores, number_of_l2_caches))
+               {
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+
+       /* It's ok if the PMU doesn't exist */
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Configure the driver's memory regions: dedicated memory, shared OS
+ * memory, and the frame-buffer validation range. Module parameters
+ * (mali_dedicated_mem_*, mali_shared_mem_size, mali_fb_*) take precedence;
+ * otherwise values come from the platform device data.
+ *
+ * Returns _MALI_OSK_ERR_INVALID_ARGS when no GPU memory is specified at
+ * all, or the error from the failing registration call.
+ *
+ * NOTE(review): on registration failure this function calls
+ * mali_memory_terminate() itself, and the caller's error path
+ * (parse_memory_config_failed in mali_initialize_subsystems()) calls it
+ * again — confirm mali_memory_terminate() is safe to invoke twice. */
+static _mali_osk_errcode_t mali_parse_config_memory(void)
+{
+       _mali_osk_errcode_t ret;
+
+       if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size && 0 == mali_shared_mem_size)
+       {
+               /* Memory settings are not overridden by module parameters, so use device settings */
+               struct _mali_osk_device_data data = { 0, };
+
+               if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data))
+               {
+                       /* Use device specific settings (if defined) */
+                       mali_dedicated_mem_start = data.dedicated_mem_start;
+                       mali_dedicated_mem_size = data.dedicated_mem_size;
+                       mali_shared_mem_size = data.shared_mem_size;
+               }
+
+               if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size && 0 == mali_shared_mem_size)
+               {
+                       /* No GPU memory specified */
+                       return _MALI_OSK_ERR_INVALID_ARGS;
+               }
+
+               MALI_DEBUG_PRINT(2, ("Using device defined memory settings (dedicated: 0x%08X@0x%08X, shared: 0x%08X)\n",
+                                    mali_dedicated_mem_size, mali_dedicated_mem_start, mali_shared_mem_size));
+       }
+       else
+       {
+               MALI_DEBUG_PRINT(2, ("Using module defined memory settings (dedicated: 0x%08X@0x%08X, shared: 0x%08X)\n",
+                                    mali_dedicated_mem_size, mali_dedicated_mem_start, mali_shared_mem_size));
+       }
+
+       if (0 < mali_dedicated_mem_size && 0 != mali_dedicated_mem_start)
+       {
+               /* Dedicated memory */
+               ret = mali_memory_core_resource_dedicated_memory(mali_dedicated_mem_start, mali_dedicated_mem_size);
+               if (_MALI_OSK_ERR_OK != ret)
+               {
+                       MALI_PRINT_ERROR(("Failed to register dedicated memory\n"));
+                       mali_memory_terminate();
+                       return ret;
+               }
+       }
+
+       if (0 < mali_shared_mem_size)
+       {
+               /* Shared OS memory */
+               ret = mali_memory_core_resource_os_memory(mali_shared_mem_size);
+               if (_MALI_OSK_ERR_OK != ret)
+               {
+                       MALI_PRINT_ERROR(("Failed to register shared OS memory\n"));
+                       mali_memory_terminate();
+                       return ret;
+               }
+       }
+
+       if (0 == mali_fb_start && 0 == mali_fb_size)
+       {
+               /* Frame buffer settings are not overridden by module parameters, so use device settings */
+               struct _mali_osk_device_data data = { 0, };
+
+               if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data))
+               {
+                       /* Use device specific settings (if defined) */
+                       mali_fb_start = data.fb_start;
+                       mali_fb_size = data.fb_size;
+               }
+
+               MALI_DEBUG_PRINT(2, ("Using device defined frame buffer settings (0x%08X@0x%08X)\n",
+                                    mali_fb_size, mali_fb_start));
+       }
+       else
+       {
+               MALI_DEBUG_PRINT(2, ("Using module defined frame buffer settings (0x%08X@0x%08X)\n",
+                                    mali_fb_size, mali_fb_start));
+       }
+
+       if (0 != mali_fb_size)
+       {
+               /* Register frame buffer */
+               ret = mali_mem_validation_add_range(mali_fb_start, mali_fb_size);
+               if (_MALI_OSK_ERR_OK != ret)
+               {
+                       MALI_PRINT_ERROR(("Failed to register frame buffer memory region\n"));
+                       mali_memory_terminate();
+                       return ret;
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Bring up all driver subsystems in dependency order: session, profiling
+ * (best effort), memory, memory config, PMU, PM, product detection, MMU,
+ * DLBU (Mali-450 only), L2/group configuration, schedulers, utilization.
+ *
+ * The GPU is held powered (_mali_osk_pm_dev_ref_add) across the hardware
+ * probing section and released at the end and on every error path past it.
+ *
+ * On failure, the goto ladder below unwinds in reverse order; returns the
+ * error from the step that failed. */
+_mali_osk_errcode_t mali_initialize_subsystems(void)
+{
+       _mali_osk_errcode_t err;
+
+       err = mali_session_initialize();
+       if (_MALI_OSK_ERR_OK != err) goto session_init_failed;
+
+#if defined(CONFIG_MALI400_PROFILING)
+       err = _mali_osk_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
+       if (_MALI_OSK_ERR_OK != err)
+       {
+               /* No biggie if we wheren't able to initialize the profiling */
+               MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n"));
+       }
+#endif
+
+       err = mali_memory_initialize();
+       if (_MALI_OSK_ERR_OK != err) goto memory_init_failed;
+
+       /* Configure memory early. Memory allocation needed for mali_mmu_initialize. */
+       err = mali_parse_config_memory();
+       if (_MALI_OSK_ERR_OK != err) goto parse_memory_config_failed;
+
+       /* Initialize the MALI PMU */
+       err = mali_parse_config_pmu();
+       if (_MALI_OSK_ERR_OK != err) goto parse_pmu_config_failed;
+
+       /* Initialize the power management module */
+       err = mali_pm_initialize();
+       if (_MALI_OSK_ERR_OK != err) goto pm_init_failed;
+
+       /* Make sure the power stays on for the rest of this function */
+       err = _mali_osk_pm_dev_ref_add();
+       if (_MALI_OSK_ERR_OK != err) goto pm_always_on_failed;
+
+       /* Detect which Mali GPU we are dealing with */
+       err = mali_parse_product_info();
+       if (_MALI_OSK_ERR_OK != err) goto product_info_parsing_failed;
+
+       /* The global_product_id is now populated with the correct Mali GPU */
+
+       /* Initialize MMU module */
+       err = mali_mmu_initialize();
+       if (_MALI_OSK_ERR_OK != err) goto mmu_init_failed;
+
+       if (_MALI_PRODUCT_ID_MALI450 == global_product_id)
+       {
+               err = mali_dlbu_initialize();
+               if (_MALI_OSK_ERR_OK != err) goto dlbu_init_failed;
+       }
+
+       /* Start configuring the actual Mali hardware. */
+       err = mali_parse_config_l2_cache();
+       if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+       err = mali_parse_config_groups();
+       if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+
+       /* Initialize the schedulers */
+       err = mali_scheduler_initialize();
+       if (_MALI_OSK_ERR_OK != err) goto scheduler_init_failed;
+       err = mali_gp_scheduler_initialize();
+       if (_MALI_OSK_ERR_OK != err) goto gp_scheduler_init_failed;
+       err = mali_pp_scheduler_initialize();
+       if (_MALI_OSK_ERR_OK != err) goto pp_scheduler_init_failed;
+
+       /* Initialize the GPU utilization tracking */
+       err = mali_utilization_init();
+       if (_MALI_OSK_ERR_OK != err) goto utilization_init_failed;
+
+       /* Allowing the system to be turned off */
+       _mali_osk_pm_dev_ref_dec();
+
+       MALI_SUCCESS; /* all ok */
+
+       /* Error handling */
+
+utilization_init_failed:
+       mali_pp_scheduler_terminate();
+pp_scheduler_init_failed:
+       mali_gp_scheduler_terminate();
+gp_scheduler_init_failed:
+       mali_scheduler_terminate();
+scheduler_init_failed:
+config_parsing_failed:
+       mali_delete_l2_cache_cores(); /* Delete L2 cache cores even if config parsing failed. */
+dlbu_init_failed:
+       /* NOTE(review): reached via config_parsing_failed on every product,
+        * so mali_dlbu_terminate() runs even when mali_dlbu_initialize()
+        * was skipped (non-Mali-450); mali_terminate_subsystems() guards
+        * this call with a product check — confirm terminate is safe
+        * without a prior initialize. */
+       mali_dlbu_terminate();
+mmu_init_failed:
+       mali_mmu_terminate();
+       /* Nothing to roll back */
+product_info_parsing_failed:
+       /* Allowing the system to be turned off */
+       _mali_osk_pm_dev_ref_dec();
+pm_always_on_failed:
+       mali_pm_terminate();
+pm_init_failed:
+       {
+               struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+               if (NULL != pmu)
+               {
+                       mali_pmu_delete(pmu);
+               }
+       }
+parse_pmu_config_failed:
+       /* undoing mali_parse_config_memory() is done by mali_memory_terminate() */
+parse_memory_config_failed:
+       mali_memory_terminate();
+memory_init_failed:
+#if defined(CONFIG_MALI400_PROFILING)
+       _mali_osk_profiling_term();
+#endif
+       mali_session_terminate();
+session_init_failed:
+       return err;
+}
+
+/* Shut down every subsystem started by mali_initialize_subsystems(), in
+ * reverse order. Holds a PM device reference so the GPU stays powered for
+ * the hardware-touching teardown steps, then releases it before the final
+ * session terminate. */
+void mali_terminate_subsystems(void)
+{
+       struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+       MALI_DEBUG_PRINT(2, ("terminate_subsystems() called\n"));
+
+       /* shut down subsystems in reverse order from startup */
+
+       /* We need the GPU to be powered up for the terminate sequence */
+       _mali_osk_pm_dev_ref_add();
+
+       mali_utilization_term();
+       mali_pp_scheduler_terminate();
+       mali_gp_scheduler_terminate();
+       mali_scheduler_terminate();
+       mali_delete_l2_cache_cores();
+       /* DLBU only exists (and was only initialized) on Mali-450 */
+       if (_MALI_PRODUCT_ID_MALI450 == global_product_id)
+       {
+               mali_dlbu_terminate();
+       }
+       mali_mmu_terminate();
+       if (NULL != pmu)
+       {
+               mali_pmu_delete(pmu);
+       }
+       mali_pm_terminate();
+       mali_memory_terminate();
+#if defined(CONFIG_MALI400_PROFILING)
+       _mali_osk_profiling_term();
+#endif
+
+       /* Allowing the system to be turned off */
+       _mali_osk_pm_dev_ref_dec();
+
+       mali_session_terminate();
+}
+
+/* Return the product id detected by mali_parse_product_info(). */
+_mali_product_id_t mali_kernel_core_get_product_id(void)
+{
+       return global_product_id;
+}
+
+/* User/kernel API version handshake: flag whether the version requested
+ * by user space matches ours, then report the kernel-side version back in
+ * args->version. Always succeeds, compatible or not. */
+_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args )
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+       /* check compatability */
+       args->compatible = (_MALI_UK_API_VERSION == args->version) ? 1 : 0;
+
+       args->version = _MALI_UK_API_VERSION; /* report our version */
+
+       /* success regardless of being compatible or not */
+       MALI_SUCCESS;
+}
+
+/* Wait for (and dequeue) the next notification on the session's ioctl
+ * queue and copy it into @args->data. May sleep in
+ * _mali_osk_notification_queue_receive() (per the inline comment below).
+ *
+ * If the queue is already gone the session is shutting down: user space
+ * is told via _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS and the call
+ * still succeeds. Receive errors are passed through to the caller. */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args )
+{
+       _mali_osk_errcode_t err;
+       _mali_osk_notification_t * notification;
+       _mali_osk_notification_queue_t *queue;
+
+       /* check input */
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+       queue = ((struct mali_session_data *)args->ctx)->ioctl_queue;
+
+       /* if the queue does not exist we're currently shutting down */
+       if (NULL == queue)
+       {
+               MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+               args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
+               MALI_SUCCESS;
+       }
+
+       /* receive a notification, might sleep */
+       err = _mali_osk_notification_queue_receive(queue, &notification);
+       if (_MALI_OSK_ERR_OK != err)
+       {
+               MALI_ERROR(err); /* errcode returned, pass on to caller */
+       }
+
+       /* copy the buffer to the user */
+       args->type = (_mali_uk_notification_type)notification->notification_type;
+       _mali_osk_memcpy(&args->data, notification->result_buffer, notification->result_buffer_size);
+
+       /* finished with the notification */
+       _mali_osk_notification_delete( notification );
+
+       MALI_SUCCESS; /* all ok */
+}
+
+/* Post an empty notification of @args->type onto the session's ioctl
+ * queue (payload size 0), waking any waiter in
+ * _mali_ukk_wait_for_notification(). Quietly succeeds if the queue is
+ * already gone (session shutdown in progress); returns
+ * _MALI_OSK_ERR_NOMEM if the notification object cannot be allocated. */
+_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args )
+{
+       _mali_osk_notification_t * notification;
+       _mali_osk_notification_queue_t *queue;
+
+       /* check input */
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+       queue = ((struct mali_session_data *)args->ctx)->ioctl_queue;
+
+       /* if the queue does not exist we're currently shutting down */
+       if (NULL == queue)
+       {
+               MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+               MALI_SUCCESS;
+       }
+
+       notification = _mali_osk_notification_create(args->type, 0);
+       if (NULL == notification)
+       {
+               MALI_PRINT_ERROR( ("Failed to create notification object\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       _mali_osk_notification_queue_send(queue, notification);
+
+       MALI_SUCCESS; /* all ok */
+}
+
+/**
+ * Open a new driver session (one per userspace file descriptor).
+ *
+ * Allocates the session object, its ioctl notification queue, an MMU page
+ * directory (with the DLBU page mapped in), begins the memory session and,
+ * when CONFIG_SYNC is enabled on >= 3.4 kernels, sets up the pending-jobs
+ * list/lock used for fence waits. On success *context receives the session
+ * pointer that later ioctls pass back via args->ctx.
+ *
+ * Error handling is unwind-by-repetition: each failure branch frees exactly
+ * what was set up before it, in reverse order.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM on any failure.
+ */
+_mali_osk_errcode_t _mali_ukk_open(void **context)
+{
+       struct mali_session_data *session;
+
+       /* allocated struct to track this session */
+       session = (struct mali_session_data *)_mali_osk_calloc(1, sizeof(struct mali_session_data));
+       MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_NOMEM);
+
+       MALI_DEBUG_PRINT(3, ("Session starting\n"));
+
+       /* create a response queue for this session */
+       session->ioctl_queue = _mali_osk_notification_queue_init();
+       if (NULL == session->ioctl_queue)
+       {
+               _mali_osk_free(session);
+               MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+       }
+
+       session->page_directory = mali_mmu_pagedir_alloc();
+       if (NULL == session->page_directory)
+       {
+               _mali_osk_notification_queue_term(session->ioctl_queue);
+               _mali_osk_free(session);
+               MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+       }
+
+       if (_MALI_OSK_ERR_OK != mali_mmu_pagedir_map(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE))
+       {
+               MALI_PRINT_ERROR(("Failed to map DLBU page into session\n"));
+               _mali_osk_notification_queue_term(session->ioctl_queue);
+               _mali_osk_free(session);
+               MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+       }
+
+       /* only point the DLBU mapping at real hardware when a DLBU exists
+        * (phys addr is 0 otherwise, e.g. on Mali-400 without DLBU) */
+       if (0 != mali_dlbu_phys_addr)
+       {
+               mali_mmu_pagedir_update(session->page_directory, MALI_DLBU_VIRT_ADDR, mali_dlbu_phys_addr,
+                                       _MALI_OSK_MALI_PAGE_SIZE, MALI_CACHE_STANDARD);
+       }
+
+       if (_MALI_OSK_ERR_OK != mali_memory_session_begin(session))
+       {
+               mali_mmu_pagedir_free(session->page_directory);
+               _mali_osk_notification_queue_term(session->ioctl_queue);
+               _mali_osk_free(session);
+               MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+       }
+
+#ifdef CONFIG_SYNC
+/* MALI_SEC */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+       _mali_osk_list_init(&session->pending_jobs);
+       session->pending_jobs_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK,
+                                                        0, _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS);
+       if (NULL == session->pending_jobs_lock)
+       {
+               MALI_PRINT_ERROR(("Failed to create pending jobs lock\n"));
+               mali_memory_session_end(session);
+               mali_mmu_pagedir_free(session->page_directory);
+               _mali_osk_notification_queue_term(session->ioctl_queue);
+               _mali_osk_free(session);
+               MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+       }
+#endif
+#endif
+
+       *context = (void*)session;
+
+       /* Add session to the list of all sessions. */
+       mali_session_add(session);
+
+       /* Initialize list of jobs on this session */
+       /* NOTE(review): job_list is initialized AFTER the session is published
+        * via mali_session_add(); confirm no other thread can iterate the
+        * session list and touch job_list in that window. */
+       _MALI_OSK_INIT_LIST_HEAD(&session->job_list);
+
+       MALI_DEBUG_PRINT(2, ("Session started\n"));
+       MALI_SUCCESS;
+}
+
+/**
+ * Close a driver session: tear down everything _mali_ukk_open() created,
+ * in roughly reverse order.
+ *
+ * Sequence: unpublish the session, cancel any fence-pending jobs
+ * (CONFIG_SYNC, >= 3.4 kernels), abort queued/running GP and PP jobs,
+ * flush work queues so no bottom half still references the session, then
+ * free memory, page directory, queue and the session object itself.
+ *
+ * @param context in/out; *context is the session pointer, NULLed on return.
+ * @return _MALI_OSK_ERR_OK, or _MALI_OSK_ERR_INVALID_ARGS if context is NULL.
+ *         NOTE(review): only context is checked, not *context - confirm
+ *         callers never pass a valid pointer to a NULL session.
+ */
+_mali_osk_errcode_t _mali_ukk_close(void **context)
+{
+       struct mali_session_data *session;
+       MALI_CHECK_NON_NULL(context, _MALI_OSK_ERR_INVALID_ARGS);
+       session = (struct mali_session_data *)*context;
+
+       MALI_DEBUG_PRINT(3, ("Session ending\n"));
+
+       /* Remove session from list of all sessions. */
+       mali_session_remove(session);
+
+       /* Abort pending jobs */
+#ifdef CONFIG_SYNC
+/* MALI_SEC */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+       {
+               _mali_osk_list_t tmp_job_list;
+               struct mali_pp_job *job, *tmp;
+               _MALI_OSK_INIT_LIST_HEAD(&tmp_job_list);
+
+               _mali_osk_lock_wait(session->pending_jobs_lock, _MALI_OSK_LOCKMODE_RW);
+               /* Abort asynchronous wait on fence. */
+               _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &session->pending_jobs, struct mali_pp_job, list)
+               {
+                       MALI_DEBUG_PRINT(2, ("Sync: Aborting wait for session %x job %x\n", session, job));
+                       if (sync_fence_cancel_async(job->pre_fence, &job->sync_waiter))
+                       {
+                               MALI_DEBUG_PRINT(2, ("Sync: Failed to abort job %x\n", job));
+                       }
+                       /* move to a local list so deletion can happen after the
+                        * lock is dropped and the work queue is flushed */
+                       _mali_osk_list_add(&job->list, &tmp_job_list);
+               }
+               _mali_osk_lock_signal(session->pending_jobs_lock, _MALI_OSK_LOCKMODE_RW);
+
+               _mali_osk_wq_flush();
+
+               _mali_osk_lock_term(session->pending_jobs_lock);
+
+               /* Delete jobs */
+               _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &tmp_job_list, struct mali_pp_job, list)
+               {
+                       mali_pp_job_delete(job);
+               }
+       }
+#endif
+#endif
+
+       /* Abort queued and running jobs */
+       mali_gp_scheduler_abort_session(session);
+       mali_pp_scheduler_abort_session(session);
+
+       /* Flush pending work.
+        * Needed to make sure all bottom half processing related to this
+        * session has been completed, before we free internal data structures.
+        */
+       _mali_osk_wq_flush();
+
+       /* Free remaining memory allocated to this session */
+       mali_memory_session_end(session);
+
+       /* Free session data structures */
+       mali_mmu_pagedir_free(session->page_directory);
+       _mali_osk_notification_queue_term(session->ioctl_queue);
+       _mali_osk_free(session);
+
+       *context = NULL;
+
+       MALI_DEBUG_PRINT(2, ("Session has ended\n"));
+
+       MALI_SUCCESS;
+}
+
+#if MALI_STATE_TRACKING
+/**
+ * Dump GP and PP scheduler state into buf (debug/state-tracking builds only).
+ *
+ * @param buf  destination buffer
+ * @param size capacity of buf in bytes
+ * @return number of bytes written
+ *
+ * NOTE(review): if the GP dump ever returned more than size, the u32
+ * subtraction size - n would wrap for the PP call; assumes the dump helpers
+ * stay within the given capacity - confirm.
+ */
+u32 _mali_kernel_core_dump_state(char* buf, u32 size)
+{
+       int n = 0; /* Number of bytes written to buf */
+
+       n += mali_gp_scheduler_dump_state(buf + n, size - n);
+       n += mali_pp_scheduler_dump_state(buf + n, size - n);
+
+       return n;
+}
+#endif
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_kernel_core.h b/drivers/gpu/arm/mali400/mali/common/mali_kernel_core.h
new file mode 100644 (file)
index 0000000..83add03
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_CORE_H__
+#define __MALI_KERNEL_CORE_H__
+
+#include "mali_osk.h"
+/* MALI_SEC */
+extern int mali_hang_check_interval;
+extern int mali_max_job_runtime;
+
+/* GPU product families this driver can drive. Starts out as
+ * _MALI_PRODUCT_ID_UNKNOWN; presumably set during core probing
+ * (see mali_kernel_core_get_product_id()) - detection code not shown here. */
+typedef enum
+{
+       _MALI_PRODUCT_ID_UNKNOWN,
+       _MALI_PRODUCT_ID_MALI200,
+       _MALI_PRODUCT_ID_MALI300,
+       _MALI_PRODUCT_ID_MALI400,
+       _MALI_PRODUCT_ID_MALI450,
+} _mali_product_id_t;
+
+_mali_osk_errcode_t mali_initialize_subsystems(void);
+
+void mali_terminate_subsystems(void);
+
+_mali_product_id_t mali_kernel_core_get_product_id(void);
+
+u32 _mali_kernel_core_dump_state(char* buf, u32 size);
+
+#endif /* __MALI_KERNEL_CORE_H__ */
+
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_kernel_descriptor_mapping.c b/drivers/gpu/arm/mali400/mali/common/mali_kernel_descriptor_mapping.c
new file mode 100644 (file)
index 0000000..af6de5a
--- /dev/null
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static mali_descriptor_table * descriptor_table_alloc(int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(mali_descriptor_table * table);
+
+/**
+ * Create a descriptor mapping holding init_entries mappings, growable up to
+ * max_entries. Both counts are rounded up (MALI_PAD_INT) to a multiple of
+ * BITS_PER_LONG so the usage bitmap is made of whole words. Descriptor 0 is
+ * reserved (bit 0 pre-set) so a valid descriptor can never be 0.
+ *
+ * @return the new mapping object, or NULL on allocation/lock-init failure
+ *         (all partial allocations are released on the failure paths).
+ */
+mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries)
+{
+       mali_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(mali_descriptor_mapping));
+
+       init_entries = MALI_PAD_INT(init_entries);
+       max_entries = MALI_PAD_INT(max_entries);
+
+       if (NULL != map)
+       {
+               map->table = descriptor_table_alloc(init_entries);
+               if (NULL != map->table)
+               {
+            map->lock = _mali_osk_lock_init( (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP);
+            if (NULL != map->lock)
+            {
+                           _mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic to kick in */
+                           map->max_nr_mappings_allowed = max_entries;
+                           map->current_nr_mappings = init_entries;
+                           return map;
+            }
+               descriptor_table_free(map->table);
+               }
+               _mali_osk_free(map);
+       }
+       return NULL;
+}
+
+/**
+ * Destroy a mapping object: frees the table, the lock and the map itself.
+ * Does NOT free the objects the descriptors pointed to - that remains the
+ * caller's responsibility (e.g. via mali_descriptor_mapping_call_for_each).
+ */
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map)
+{
+       descriptor_table_free(map->table);
+    _mali_osk_lock_term(map->lock);
+       _mali_osk_free(map);
+}
+
+/**
+ * Allocate a new descriptor ID in @map and bind it to @target.
+ *
+ * When the table is full it is grown by BITS_PER_LONG entries (up to
+ * max_nr_mappings_allowed): a larger table is allocated, the old contents
+ * are copied across, and the old table is freed. Runs under the map's
+ * write lock.
+ *
+ * @param map         the mapping to allocate from
+ * @param target      the value the new descriptor will map to
+ * @param odescriptor receives the allocated descriptor ID on success
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT when the table
+ *         is full or the expansion allocation fails.
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *odescriptor)
+{
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+       int new_descriptor;
+
+       MALI_DEBUG_ASSERT_POINTER(map);
+       MALI_DEBUG_ASSERT_POINTER(odescriptor);
+
+       _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+       new_descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+       if (new_descriptor == map->current_nr_mappings)
+       {
+               /* no free descriptor, try to expand the table */
+               mali_descriptor_table * new_table, * old_table;
+               int nr_mappings_old = map->current_nr_mappings;
+               if (map->current_nr_mappings >= map->max_nr_mappings_allowed) goto unlock_and_exit;
+
+               map->current_nr_mappings += BITS_PER_LONG;
+               new_table = descriptor_table_alloc(map->current_nr_mappings);
+               if (NULL == new_table)
+               {
+                       /* roll back the size bump so current_nr_mappings stays
+                        * consistent with the (unchanged) table capacity */
+                       map->current_nr_mappings = nr_mappings_old;
+                       goto unlock_and_exit;
+               }
+
+               old_table = map->table;
+               /* Copy only the OLD table's contents: using the new (larger)
+                * count here would read past the end of the old allocation.
+                * The new table comes zeroed from calloc, so its tail is
+                * already all-unused/NULL. */
+               _mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*nr_mappings_old) / BITS_PER_LONG);
+               _mali_osk_memcpy(new_table->mappings, old_table->mappings, nr_mappings_old * sizeof(void*));
+               map->table = new_table;
+               descriptor_table_free(old_table);
+       }
+
+       /* we have found a valid descriptor, set the value and usage bit */
+       _mali_osk_set_nonatomic_bit(new_descriptor, map->table->usage);
+       map->table->mappings[new_descriptor] = target;
+       *odescriptor = new_descriptor;
+       err = _MALI_OSK_ERR_OK;
+
+unlock_and_exit:
+       _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+       MALI_ERROR(err);
+}
+
+/**
+ * Invoke callback(descriptor, value) for every live descriptor in @map,
+ * holding the map's read lock for the whole iteration. The callback must
+ * therefore not call back into functions that take the write lock.
+ */
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*))
+{
+       int i;
+
+       MALI_DEBUG_ASSERT_POINTER(map);
+       MALI_DEBUG_ASSERT_POINTER(callback);
+
+    _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+       /* id 0 is skipped as it's an reserved ID not mapping to anything */
+       for (i = 1; i < map->current_nr_mappings; ++i)
+       {
+               if (_mali_osk_test_bit(i, map->table->usage))
+               {
+                       callback(i, map->table->mappings[i]);
+               }
+       }
+    _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+}
+
+/**
+ * Look up the value bound to @descriptor under the read lock.
+ * *target receives the mapped value, or NULL when the descriptor is out of
+ * range or not in use.
+ * @return _MALI_OSK_ERR_OK on a hit, _MALI_OSK_ERR_FAULT otherwise.
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target)
+{
+       _mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+       MALI_DEBUG_ASSERT_POINTER(map);
+    _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+       if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+       {
+               *target = map->table->mappings[descriptor];
+               result = _MALI_OSK_ERR_OK;
+       }
+       else *target = NULL;
+    _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+       MALI_ERROR(result);
+}
+
+/**
+ * Replace the value bound to an in-use @descriptor.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if the descriptor
+ *         is out of range or unused.
+ *
+ * NOTE(review): this writes the mappings slot while holding only the READ
+ * lock. That excludes table reallocation (which takes RW) but not a
+ * concurrent set() on the same descriptor - confirm callers never race here.
+ * Also lacks the MALI_DEBUG_ASSERT_POINTER(map) check its siblings have.
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target)
+{
+       _mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+    _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+       if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+       {
+               map->table->mappings[descriptor] = target;
+               result = _MALI_OSK_ERR_OK;
+       }
+    _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+       MALI_ERROR(result);
+}
+
+/**
+ * Release @descriptor so it can be reused. Clears its usage bit and NULLs
+ * the slot under the write lock.
+ * @return the previously mapped value, or NULL if the descriptor was out of
+ *         range or not in use (caller owns freeing the returned object).
+ */
+void *mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor)
+{
+       void *old_value = NULL;
+
+    _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+       if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+       {
+               old_value = map->table->mappings[descriptor];
+               map->table->mappings[descriptor] = NULL;
+               _mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
+       }
+    _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+
+       return old_value;
+}
+
+/**
+ * Allocate a descriptor table for @count entries as ONE zeroed block:
+ *   [mali_descriptor_table header][usage bitmap][mappings array]
+ * The bitmap takes (sizeof(unsigned long)*count)/BITS_PER_LONG = count/8
+ * bytes, i.e. exactly count bits. calloc zeroing means every descriptor
+ * starts unused and every mapping NULL.
+ * @return the table, or NULL on allocation failure.
+ */
+static mali_descriptor_table * descriptor_table_alloc(int count)
+{
+       mali_descriptor_table * table;
+
+       table = _mali_osk_calloc(1, sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count));
+
+       if (NULL != table)
+       {
+               /* carve the bitmap and mappings array out of the same block */
+               table->usage = (u32*)((u8*)table + sizeof(mali_descriptor_table));
+               table->mappings = (void**)((u8*)table + sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG));
+       }
+
+       return table;
+}
+
+/* Free a table from descriptor_table_alloc(); header, bitmap and mappings
+ * live in one allocation, so a single free releases everything. */
+static void descriptor_table_free(mali_descriptor_table * table)
+{
+       _mali_osk_free(table);
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_kernel_descriptor_mapping.h b/drivers/gpu/arm/mali400/mali/common/mali_kernel_descriptor_mapping.h
new file mode 100644 (file)
index 0000000..2829173
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_descriptor_mapping.h
+ */
+
+#ifndef __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+#define __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+
+#include "mali_osk.h"
+
+/**
+ * The actual descriptor mapping table, never directly accessed by clients.
+ * Both arrays live in the same allocation as the struct itself
+ * (see descriptor_table_alloc in the .c file).
+ */
+typedef struct mali_descriptor_table
+{
+       u32 * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+       void** mappings; /**< Array of the pointers the descriptors map to */
+} mali_descriptor_table;
+
+/**
+ * The descriptor mapping object
+ * Provides a separate namespace where we can map an integer to a pointer.
+ * Descriptor 0 is reserved at creation time and never handed out, so a
+ * valid descriptor is always > 0.
+ */
+typedef struct mali_descriptor_mapping
+{
+    _mali_osk_lock_t *lock; /**< Lock protecting access to the mapping object */
+       int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+       int current_nr_mappings; /**< Current number of possible mappings */
+       mali_descriptor_table * table; /**< Pointer to the current mapping table */
+} mali_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object
+ * Create a descriptor mapping capable of holding init_entries growable to max_entries
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Number of entries to max support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param map The map to free
+ */
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param target The value to map to
+ * @return The descriptor allocated, a negative value on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *descriptor);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to a pointer which will receive the stored value
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target);
+
+/**
+ * Set the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to replace the current value with
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target);
+
+/**
+ * Call the specified callback function for each descriptor in map.
+ * Entire function is mutex protected.
+ * @param map The map to do callbacks for
+ * @param callback A callback function which will be calle for each entry in map
+ */
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*));
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ *
+ * @return old value of descriptor mapping
+ */
+void *mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor);
+
+#endif /* __MALI_KERNEL_DESCRIPTOR_MAPPING_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_kernel_mem_os.c b/drivers/gpu/arm/mali400/mali/common/mali_kernel_mem_os.c
new file mode 100644 (file)
index 0000000..712970c
--- /dev/null
@@ -0,0 +1,353 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_osk.h"
+
+/* Book-keeping for one allocation made by os_allocator_allocate(); handed
+ * back opaquely as alloc_info->handle and consumed by os_allocator_release(). */
+typedef struct os_allocation
+{
+       u32 num_pages; /**< number of CPU pages actually mapped */
+       u32 offset_start; /**< value of *offset when the allocation began */
+       mali_allocation_engine * engine; /**< cached for the engine's unmap call at release */
+       mali_memory_allocation * descriptor; /**< cached for the engine's unmap call at release */
+} os_allocation;
+
+/* Per-allocator state; stored in allocator->ctx. All counters are guarded
+ * by 'mutex'. */
+typedef struct os_allocator
+{
+       _mali_osk_lock_t *mutex;
+
+       /**
+        * Maximum number of pages to allocate from the OS
+        */
+       u32 num_pages_max;
+
+       /**
+        * Number of pages allocated from the OS
+        */
+       u32 num_pages_allocated;
+
+       /** CPU Usage adjustment (add to mali physical address to get cpu physical address) */
+       u32 cpu_usage_adjust;
+} os_allocator;
+
+static mali_physical_memory_allocation_result os_allocator_allocate(void* ctx, mali_allocation_engine * engine,  mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+static mali_physical_memory_allocation_result os_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block);
+static void os_allocator_release(void * ctx, void * handle);
+static void os_allocator_page_table_block_release( mali_page_table_block *page_table_block );
+static void os_allocator_destroy(mali_physical_memory_allocator * allocator);
+static u32 os_allocator_stat(mali_physical_memory_allocator * allocator);
+
+/**
+ * Create a physical-memory allocator backed by OS pages.
+ *
+ * @param max_allocation   cap in bytes on total OS memory this allocator may
+ *                         hand out; rounded up to a whole CPU page
+ * @param cpu_usage_adjust added to Mali physical addresses to get CPU
+ *                         physical addresses
+ * @param name             description stored on the allocator
+ * @return the allocator with its function table filled in, or NULL on
+ *         allocation/lock failure (failure paths free partial state).
+ */
+mali_physical_memory_allocator * mali_os_allocator_create(u32 max_allocation, u32 cpu_usage_adjust, const char *name)
+{
+       mali_physical_memory_allocator * allocator;
+       os_allocator * info;
+
+       max_allocation = (max_allocation + _MALI_OSK_CPU_PAGE_SIZE-1) & ~(_MALI_OSK_CPU_PAGE_SIZE-1);
+
+       MALI_DEBUG_PRINT(2, ("Mali OS memory allocator created with max allocation size of 0x%X bytes, cpu_usage_adjust 0x%08X\n", max_allocation, cpu_usage_adjust));
+
+       allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator));
+       if (NULL != allocator)
+       {
+               info = _mali_osk_malloc(sizeof(os_allocator));
+               if (NULL != info)
+               {
+                       info->num_pages_max = max_allocation / _MALI_OSK_CPU_PAGE_SIZE;
+                       info->num_pages_allocated = 0;
+                       info->cpu_usage_adjust = cpu_usage_adjust;
+
+                       info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_ORDERED, 0, _MALI_OSK_LOCK_ORDER_MEM_INFO);
+            if (NULL != info->mutex)
+            {
+                           allocator->allocate = os_allocator_allocate;
+                           allocator->allocate_page_table_block = os_allocator_allocate_page_table_block;
+                           allocator->destroy = os_allocator_destroy;
+                               allocator->stat = os_allocator_stat;
+                           allocator->ctx = info;
+                               allocator->name = name;
+
+                           return allocator;
+            }
+            _mali_osk_free(info);
+               }
+               _mali_osk_free(allocator);
+       }
+
+       return NULL;
+}
+
+/**
+ * Report bytes currently allocated by this allocator.
+ * NOTE(review): the page count is accumulated in CPU pages elsewhere in this
+ * file but multiplied by _MALI_OSK_MALI_PAGE_SIZE here - correct only if the
+ * two page sizes are equal; confirm.
+ */
+static u32 os_allocator_stat(mali_physical_memory_allocator * allocator)
+{
+       os_allocator * info;
+       info = (os_allocator*)allocator->ctx;
+       return info->num_pages_allocated * _MALI_OSK_MALI_PAGE_SIZE;
+}
+
+/**
+ * Destroy the allocator: terminates the mutex and frees the private state
+ * and the allocator object. Assumes all outstanding allocations have
+ * already been released by their owners.
+ */
+static void os_allocator_destroy(mali_physical_memory_allocator * allocator)
+{
+       os_allocator * info;
+        MALI_DEBUG_ASSERT_POINTER(allocator);
+       MALI_DEBUG_ASSERT_POINTER(allocator->ctx);
+       info = (os_allocator*)allocator->ctx;
+       _mali_osk_lock_term(info->mutex);
+       _mali_osk_free(info);
+       _mali_osk_free(allocator);
+}
+
+/**
+ * Allocate and map OS pages, one CPU page at a time, for the region
+ * [*offset, descriptor->size) of the given memory descriptor.
+ *
+ * On success *offset is advanced past the mapped pages and alloc_info is
+ * filled with the release callback + handle. A run that maps some but not
+ * all pages returns MALI_MEM_ALLOC_PARTIAL; mapping everything returns
+ * MALI_MEM_ALLOC_FINISHED; a hard mapping failure unmaps what was done and
+ * returns MALI_MEM_ALLOC_INTERNAL_FAILURE.
+ *
+ * NOTE(review): the MALI_SEC change below commented out the
+ * num_pages_max/_mali_osk_mem_check_allocated limit, so this loop is
+ * bounded only by 'left'; os_mem_max_usage is consequently only referenced
+ * by the commented-out condition (dead store) - confirm intended.
+ */
+static mali_physical_memory_allocation_result os_allocator_allocate(void* ctx, mali_allocation_engine * engine,  mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+       mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
+       u32 left;
+       os_allocator * info;
+       os_allocation * allocation;
+       int pages_allocated = 0;
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+       MALI_DEBUG_ASSERT_POINTER(engine);
+       MALI_DEBUG_ASSERT_POINTER(descriptor);
+       MALI_DEBUG_ASSERT_POINTER(offset);
+       MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+       info = (os_allocator*)ctx;
+       left = descriptor->size - *offset;
+
+       if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+       /** @note this code may not work on Linux, or may require a more complex Linux implementation */
+       allocation = _mali_osk_malloc(sizeof(os_allocation));
+       if (NULL != allocation)
+       {
+               u32 os_mem_max_usage = info->num_pages_max * _MALI_OSK_CPU_PAGE_SIZE;
+               allocation->offset_start = *offset;
+               allocation->num_pages = ((left + _MALI_OSK_CPU_PAGE_SIZE - 1) & ~(_MALI_OSK_CPU_PAGE_SIZE - 1)) >> _MALI_OSK_CPU_PAGE_ORDER;
+               MALI_DEBUG_PRINT(6, ("Allocating page array of size %d bytes\n", allocation->num_pages * sizeof(struct page*)));
+/* MALI_SEC */
+/*
+while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max) && _mali_osk_mem_check_allocated(os_mem_max_usage))
+*/
+               while (left > 0)
+               {
+                       err = mali_allocation_engine_map_physical(engine, descriptor, *offset, MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, info->cpu_usage_adjust, _MALI_OSK_CPU_PAGE_SIZE);
+                       if ( _MALI_OSK_ERR_OK != err)
+                       {
+                               if (  _MALI_OSK_ERR_NOMEM == err)
+                               {
+                                       /* 'Partial' allocation (or, out-of-memory on first page) */
+                                       break;
+                               }
+
+                               MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
+
+                               /* Fatal error, cleanup any previous pages allocated. */
+                               if ( pages_allocated > 0 )
+                               {
+                                       mali_allocation_engine_unmap_physical( engine, descriptor, allocation->offset_start, _MALI_OSK_CPU_PAGE_SIZE*pages_allocated, _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR );
+                                       /* (*offset) doesn't need to be restored; it will not be used by the caller on failure */
+                               }
+
+                               pages_allocated = 0;
+
+                               result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+                               break;
+                       }
+
+                       /* Loop iteration */
+                       if (left < _MALI_OSK_CPU_PAGE_SIZE) left = 0;
+                       else left -= _MALI_OSK_CPU_PAGE_SIZE;
+
+                       pages_allocated++;
+
+                       *offset += _MALI_OSK_CPU_PAGE_SIZE;
+               }
+
+               if (left) MALI_PRINT(("Out of memory. Mali memory allocated: %d kB  Configured maximum OS memory usage: %d kB\n",
+                                (info->num_pages_allocated * _MALI_OSK_CPU_PAGE_SIZE)/1024, (info->num_pages_max* _MALI_OSK_CPU_PAGE_SIZE)/1024));
+
+               /* Loop termination; decide on result */
+               if (pages_allocated)
+               {
+                       MALI_DEBUG_PRINT(6, ("Allocated %d pages\n", pages_allocated));
+                       if (left) result = MALI_MEM_ALLOC_PARTIAL;
+                       else result = MALI_MEM_ALLOC_FINISHED;
+
+            /* Some OS do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+             * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+             * This is required for MALI to have the correct view of the memory.
+             */
+            _mali_osk_cache_ensure_uncached_range_flushed( (void *)descriptor, allocation->offset_start, pages_allocated *_MALI_OSK_CPU_PAGE_SIZE );
+                       allocation->num_pages = pages_allocated;
+                       allocation->engine = engine;         /* Necessary to make the engine's unmap call */
+                       allocation->descriptor = descriptor; /* Necessary to make the engine's unmap call */
+                       info->num_pages_allocated += pages_allocated;
+
+                       MALI_DEBUG_PRINT(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));
+
+                       alloc_info->ctx = info;
+                       alloc_info->handle = allocation;
+                       alloc_info->release = os_allocator_release;
+               }
+               else
+               {
+                       MALI_DEBUG_PRINT(6, ("Releasing pages array due to no pages allocated\n"));
+                       _mali_osk_free( allocation );
+               }
+       }
+
+       _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+       return result;
+}
+
+/**
+ * Release callback for allocations made by os_allocator_allocate().
+ * Unmaps the pages via the engine recorded on the allocation, decrements
+ * the allocator's page count under the mutex, and frees the book-keeping
+ * struct. Returns silently (leaking the pages) if the mutex cannot be taken.
+ *
+ * @param ctx    the os_allocator
+ * @param handle the os_allocation returned through alloc_info->handle
+ */
+static void os_allocator_release(void * ctx, void * handle)
+{
+       os_allocator * info;
+       os_allocation * allocation;
+       mali_allocation_engine * engine;
+       mali_memory_allocation * descriptor;
+
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+       MALI_DEBUG_ASSERT_POINTER(handle);
+
+       info = (os_allocator*)ctx;
+       allocation = (os_allocation*)handle;
+       engine = allocation->engine;
+       descriptor = allocation->descriptor;
+
+       MALI_DEBUG_ASSERT_POINTER( engine );
+       MALI_DEBUG_ASSERT_POINTER( descriptor );
+
+       if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+       {
+               MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+               return;
+       }
+
+       MALI_DEBUG_PRINT(6, ("Releasing %d os pages\n", allocation->num_pages));
+
+       MALI_DEBUG_ASSERT( allocation->num_pages <= info->num_pages_allocated);
+       info->num_pages_allocated -= allocation->num_pages;
+
+       mali_allocation_engine_unmap_physical( engine, descriptor, allocation->offset_start, _MALI_OSK_CPU_PAGE_SIZE*allocation->num_pages, _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR );
+
+       _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+       _mali_osk_free(allocation);
+}
+
+/**
+ * Allocate a physically contiguous block for Mali page tables.
+ *
+ * Tries a 2^allocation_order page I/O-region allocation, halving the order
+ * on each failure down to order 0. The starting order is first reduced so
+ * the allocator's num_pages_max budget is not exceeded. Fills in the
+ * mali_page_table_block (release callback, ctx, size, mali phys base,
+ * mapping) on success.
+ *
+ * The achieved order is smuggled through block->handle as a plain integer
+ * cast to void* (decoded again in os_allocator_page_table_block_release).
+ *
+ * @return MALI_MEM_ALLOC_FINISHED on success, MALI_MEM_ALLOC_NONE on OOM,
+ *         MALI_MEM_ALLOC_INTERNAL_FAILURE if the mutex cannot be taken.
+ */
+static mali_physical_memory_allocation_result os_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block)
+{
+/* MALI_SEC  6->10 */
+#ifndef CONFIG_FORCE_MAX_ZONEORDER
+       int allocation_order = 10;
+#else
+       int allocation_order = CONFIG_FORCE_MAX_ZONEORDER - 1;
+#endif
+       void *virt = NULL;
+       u32 size = _MALI_OSK_CPU_PAGE_SIZE << allocation_order;
+       os_allocator * info;
+
+       u32 cpu_phys_base;
+
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+       info = (os_allocator*)ctx;
+
+       /* Ensure we don't allocate more than we're supposed to from the ctx */
+       if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+       /* if the number of pages to be requested lead to exceeding the memory
+        * limit in info->num_pages_max, reduce the size that is to be requested. */
+       while ( (info->num_pages_allocated + (1 << allocation_order) > info->num_pages_max)
+               && _mali_osk_mem_check_allocated(info->num_pages_max * _MALI_OSK_CPU_PAGE_SIZE) )
+       {
+               if ( allocation_order > 0 ) {
+                       --allocation_order;
+               } else {
+                       /* return OOM */
+                       _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+                       return MALI_MEM_ALLOC_NONE;
+               }
+       }
+
+       /* try to allocate 2^(allocation_order) pages, if that fails, try
+        * allocation_order-1 to allocation_order 0 (inclusive) */
+       while ( allocation_order >= 0 )
+       {
+               size = _MALI_OSK_CPU_PAGE_SIZE << allocation_order;
+               virt = _mali_osk_mem_allocioregion( &cpu_phys_base, size );
+
+               if (NULL != virt) break;
+
+               --allocation_order;
+       }
+
+       if ( NULL == virt )
+       {
+               MALI_DEBUG_PRINT(1, ("Failed to allocate consistent memory. Is CONSISTENT_DMA_SIZE set too low?\n"));
+               /* return OOM */
+               _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+               return MALI_MEM_ALLOC_NONE;
+       }
+
+       MALI_DEBUG_PRINT(5, ("os_allocator_allocate_page_table_block: Allocation of order %i succeeded\n",
+                               allocation_order));
+
+       /* we now know the size of the allocation since we know for what
+        * allocation_order the allocation succeeded */
+       size = _MALI_OSK_CPU_PAGE_SIZE << allocation_order;
+
+
+       block->release = os_allocator_page_table_block_release;
+       block->ctx = ctx;
+       block->handle = (void*)allocation_order;
+       block->size = size;
+       block->phys_base = cpu_phys_base - info->cpu_usage_adjust;
+       block->mapping = virt;
+
+       info->num_pages_allocated += (1 << allocation_order);
+
+       _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+       return MALI_MEM_ALLOC_FINISHED;
+}
+
+/**
+ * Release callback for page table blocks from
+ * os_allocator_allocate_page_table_block(). Recovers the allocation order
+ * from block->handle (stored there as an integer cast to void*), returns
+ * the pages to the allocator's budget under the mutex, and frees the I/O
+ * region. Returns silently (leaking the block) if the mutex cannot be taken.
+ */
+static void os_allocator_page_table_block_release( mali_page_table_block *page_table_block )
+{
+       os_allocator * info;
+       u32 allocation_order;
+       u32 pages_allocated;
+
+       MALI_DEBUG_ASSERT_POINTER( page_table_block );
+
+       info = (os_allocator*)page_table_block->ctx;
+
+       MALI_DEBUG_ASSERT_POINTER( info );
+
+       allocation_order = (u32)page_table_block->handle;
+
+       pages_allocated = 1 << allocation_order;
+
+       MALI_DEBUG_ASSERT( pages_allocated * _MALI_OSK_CPU_PAGE_SIZE == page_table_block->size );
+
+       if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+       {
+               MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+               return;
+       }
+
+       MALI_DEBUG_ASSERT( pages_allocated <= info->num_pages_allocated);
+       info->num_pages_allocated -= pages_allocated;
+
+       /* Adjust phys_base from mali physical address to CPU physical address */
+       _mali_osk_mem_freeioregion( page_table_block->phys_base + info->cpu_usage_adjust, page_table_block->size, page_table_block->mapping );
+
+       _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_kernel_mem_os.h b/drivers/gpu/arm/mali400/mali/common/mali_kernel_mem_os.h
new file mode 100644 (file)
index 0000000..66c0a58
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
#ifndef __MALI_KERNEL_MEM_OS_H__
#define __MALI_KERNEL_MEM_OS_H__

/**
 * @brief Creates an object that manages allocating OS memory
 *
 * Creates an object that provides an interface to allocate OS memory and
 * have it mapped into the Mali virtual memory space.
 *
 * The object exposes pointers to
 * - allocate OS memory
 * - allocate Mali page tables in OS memory
 * - destroy the object
 *
 * Allocations from OS memory are of type mali_physical_memory_allocation
 * which provides a function to release the allocation.
 *
 * NOTE(review): ownership of the returned object presumably transfers to the
 * caller, to be torn down via the object's own destroy member — confirm
 * against mali_kernel_mem_os.c.
 *
 * @param max_allocation max. number of bytes that can be allocated from OS memory
 * @param cpu_usage_adjust value to add to mali physical addresses to obtain CPU physical addresses
 * @param name description of the allocator
 * @return pointer to mali_physical_memory_allocator object. NULL on failure.
 **/
mali_physical_memory_allocator * mali_os_allocator_create(u32 max_allocation, u32 cpu_usage_adjust, const char *name);

#endif /* __MALI_KERNEL_MEM_OS_H__ */
+
+
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_kernel_memory_engine.c b/drivers/gpu/arm/mali400/mali/common/mali_kernel_memory_engine.c
new file mode 100644 (file)
index 0000000..f0a0c97
--- /dev/null
@@ -0,0 +1,375 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
/** Internal state behind the opaque mali_allocation_engine handle: the pair
 * of address managers that cooperate on every mapping. */
typedef struct memory_engine
{
	mali_kernel_mem_address_manager * mali_address;    /**< Manages the Mali-side (GPU virtual) address space */
	mali_kernel_mem_address_manager * process_address; /**< Manages the user-process (CPU) address space */
} memory_engine;
+
+mali_allocation_engine mali_allocation_engine_create(mali_kernel_mem_address_manager * mali_address_manager, mali_kernel_mem_address_manager * process_address_manager)
+{
+       memory_engine * engine;
+
+       /* Mali Address Manager need not support unmap_physical */
+       MALI_DEBUG_ASSERT_POINTER(mali_address_manager);
+       MALI_DEBUG_ASSERT_POINTER(mali_address_manager->allocate);
+       MALI_DEBUG_ASSERT_POINTER(mali_address_manager->release);
+       MALI_DEBUG_ASSERT_POINTER(mali_address_manager->map_physical);
+
+       /* Process Address Manager must support unmap_physical for OS allocation
+        * error path handling */
+       MALI_DEBUG_ASSERT_POINTER(process_address_manager);
+       MALI_DEBUG_ASSERT_POINTER(process_address_manager->allocate);
+       MALI_DEBUG_ASSERT_POINTER(process_address_manager->release);
+       MALI_DEBUG_ASSERT_POINTER(process_address_manager->map_physical);
+       MALI_DEBUG_ASSERT_POINTER(process_address_manager->unmap_physical);
+
+
+       engine = (memory_engine*)_mali_osk_malloc(sizeof(memory_engine));
+       if (NULL == engine) return NULL;
+
+       engine->mali_address = mali_address_manager;
+       engine->process_address = process_address_manager;
+
+       return (mali_allocation_engine)engine;
+}
+
/**
 * @brief Destroys an engine created by mali_allocation_engine_create.
 *
 * Frees only the engine object itself; the address managers supplied at
 * creation time are owned by the caller and are not touched here.
 */
void mali_allocation_engine_destroy(mali_allocation_engine engine)
{
	MALI_DEBUG_ASSERT_POINTER(engine);
	_mali_osk_free(engine);
}
+
/**
 * @brief Allocates and maps memory for a descriptor using a chain of physical allocators.
 *
 * Flow: reserve the Mali address range, optionally reserve the process (CPU)
 * address range, then walk the allocator chain in preference order committing
 * physical memory. An allocator may satisfy the request fully (FINISHED),
 * partially (PARTIAL — the remainder is requested from the next allocator
 * with a fresh tracker), or not at all (NONE — try the next allocator).
 * On any failure, everything committed so far is rolled back.
 *
 * @param mem_engine engine created by mali_allocation_engine_create
 * @param descriptor allocation descriptor; its list member must be initialized
 * @param physical_allocators head of the allocator preference chain
 * @param tracking_list optional session list the descriptor is added to on success
 * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT on failure.
 */
_mali_osk_errcode_t mali_allocation_engine_allocate_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_allocators, _mali_osk_list_t *tracking_list )
{
	memory_engine * engine = (memory_engine*)mem_engine;

	MALI_DEBUG_ASSERT_POINTER(engine);
	MALI_DEBUG_ASSERT_POINTER(descriptor);
	MALI_DEBUG_ASSERT_POINTER(physical_allocators);
	/* ASSERT that the list member has been initialized, even if it won't be
	 * used for tracking. We need it to be initialized to see if we need to
	 * delete it from a list in the release function. */
	MALI_DEBUG_ASSERT( NULL != descriptor->list.next && NULL != descriptor->list.prev );

	if (_MALI_OSK_ERR_OK == engine->mali_address->allocate(descriptor))
	{
		_mali_osk_errcode_t res = _MALI_OSK_ERR_OK;
		/* The process-side mapping is only reserved when the allocation is
		 * flagged for userspace visibility. */
		if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
		{
			res = engine->process_address->allocate(descriptor);
		}
		if ( _MALI_OSK_ERR_OK == res )
		{
			/* address space setup OK, commit physical memory to the allocation */
			mali_physical_memory_allocator * active_allocator = physical_allocators;
			struct mali_physical_memory_allocation * active_allocation_tracker = &descriptor->physical_allocation;
			u32 offset = 0; /* running offset into the allocation; advanced by each allocator */

			while ( NULL != active_allocator )
			{
				switch (active_allocator->allocate(active_allocator->ctx, mem_engine, descriptor, &offset, active_allocation_tracker))
				{
					case MALI_MEM_ALLOC_FINISHED:
						if ( NULL != tracking_list )
						{
							/* Insert into the memory session list */
							/* ASSERT that it is not already part of a list */
							MALI_DEBUG_ASSERT( _mali_osk_list_empty( &descriptor->list ) );
							_mali_osk_list_add( &descriptor->list, tracking_list );
						}

						MALI_SUCCESS; /* all done */
					case MALI_MEM_ALLOC_NONE:
						/* reuse current active_allocation_tracker */
						MALI_DEBUG_PRINT( 4, ("Memory Engine Allocate: No allocation on %s, resorting to %s\n",
											  ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
											  ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
						active_allocator = active_allocator->next;
						break;
					case MALI_MEM_ALLOC_PARTIAL:
						if (NULL != active_allocator->next)
						{
							/* need a new allocation tracker */
							active_allocation_tracker->next = _mali_osk_calloc(1, sizeof(mali_physical_memory_allocation));
							if (NULL != active_allocation_tracker->next)
							{
								active_allocation_tracker = active_allocation_tracker->next;
								MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate: Partial allocation on %s, resorting to %s\n",
													  ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
													  ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
								active_allocator = active_allocator->next;
								break;
							}
						}
						/* No next allocator, or tracker allocation failed:
						 * the partial result cannot be completed. */
						/* FALL THROUGH */
					case MALI_MEM_ALLOC_INTERNAL_FAILURE:
						active_allocator = NULL; /* end the while loop */
						break;
				}
			}

			MALI_PRINT(("Memory allocate failed, could not allocate size %d kB.\n", descriptor->size/1024));

			/* allocation failure, start cleanup */
			/* loop over any potential partial allocations */
			active_allocation_tracker = &descriptor->physical_allocation;
			while (NULL != active_allocation_tracker)
			{
				/* handle blank trackers which will show up during failure */
				if (NULL != active_allocation_tracker->release)
				{
					active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
				}
				active_allocation_tracker = active_allocation_tracker->next;
			}

			/* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
			for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
			{
				void * buf = active_allocation_tracker;
				active_allocation_tracker = active_allocation_tracker->next;
				_mali_osk_free(buf);
			}

			/* release the address spaces */

			if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
			{
				engine->process_address->release(descriptor);
			}
		}
		engine->mali_address->release(descriptor);
	}

	MALI_ERROR(_MALI_OSK_ERR_FAULT);
}
+
/**
 * @brief Fully releases an allocation: unmap the Mali page tables, then free
 * the physical memory.
 *
 * Convenience wrapper around the two-phase release; the pt1/pt2 entry points
 * remain available for callers that need to perform the phases separately.
 */
void mali_allocation_engine_release_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor)
{
	mali_allocation_engine_release_pt1_mali_pagetables_unmap(mem_engine, descriptor);
	mali_allocation_engine_release_pt2_physical_memory_free(mem_engine, descriptor);
}
+
+void mali_allocation_engine_release_pt1_mali_pagetables_unmap(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor)
+{
+       memory_engine * engine = (memory_engine*)mem_engine;
+
+       MALI_DEBUG_ASSERT_POINTER(engine);
+       MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+       /* Calling: mali_address_manager_release()  */
+       /* This function is allowed to be called several times, and it only does the release on the first call. */
+       engine->mali_address->release(descriptor);
+}
+
+void mali_allocation_engine_release_pt2_physical_memory_free(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor)
+{
+       memory_engine * engine = (memory_engine*)mem_engine;
+       mali_physical_memory_allocation * active_allocation_tracker;
+
+       /* Remove this from a tracking list in session_data->memory_head */
+       if ( ! _mali_osk_list_empty( &descriptor->list ) )
+       {
+               _mali_osk_list_del( &descriptor->list );
+               /* Clear the list for debug mode, catch use-after-free */
+               MALI_DEBUG_CODE( descriptor->list.next = descriptor->list.prev = NULL; )
+       }
+
+       active_allocation_tracker = &descriptor->physical_allocation;
+       while (NULL != active_allocation_tracker)
+       {
+               MALI_DEBUG_ASSERT_POINTER(active_allocation_tracker->release);
+               active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
+               active_allocation_tracker = active_allocation_tracker->next;
+       }
+
+       /* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
+       for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
+       {
+               void * buf = active_allocation_tracker;
+               active_allocation_tracker = active_allocation_tracker->next;
+               _mali_osk_free(buf);
+       }
+
+       if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+       {
+               engine->process_address->release(descriptor);
+       }
+}
+
/**
 * @brief Maps a physical block into the process (optional) and Mali address spaces.
 *
 * The process-side mapping is performed first because, for OS-allocated pages
 * (phys == MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC), the process
 * address manager is the one that actually allocates the page and returns its
 * physical address. If the Mali-side mapping then fails, the process-side
 * mapping is rolled back.
 *
 * @param mem_engine engine created by mali_allocation_engine_create
 * @param descriptor allocation the mapping belongs to
 * @param offset offset from the start of the allocation
 * @param phys Mali physical address, or the OS-allocated magic value
 * @param cpu_usage_adjust value added to a Mali physical address to obtain the CPU physical address
 * @param size length of the block in bytes
 * @return _MALI_OSK_ERR_OK on success, otherwise the failing manager's error code.
 */
_mali_osk_errcode_t mali_allocation_engine_map_physical(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, u32 offset, u32 phys, u32 cpu_usage_adjust, u32 size)
{
	_mali_osk_errcode_t err;
	memory_engine * engine = (memory_engine*)mem_engine;
	_mali_osk_mem_mapregion_flags_t unmap_flags = (_mali_osk_mem_mapregion_flags_t)0;

	MALI_DEBUG_ASSERT_POINTER(engine);
	MALI_DEBUG_ASSERT_POINTER(descriptor);

	MALI_DEBUG_PRINT(7, ("Mapping phys 0x%08X length 0x%08X at offset 0x%08X\n", phys, size, offset));

	MALI_DEBUG_ASSERT_POINTER(engine->mali_address);
	MALI_DEBUG_ASSERT_POINTER(engine->mali_address->map_physical);

	/* Handle process address manager first, because we may need them to
	 * allocate the physical page */
	if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
	{
		/* Handle OS-allocated specially, since an adjustment may be required */
		if ( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == phys )
		{
			MALI_DEBUG_ASSERT( _MALI_OSK_CPU_PAGE_SIZE == size );

			/* Set flags to use on error path */
			unmap_flags |= _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR;

			/* map_physical allocates the page and writes its CPU physical
			 * address back through &phys */
			err = engine->process_address->map_physical(descriptor, offset, &phys, size);
			/* Adjust for cpu physical address to mali physical address */
			phys -= cpu_usage_adjust;
		}
		else
		{
			u32 cpu_phys;
			/* Adjust mali physical address to cpu physical address */
			cpu_phys = phys + cpu_usage_adjust;
			err = engine->process_address->map_physical(descriptor, offset, &cpu_phys, size);
		}

		if ( _MALI_OSK_ERR_OK != err )
		{
			MALI_DEBUG_PRINT(2, ("Map failed: %s %d\n", __FUNCTION__, __LINE__));
			MALI_ERROR( err );
		}
	}

	MALI_DEBUG_PRINT(7, ("Mapping phys 0x%08X length 0x%08X at offset 0x%08X to CPUVA 0x%08X\n", phys, size, offset, (u32)(descriptor->mapping) + offset));

	/* Mali address manager must use the physical address - no point in asking
	 * it to allocate another one for us */
	MALI_DEBUG_ASSERT( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC != phys );

	err = engine->mali_address->map_physical(descriptor, offset, &phys, size);

	if ( _MALI_OSK_ERR_OK != err )
	{
		/* Roll back the process-side mapping made above so the page is not leaked */
		if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
		{
			MALI_DEBUG_PRINT( 2, ("Process address manager succeeded, but Mali Address manager failed for phys=0x%08X size=0x%08X, offset=0x%08X. Will unmap.\n", phys, size, offset));
			engine->process_address->unmap_physical(descriptor, offset, size, unmap_flags);
		}
		MALI_DEBUG_PRINT(2, ("Map mali failed: %s %d\n", __FUNCTION__, __LINE__));
		MALI_ERROR( err );
	}

	MALI_SUCCESS;
}
+
/**
 * @brief Unmaps a physical region from the process and Mali address spaces.
 *
 * @param mem_engine engine created by mali_allocation_engine_create
 * @param descriptor allocation the mapping belongs to
 * @param offset offset from the start of the allocation
 * @param size length of the region in bytes
 * @param unmap_flags per-page flags; must include
 *        _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR for OS-allocated pages
 */
void mali_allocation_engine_unmap_physical(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t unmap_flags )
{
	memory_engine * engine = (memory_engine*)mem_engine;

	MALI_DEBUG_ASSERT_POINTER(engine);
	MALI_DEBUG_ASSERT_POINTER(descriptor);

	MALI_DEBUG_PRINT(7, ("UnMapping length 0x%08X at offset 0x%08X\n", size, offset));

	MALI_DEBUG_ASSERT_POINTER(engine->mali_address);
	MALI_DEBUG_ASSERT_POINTER(engine->process_address);

	if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
	{
		/* Mandatory for the process_address manager to have an unmap function */
		engine->process_address->unmap_physical( descriptor, offset, size, unmap_flags );
	}

	/* Optional for the mali_address manager to have an unmap function */
	if ( NULL != engine->mali_address->unmap_physical )
	{
		engine->mali_address->unmap_physical( descriptor, offset, size, unmap_flags );
	}
}
+
+
+_mali_osk_errcode_t mali_allocation_engine_allocate_page_tables(mali_allocation_engine engine, mali_page_table_block * descriptor, mali_physical_memory_allocator * physical_provider)
+{
+       mali_physical_memory_allocator * active_allocator = physical_provider;
+
+       MALI_DEBUG_ASSERT_POINTER(descriptor);
+       MALI_DEBUG_ASSERT_POINTER(physical_provider);
+
+       while ( NULL != active_allocator )
+       {
+               switch (active_allocator->allocate_page_table_block(active_allocator->ctx, descriptor))
+               {
+                       case MALI_MEM_ALLOC_FINISHED:
+                               MALI_SUCCESS; /* all done */
+                       case MALI_MEM_ALLOC_NONE:
+                               /* try next */
+                               MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate PageTables: No allocation on %s, resorting to %s\n",
+                                                                         ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+                                                                         ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+                               active_allocator = active_allocator->next;
+                               break;
+                       case MALI_MEM_ALLOC_PARTIAL:
+                               MALI_DEBUG_PRINT(1, ("Invalid return value from allocate_page_table_block call: MALI_MEM_ALLOC_PARTIAL\n"));
+                               /* FALL THROUGH */
+                       case MALI_MEM_ALLOC_INTERNAL_FAILURE:
+                               MALI_DEBUG_PRINT(1, ("Aborting due to allocation failure\n"));
+                               active_allocator = NULL; /* end the while loop */
+                               break;
+               }
+       }
+
+       MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+
+void mali_allocation_engine_report_allocators( mali_physical_memory_allocator * physical_provider )
+{
+       mali_physical_memory_allocator * active_allocator = physical_provider;
+       MALI_DEBUG_ASSERT_POINTER(physical_provider);
+
+       MALI_DEBUG_PRINT( 1, ("Mali memory allocators will be used in this order of preference (lowest numbered first) :\n"));
+       while ( NULL != active_allocator )
+       {
+               if ( NULL != active_allocator->name )
+               {
+                       MALI_DEBUG_PRINT( 1, ("\t%d: %s\n", active_allocator->alloc_order, active_allocator->name) );
+               }
+               else
+               {
+                       MALI_DEBUG_PRINT( 1, ("\t%d: (UNNAMED ALLOCATOR)\n", active_allocator->alloc_order) );
+               }
+               active_allocator = active_allocator->next;
+       }
+
+}
+
+u32 mali_allocation_engine_memory_usage(mali_physical_memory_allocator *allocator)
+{
+       u32 sum = 0;
+       while(NULL != allocator)
+       {
+               /* Only count allocators that have set up a stat function. */
+               if(allocator->stat)
+                       sum += allocator->stat(allocator);
+
+               allocator = allocator->next;
+       }
+
+       return sum;
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_kernel_memory_engine.h b/drivers/gpu/arm/mali400/mali/common/mali_kernel_memory_engine.h
new file mode 100644 (file)
index 0000000..cf3dfac
--- /dev/null
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEMORY_ENGINE_H__
+#define  __MALI_KERNEL_MEMORY_ENGINE_H__
+
+typedef void * mali_allocation_engine;
+
+typedef enum { MALI_MEM_ALLOC_FINISHED, MALI_MEM_ALLOC_PARTIAL, MALI_MEM_ALLOC_NONE, MALI_MEM_ALLOC_INTERNAL_FAILURE } mali_physical_memory_allocation_result;
+
+typedef struct mali_physical_memory_allocation
+{
+       void (*release)(void * ctx, void * handle); /**< Function to call on to release the physical memory */
+       void * ctx;
+       void * handle;
+       struct mali_physical_memory_allocation * next;
+} mali_physical_memory_allocation;
+
+struct mali_page_table_block;
+
+typedef struct mali_page_table_block
+{
+       void (*release)(struct mali_page_table_block *page_table_block);
+       void * ctx;
+       void * handle;
+       u32 size; /**< In bytes, should be a multiple of MALI_MMU_PAGE_SIZE to avoid internal fragementation */
+       u32 phys_base; /**< Mali physical address */
+       mali_io_address mapping;
+} mali_page_table_block;
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+typedef enum
+{
+       MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE = 0x1,
+       MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE     = 0x2,
+} mali_memory_allocation_flag;
+
+/**
+ * Supplying this 'magic' physical address requests that the OS allocate the
+ * physical address at page commit time, rather than committing a specific page
+ */
+#define MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC ((u32)(-1))
+
+typedef struct mali_memory_allocation
+{
+       /* Information about the allocation */
+       void * mapping; /**< CPU virtual address where the memory is mapped at */
+       u32 mali_address; /**< The Mali seen address of the memory allocation */
+       u32 size; /**< Size of the allocation */
+       u32 permission; /**< Permission settings */
+       mali_memory_allocation_flag flags;
+       u32 cache_settings; /* type: mali_memory_cache_settings, found in <linux/mali/mali_utgard_uk_types.h> Ump DD breaks if we include it...*/
+
+       _mali_osk_lock_t * lock;
+
+       /* Manager specific information pointers */
+       void * mali_addr_mapping_info; /**< Mali address allocation specific info */
+       void * process_addr_mapping_info; /**< Mapping manager specific info */
+
+       mali_physical_memory_allocation physical_allocation;
+
+       _mali_osk_list_t list; /**< List for linking together memory allocations into the session's memory head */
+} mali_memory_allocation;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+typedef struct mali_physical_memory_allocator
+{
+       mali_physical_memory_allocation_result (*allocate)(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+       mali_physical_memory_allocation_result (*allocate_page_table_block)(void * ctx, mali_page_table_block * block); /* MALI_MEM_ALLOC_PARTIAL not allowed */
+       void (*destroy)(struct mali_physical_memory_allocator * allocator);
+       u32 (*stat)(struct mali_physical_memory_allocator * allocator);
+       void * ctx;
+       const char * name; /**< Descriptive name for use in mali_allocation_engine_report_allocators, or NULL */
+       u32 alloc_order; /**< Order in which the allocations should happen */
+       struct mali_physical_memory_allocator * next;
+} mali_physical_memory_allocator;
+
+typedef struct mali_kernel_mem_address_manager
+{
+       _mali_osk_errcode_t (*allocate)(mali_memory_allocation *); /**< Function to call to reserve an address */
+       void (*release)(mali_memory_allocation *); /**< Function to call to free the address allocated */
+
+        /**
+         * Function called for each physical sub allocation.
+         * Called for each physical block allocated by the physical memory manager.
+         * @param[in] descriptor The memory descriptor in question
+         * @param[in] off Offset from the start of range
+         * @param[in,out] phys_addr A pointer to the physical address of the start of the
+         * physical block. When *phys_addr == MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC
+         * is used, this requests the function to allocate the physical page
+         * itself, and return it through the pointer provided.
+         * @param[in] size Length in bytes of the physical block
+         * @return _MALI_OSK_ERR_OK on success.
+         * A value of type _mali_osk_errcode_t other than _MALI_OSK_ERR_OK indicates failure.
+         * Specifically, _MALI_OSK_ERR_UNSUPPORTED indicates that the function
+         * does not support allocating physical pages itself.
+         */
+        _mali_osk_errcode_t (*map_physical)(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size);
+
+        /**
+         * Function called to remove a physical sub allocation.
+         * Called on error paths where one of the address managers fails.
+         *
+         * @note this is optional. For address managers where this is not
+         * implemented, the value of this member is NULL. The memory engine
+         * currently does not require the mali address manager to be able to
+         * unmap individual pages, but the process address manager must have this
+         * capability.
+         *
+         * @param[in] descriptor The memory descriptor in question
+         * @param[in] off Offset from the start of range
+         * @param[in] size Length in bytes of the physical block
+         * @param[in] flags flags to use on a per-page basis. For OS-allocated
+         * physical pages, this must include _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR.
+         * @return _MALI_OSK_ERR_OK on success.
+         * A value of type _mali_osk_errcode_t other than _MALI_OSK_ERR_OK indicates failure.
+         */
+       void (*unmap_physical)(mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags);
+
+} mali_kernel_mem_address_manager;
+
+mali_allocation_engine mali_allocation_engine_create(mali_kernel_mem_address_manager * mali_address_manager, mali_kernel_mem_address_manager * process_address_manager);
+
+void mali_allocation_engine_destroy(mali_allocation_engine engine);
+
+int mali_allocation_engine_allocate_memory(mali_allocation_engine engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_provider, _mali_osk_list_t *tracking_list );
+void mali_allocation_engine_release_memory(mali_allocation_engine engine, mali_memory_allocation * descriptor);
+
+void mali_allocation_engine_release_pt1_mali_pagetables_unmap(mali_allocation_engine engine, mali_memory_allocation * descriptor);
+void mali_allocation_engine_release_pt2_physical_memory_free(mali_allocation_engine engine, mali_memory_allocation * descriptor);
+
+int mali_allocation_engine_map_physical(mali_allocation_engine engine, mali_memory_allocation * descriptor, u32 offset, u32 phys, u32 cpu_usage_adjust, u32 size);
+void mali_allocation_engine_unmap_physical(mali_allocation_engine engine, mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t unmap_flags);
+
+int mali_allocation_engine_allocate_page_tables(mali_allocation_engine, mali_page_table_block * descriptor, mali_physical_memory_allocator * physical_provider);
+
+void mali_allocation_engine_report_allocators(mali_physical_memory_allocator * physical_provider);
+
+u32 mali_allocation_engine_memory_usage(mali_physical_memory_allocator *allocator);
+
+#endif /* __MALI_KERNEL_MEMORY_ENGINE_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_kernel_utilization.c b/drivers/gpu/arm/mali400/mali/common/mali_kernel_utilization.c
new file mode 100644 (file)
index 0000000..4d48e86
--- /dev/null
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h"
+
+/* Lock protecting the utilization timing/state data below; the reporting interval itself is mali_utilization_timeout (in milliseconds) */
+static _mali_osk_lock_t *time_data_lock;
+
+static _mali_osk_atomic_t num_running_cores;
+
+static u64 period_start_time = 0;
+static u64 work_start_time = 0;
+static u64 accumulated_work_time = 0;
+
+static _mali_osk_timer_t *utilization_timer = NULL;
+static mali_bool timer_running = MALI_FALSE;
+
+static u32 last_utilization = 0 ;
+
+static u32 mali_utilization_timeout = 1000;
+static int gpu_entry_count = 0;
+void (*mali_utilization_callback)(unsigned int) = NULL;
+
/**
 * Timer callback: compute GPU utilization for the period that just elapsed.
 *
 * Runs every mali_utilization_timeout ms (every timeout/2 ms for the first
 * few samples after the GPU becomes busy, tracked via gpu_entry_count).
 * The result is a value in parts of 256, cached in last_utilization and
 * reported through mali_utilization_callback (if installed).
 *
 * @param arg Unused timer callback argument.
 */
static void calculate_gpu_utilization(void* arg)
{
	u64 time_now;
	u64 time_period;
	u32 leading_zeroes;
	u32 shift_val;
	u32 work_normalized;
	u32 period_normalized;
	u32 utilization;

	_mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);

	if (accumulated_work_time == 0 && work_start_time == 0)
	{
		/* Don't reschedule timer, this will be started if new work arrives */
		timer_running = MALI_FALSE;

		_mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);

		/* No work done for this period, report zero usage */
		if (NULL != mali_utilization_callback)
		{
			mali_utilization_callback(0);
		}

		return;
	}

	time_now = _mali_osk_time_get_ns();
	time_period = time_now - period_start_time;

	/* If we are currently busy, update working period up to now */
	if (work_start_time != 0)
	{
		accumulated_work_time += (time_now - work_start_time);
		work_start_time = time_now;
	}

	/*
	 * We have two 64-bit values, a dividend and a divisor.
	 * To avoid dependencies to a 64-bit divider, we shift down the two values
	 * equally first.
	 * We shift the dividend up and possibly the divisor down, making the result X in 256.
	 */

	/* Shift the 64-bit values down so they fit inside a 32-bit integer */
	leading_zeroes = _mali_osk_clz((u32)(time_period >> 32));
	shift_val = 32 - leading_zeroes;
	work_normalized = (u32)(accumulated_work_time >> shift_val);
	period_normalized = (u32)(time_period >> shift_val);

	/*
	 * Now, we should report the usage in parts of 256
	 * this means we must shift up the dividend or down the divisor by 8
	 * (we could do a combination, but we just use one for simplicity,
	 * but the end result should be good enough anyway)
	 */
	if (period_normalized > 0x00FFFFFF)
	{
		/* The divisor is so big that it is safe to shift it down */
		period_normalized >>= 8;
	}
	else
	{
		/*
		 * The divisor is so small that we can shift up the dividend, without losing any data.
		 * (dividend is always smaller than the divisor)
		 */
		work_normalized <<= 8;
	}

	/* NOTE(review): period_normalized is assumed non-zero here
	 * (i.e. time_period > 0) — confirm against the timer granularity. */
	utilization = work_normalized / period_normalized;

	last_utilization = utilization;

	accumulated_work_time = 0;
	period_start_time = time_now; /* starting a new period */

	_mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);

	/* Sample at twice the rate (timeout/2) for the first few periods after
	 * activity starts, then fall back to the normal interval. */
	if (gpu_entry_count <= 3) {
		gpu_entry_count++;
		_mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks((mali_utilization_timeout/2)));
	} else {
		_mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
	}

	if (NULL != mali_utilization_callback)
	{
		mali_utilization_callback(utilization);
	}
}
+
/**
 * Initialize the GPU utilization tracking machinery.
 *
 * Picks up device-specific interval/handler overrides (when
 * USING_GPU_UTILIZATION is enabled), creates the IRQ-safe spinlock that
 * protects the timing data, and creates the periodic timer. The timer is
 * NOT started here; it is armed from mali_utilization_core_start() when
 * the first work arrives.
 *
 * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if the lock
 *         or timer could not be created.
 */
_mali_osk_errcode_t mali_utilization_init(void)
{
#if USING_GPU_UTILIZATION
	struct _mali_osk_device_data data;
	if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data))
	{
		/* Use device specific settings (if defined) */
		if (0 != data.utilization_interval)
		{
			mali_utilization_timeout = data.utilization_interval;
		}
		if (NULL != data.utilization_handler)
		{
			mali_utilization_callback = data.utilization_handler;
		}
	}
#endif

	if (NULL != mali_utilization_callback)
	{
		MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Utilization handler installed with interval %u\n", mali_utilization_timeout));
	}
	else
	{
		MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: No utilization handler installed\n"));
	}

	/* IRQ-safe spinlock: the timing data is also touched from the timer
	 * callback and the core start/end hooks. */
	time_data_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ |
	                                     _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_UTILIZATION);

	if (NULL == time_data_lock)
	{
		return _MALI_OSK_ERR_FAULT;
	}

	_mali_osk_atomic_init(&num_running_cores, 0);

	utilization_timer = _mali_osk_timer_init();
	if (NULL == utilization_timer)
	{
		/* Roll back the lock created above */
		_mali_osk_lock_term(time_data_lock);
		return _MALI_OSK_ERR_FAULT;
	}
	_mali_osk_timer_setcallback(utilization_timer, calculate_gpu_utilization, NULL);

	return _MALI_OSK_ERR_OK;
}
+
+void mali_utilization_suspend(void)
+{
+       if (NULL != utilization_timer)
+       {
+               _mali_osk_timer_del(utilization_timer);
+               timer_running = MALI_FALSE;
+       }
+}
+
+void mali_utilization_term(void)
+{
+       if (NULL != utilization_timer)
+       {
+               _mali_osk_timer_del(utilization_timer);
+               timer_running = MALI_FALSE;
+               _mali_osk_timer_term(utilization_timer);
+               utilization_timer = NULL;
+       }
+
+       _mali_osk_atomic_term(&num_running_cores);
+
+       _mali_osk_lock_term(time_data_lock);
+}
+
/**
 * Notify that a core has started executing a job.
 *
 * Called once per core; only the 0 -> 1 transition of the running-core
 * count marks the whole GPU as busy. Also (re)arms the utilization timer,
 * initially at half the normal interval, if it is not already running.
 *
 * @param time_now Timestamp (ns) sampled by the caller before taking the lock.
 */
void mali_utilization_core_start(u64 time_now)
{
	if (_mali_osk_atomic_inc_return(&num_running_cores) == 1)
	{
		/*
		 * We went from zero cores working, to one core working,
		 * we now consider the entire GPU for being busy
		 */

		_mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);

		if (time_now < period_start_time)
		{
			/*
			 * This might happen if the calculate_gpu_utilization() was able
			 * to run between the sampling of time_now and us grabbing the lock above
			 */
			time_now = period_start_time;
		}

		work_start_time = time_now;
		if (timer_running != MALI_TRUE)
		{
			timer_running = MALI_TRUE;
			period_start_time = work_start_time; /* starting a new period */

			_mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);

			/* Restart the fast-sampling phase for this fresh busy period */
			gpu_entry_count = 0;
			_mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks((mali_utilization_timeout/2)));
		}
		else
		{
			_mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
		}
	}
}
+
/**
 * Notify that a core has finished executing a job.
 *
 * Only the 1 -> 0 transition of the running-core count matters: at that
 * point the busy time since work_start_time is folded into
 * accumulated_work_time for the current measurement period.
 *
 * @param time_now Timestamp (ns) sampled by the caller before taking the lock.
 */
void mali_utilization_core_end(u64 time_now)
{
	if (_mali_osk_atomic_dec_return(&num_running_cores) == 0)
	{
		/*
		 * No more cores are working, so accumulate the time we were busy.
		 */
		_mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);

		if (time_now < work_start_time)
		{
			/*
			 * This might happen if the calculate_gpu_utilization() was able
			 * to run between the sampling of time_now and us grabbing the lock above
			 */
			time_now = work_start_time;
		}

		accumulated_work_time += (time_now - work_start_time);
		work_start_time = 0;

		_mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
	}
}
+
/** User-space query: last computed GPU utilization (parts of 256). */
u32 _mali_ukk_utilization_gp_pp(void)
{
	return last_utilization;
}
+
/** User-space query for GP utilization; returns the combined figure —
 *  per-core-type utilization is not tracked separately in this file. */
u32 _mali_ukk_utilization_gp(void)
{
	return last_utilization;
}
+
/** User-space query for PP utilization; returns the combined figure —
 *  per-core-type utilization is not tracked separately in this file. */
u32 _mali_ukk_utilization_pp(void)
{
	return last_utilization;
}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_kernel_utilization.h b/drivers/gpu/arm/mali400/mali/common/mali_kernel_utilization.h
new file mode 100644 (file)
index 0000000..0769d08
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_UTILIZATION_H__
+#define __MALI_KERNEL_UTILIZATION_H__
+
+#include "mali_osk.h"
+
+extern void (*mali_utilization_callback)(unsigned int);
+
+/**
+ * Initialize/start the Mali GPU utilization metrics reporting.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_utilization_init(void);
+
+/**
+ * Terminate the Mali GPU utilization metrics reporting
+ */
+void mali_utilization_term(void);
+
+/**
+ * Check if Mali utilization is enabled
+ */
+MALI_STATIC_INLINE mali_bool mali_utilization_enabled(void)
+{
+       return (NULL != mali_utilization_callback);
+}
+
+/**
+ * Should be called when a core is about to start executing a job
+ */
+void mali_utilization_core_start(u64 time_now);
+
+/**
+ * Should be called to stop the utilization timer during system suspend
+ */
+void mali_utilization_suspend(void);
+
+/**
+ * Should be called when a core has finished executing a job
+ */
+void mali_utilization_core_end(u64 time_now);
+
+#endif /* __MALI_KERNEL_UTILIZATION_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_kernel_vsync.c b/drivers/gpu/arm/mali400/mali/common/mali_kernel_vsync.c
new file mode 100644 (file)
index 0000000..375f378
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
/**
 * Handle a VSYNC begin/end-wait event reported from user space.
 *
 * With CONFIG_MALI400_PROFILING enabled the event is translated into a
 * suspend/resume profiling entry; pid/tid are added here so user space
 * only needs one kernel call. Without profiling the event is only logged.
 *
 * @param args Event descriptor from user space.
 * @return Always _MALI_OSK_ERR_OK (via MALI_SUCCESS).
 */
_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
{
	_mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
	MALI_IGNORE(event); /* event is not used for release code, and that is OK */

#if defined(CONFIG_MALI400_PROFILING)
	/*
	 * Manually generate user space events in kernel space.
	 * This saves user space from calling kernel space twice in this case.
	 * We just need to remember to add pid and tid manually.
	 */
	if ( event==_MALI_UK_VSYNC_EVENT_BEGIN_WAIT)
	{
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
		                              MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
		                              MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
		                              _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
	}

	if (event==_MALI_UK_VSYNC_EVENT_END_WAIT)
	{

		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
		                              MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
		                              MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
		                              _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
	}
#endif

	MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
	MALI_SUCCESS;
}
+
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_l2_cache.c b/drivers/gpu/arm/mali400/mali/common/mali_l2_cache.c
new file mode 100644 (file)
index 0000000..de42aa8
--- /dev/null
@@ -0,0 +1,474 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_scheduler.h"
+
+/**
+ * Size of the Mali L2 cache registers in bytes
+ */
+#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30
+
+/**
+ * Mali L2 cache register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_l2_cache_register {
+       MALI400_L2_CACHE_REGISTER_STATUS       = 0x0008,
+       /*unused                               = 0x000C */
+       MALI400_L2_CACHE_REGISTER_COMMAND      = 0x0010, /**< Misc cache commands, e.g. clear */
+       MALI400_L2_CACHE_REGISTER_CLEAR_PAGE   = 0x0014,
+       MALI400_L2_CACHE_REGISTER_MAX_READS    = 0x0018, /**< Limit of outstanding read requests */
+       MALI400_L2_CACHE_REGISTER_ENABLE       = 0x001C, /**< Enable misc cache features */
+       MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0020,
+       MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0024,
+       MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x0028,
+       MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x002C,
+} mali_l2_cache_register;
+
+/**
+ * Mali L2 cache commands
+ * These are the commands that can be sent to the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_command
+{
+       MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, /**< Clear the entire cache */
+       /* Read HW TRM carefully before adding/using other commands than the clear above */
+} mali_l2_cache_command;
+
/**
 * Mali L2 cache enable flags
 * Values written to the MALI400_L2_CACHE_REGISTER_ENABLE register
 * (the previous comment was a copy-paste of the command enum's text).
 */
typedef enum mali_l2_cache_enable
{
	MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /**< Default state of enable register */
	MALI400_L2_CACHE_ENABLE_ACCESS = 0x01, /**< Permit cacheable accesses */
	MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, /**< Permit cache read allocate */
} mali_l2_cache_enable;
+
+/**
+ * Mali L2 cache status bits
+ */
+typedef enum mali_l2_cache_status
+{
+       MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, /**< Command handler of L2 cache is busy */
+       MALI400_L2_CACHE_STATUS_DATA_BUSY    = 0x02, /**< L2 cache is busy handling data requests */
+} mali_l2_cache_status;
+
+/**
+ * Definition of the L2 cache core struct
+ * Used to track a L2 cache unit in the system.
+ * Contains information about the mapping of the registers
+ */
+struct mali_l2_cache_core
+{
+       struct mali_hw_core  hw_core;      /**< Common for all HW cores */
+       u32                  core_id;      /**< Unique core ID */
+       _mali_osk_lock_t    *command_lock; /**< Serialize all L2 cache commands */
+       _mali_osk_lock_t    *counter_lock; /**< Synchronize L2 cache counter access */
+       u32                  counter_src0; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+       u32                  counter_src1; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+       u32                  last_invalidated_id;
+       mali_bool            power_is_enabled;
+};
+
+#define MALI400_L2_MAX_READS_DEFAULT 0x1C
+
+static struct mali_l2_cache_core *mali_global_l2_cache_cores[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+static u32 mali_global_num_l2_cache_cores = 0;
+
+int mali_l2_max_reads = MALI400_L2_MAX_READS_DEFAULT;
+
+/* Local helper functions */
+static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val);
+
+
+struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t *resource)
+{
+       struct mali_l2_cache_core *cache = NULL;
+       _mali_osk_lock_flags_t lock_flags;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
+#else
+       lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
+#endif
+
+       MALI_DEBUG_PRINT(2, ("Mali L2 cache: Creating Mali L2 cache: %s\n", resource->description));
+
+       if (mali_global_num_l2_cache_cores >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES)
+       {
+               MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 cache core objects created\n"));
+               return NULL;
+       }
+
+       cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core));
+       if (NULL != cache)
+       {
+               cache->core_id =  mali_global_num_l2_cache_cores;
+               cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
+               cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
+               if (_MALI_OSK_ERR_OK == mali_hw_core_create(&cache->hw_core, resource, MALI400_L2_CACHE_REGISTERS_SIZE))
+               {
+                       cache->command_lock = _mali_osk_lock_init(lock_flags, 0, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
+                       if (NULL != cache->command_lock)
+                       {
+                               cache->counter_lock = _mali_osk_lock_init(lock_flags, 0, _MALI_OSK_LOCK_ORDER_L2_COUNTER);
+                               if (NULL != cache->counter_lock)
+                               {
+                                       mali_l2_cache_reset(cache);
+
+                                       cache->last_invalidated_id = 0;
+                                       cache->power_is_enabled = MALI_TRUE;
+
+                                       mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = cache;
+                                       mali_global_num_l2_cache_cores++;
+
+                                       return cache;
+                               }
+                               else
+                               {
+                                       MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n", cache->hw_core.description));
+                               }
+
+                               _mali_osk_lock_term(cache->command_lock);
+                       }
+                       else
+                       {
+                               MALI_PRINT_ERROR(("Mali L2 cache: Failed to create command lock for L2 cache core %s\n", cache->hw_core.description));
+                       }
+
+                       mali_hw_core_delete(&cache->hw_core);
+               }
+
+               _mali_osk_free(cache);
+       }
+       else
+       {
+               MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
+       }
+
+       return NULL;
+}
+
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache)
+{
+       u32 i;
+
+       /* reset to defaults */
+       mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)MALI400_L2_MAX_READS_DEFAULT);
+       mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_DEFAULT);
+
+       _mali_osk_lock_term(cache->counter_lock);
+       _mali_osk_lock_term(cache->command_lock);
+       mali_hw_core_delete(&cache->hw_core);
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_L2_CACHE_CORES; i++)
+       {
+               if (mali_global_l2_cache_cores[i] == cache)
+               {
+                       mali_global_l2_cache_cores[i] = NULL;
+                       mali_global_num_l2_cache_cores--;
+               }
+       }
+
+       _mali_osk_free(cache);
+}
+
/* Record the core's power state as tracked by the PM code; no HW access. */
void mali_l2_cache_power_is_enabled_set(struct mali_l2_cache_core * core, mali_bool power_is_enabled)
{
	core->power_is_enabled = power_is_enabled;
}
+
/* Query the recorded power state (as last set via the setter above it). */
mali_bool mali_l2_cache_power_is_enabled_get(struct mali_l2_cache_core * core)
{
	return core->power_is_enabled;
}
+
/* Return the unique core ID assigned at creation time. */
u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
{
	return cache->core_id;
}
+
+mali_bool mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter)
+{
+       u32 value = 0; /* disabled src */
+       mali_bool core_is_on;
+
+       MALI_DEBUG_ASSERT_POINTER(cache);
+
+       core_is_on = mali_l2_cache_lock_power_state(cache);
+
+       _mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+       cache->counter_src0 = counter;
+
+       if (MALI_HW_CORE_NO_COUNTER != counter)
+       {
+               value = counter;
+       }
+
+       if (MALI_TRUE == core_is_on)
+       {
+               mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, value);
+       }
+
+       _mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+       mali_l2_cache_unlock_power_state(cache);
+
+       return MALI_TRUE;
+}
+
+mali_bool mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter)
+{
+       u32 value = 0; /* disabled src */
+       mali_bool core_is_on;
+
+       MALI_DEBUG_ASSERT_POINTER(cache);
+
+       core_is_on = mali_l2_cache_lock_power_state(cache);
+
+       _mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+       cache->counter_src1 = counter;
+
+       if (MALI_HW_CORE_NO_COUNTER != counter)
+       {
+               value = counter;
+       }
+
+       if (MALI_TRUE == core_is_on)
+       {
+               mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, value);
+       }
+
+       _mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+       mali_l2_cache_unlock_power_state(cache);
+
+       return MALI_TRUE;
+}
+
/* Return configured counter source 0 (MALI_HW_CORE_NO_COUNTER if disabled). */
u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache)
{
	return cache->counter_src0;
}
+
/* Return configured counter source 1 (MALI_HW_CORE_NO_COUNTER if disabled). */
u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache)
{
	return cache->counter_src1;
}
+
/**
 * Read both performance counter sources and (when enabled) their values.
 *
 * @param cache  L2 cache core to read from.
 * @param src0   Out: configured source 0 (MALI_HW_CORE_NO_COUNTER if disabled).
 * @param value0 Out: counter 0 value; left untouched when source 0 is disabled.
 * @param src1   Out: configured source 1.
 * @param value1 Out: counter 1 value; left untouched when source 1 is disabled.
 */
void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1)
{
	MALI_DEBUG_ASSERT(NULL != src0);
	MALI_DEBUG_ASSERT(NULL != value0);
	MALI_DEBUG_ASSERT(NULL != src1);
	MALI_DEBUG_ASSERT(NULL != value1);

	/* Caller must hold the PM lock and know that we are powered on */

	_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);

	*src0 = cache->counter_src0;
	*src1 = cache->counter_src1;

	if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER)
	{
		*value0 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
	}

	if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER)
	{
		*value1 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
	}

	_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
}
+
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index)
+{
+       if (MALI_MAX_NUMBER_OF_L2_CACHE_CORES > index)
+       {
+               return mali_global_l2_cache_cores[index];
+       }
+
+       return NULL;
+}
+
/* Return the number of currently registered L2 cache cores. */
u32 mali_l2_cache_core_get_glob_num_l2_cores(void)
{
	return mali_global_num_l2_cache_cores;
}
+
/**
 * Bring an L2 cache core into its known operational state:
 * invalidate the cache, enable cacheable access + read allocation, set
 * the max outstanding reads limit, and reprogram any enabled performance
 * counter sources (counter configuration survives a reset this way).
 */
void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
{
	/* Invalidate cache (just to keep it in a known state at startup) */
	mali_l2_cache_invalidate_all(cache);

	/* Enable cache */
	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);

	/* Restart any performance counters (if enabled) */
	_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);

	if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER)
	{
		mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, cache->counter_src0);
	}

	if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER)
	{
		mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, cache->counter_src1);
	}

	_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
}
+
+void mali_l2_cache_reset_all(void)
+{
+       int i;
+       u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+
+       for (i = 0; i < num_cores; i++)
+       {
+               mali_l2_cache_reset(mali_l2_cache_core_get_glob_l2_core(i));
+       }
+}
+
/* Issue the CLEAR_ALL command, invalidating the entire cache. */
_mali_osk_errcode_t mali_l2_cache_invalidate_all(struct mali_l2_cache_core *cache)
{
	return mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
}
+
/**
 * Invalidate the whole cache unless a job with a higher id already did.
 *
 * @param cache L2 cache core (asserted non-NULL; NULL is tolerated in release).
 * @param id    Scheduler id of the job requesting the invalidation.
 * @return MALI_FALSE if the invalidation was skipped (already covered by a
 *         newer job), MALI_TRUE otherwise (including the NULL-cache case).
 */
mali_bool mali_l2_cache_invalidate_all_conditional(struct mali_l2_cache_core *cache, u32 id)
{
	MALI_DEBUG_ASSERT_POINTER(cache);

	if (NULL != cache)
	{
		/* If the last cache invalidation was done by a job with a higher id we
		 * don't have to flush. Since user space will store jobs w/ their
		 * corresponding memory in sequence (first job #0, then job #1, ...),
		 * we don't have to flush for job n-1 if job n has already invalidated
		 * the cache since we know for sure that job n-1's memory was already
		 * written when job n was started. */
		if (((s32)id) <= ((s32)cache->last_invalidated_id))
		{
			return MALI_FALSE;
		}
		else
		{
			cache->last_invalidated_id = mali_scheduler_get_new_id();
		}

		mali_l2_cache_invalidate_all(cache);
	}
	return MALI_TRUE;
}
+
/**
 * Unconditionally invalidate the whole cache and bump last_invalidated_id
 * so subsequent conditional invalidations see a fresh baseline.
 */
void mali_l2_cache_invalidate_all_force(struct mali_l2_cache_core *cache)
{
	MALI_DEBUG_ASSERT_POINTER(cache);

	if (NULL != cache)
	{
		cache->last_invalidated_id = mali_scheduler_get_new_id();
		mali_l2_cache_invalidate_all(cache);
	}
}
+
+_mali_osk_errcode_t mali_l2_cache_invalidate_pages(struct mali_l2_cache_core *cache, u32 *pages, u32 num_pages)
+{
+       u32 i;
+       _mali_osk_errcode_t ret1, ret = _MALI_OSK_ERR_OK;
+
+       for (i = 0; i < num_pages; i++)
+       {
+               ret1 = mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, pages[i]);
+               if (_MALI_OSK_ERR_OK != ret1)
+               {
+                       ret = ret1;
+               }
+       }
+
+       return ret;
+}
+
/**
 * Invalidate the given pages on every registered L2 cache core that is
 * currently powered (cores whose power-state lock reports "off" are skipped).
 */
void mali_l2_cache_invalidate_pages_conditional(u32 *pages, u32 num_pages)
{
	u32 i;

	for (i = 0; i < mali_global_num_l2_cache_cores; i++)
	{
		/* Only touch the HW when the power-state lock says the core is on */
		if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i]))
		{
			mali_l2_cache_invalidate_pages(mali_global_l2_cache_cores[i], pages, num_pages);
		}
		/* NOTE(review): unlock is called even when the lock call above
		 * returned MALI_FALSE — the original "???" comment flags the same
		 * concern; confirm _mali_osk_pm_dev_ref_dec_no_power_on() is safe
		 * to call unconditionally here. */
		mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
	}
}
+
/* Take a PM device reference without powering on; the cache parameter is
 * currently unused (the PM reference is device-wide, not per-core).
 * Returns MALI_TRUE when the device is powered. */
mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache)
{
	return _mali_osk_pm_dev_ref_add_no_power_on();
}
+
/* Drop the PM device reference taken by mali_l2_cache_lock_power_state();
 * the cache parameter is currently unused. */
void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache)
{
	_mali_osk_pm_dev_ref_dec_no_power_on();
}
+
+/* -------- local helper functions below -------- */
+
+
/**
 * Issue a command to the L2 cache with serialized access.
 *
 * Busy-waits (bounded by loop_count status reads, no sleeping — this may
 * run under a spinlock) for the cache's command handler to go idle, then
 * writes @val to @reg.
 *
 * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if the command
 *         interface never went idle within the poll budget.
 */
static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val)
{
	int i = 0;
	const int loop_count = 100000;

	/*
	 * Grab lock in order to send commands to the L2 cache in a serialized fashion.
	 * The L2 cache will ignore commands if it is busy.
	 */
	_mali_osk_lock_wait(cache->command_lock, _MALI_OSK_LOCKMODE_RW);

	/* First, wait for L2 cache command handler to go idle */

	for (i = 0; i < loop_count; i++)
	{
		if (!(mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_STATUS) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY))
		{
			break;
		}
	}

	/* Poll budget exhausted: give up rather than wedge the caller forever */
	if (i == loop_count)
	{
		_mali_osk_lock_signal(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
		MALI_DEBUG_PRINT(1, ( "Mali L2 cache: aborting wait for command interface to go idle\n"));
		MALI_ERROR( _MALI_OSK_ERR_FAULT );
	}

	/* then issue the command */
	mali_hw_core_register_write(&cache->hw_core, reg, val);

	_mali_osk_lock_signal(cache->command_lock, _MALI_OSK_LOCKMODE_RW);

	MALI_SUCCESS;
}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_l2_cache.h b/drivers/gpu/arm/mali400/mali/common/mali_l2_cache.h
new file mode 100644 (file)
index 0000000..ce3f1e9
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_L2_CACHE_H__
+#define __MALI_KERNEL_L2_CACHE_H__
+
+#include "mali_osk.h"
+
+#define MALI_MAX_NUMBER_OF_L2_CACHE_CORES  3
+/* Maximum 1 GP and 4 PP for an L2 cache core (Mali-400 Quad-core) */
+#define MALI_MAX_NUMBER_OF_GROUPS_PER_L2_CACHE 5
+
+struct mali_l2_cache_core;
+struct mali_group;
+
+/* Subsystem lifecycle */
+_mali_osk_errcode_t mali_l2_cache_initialize(void);
+void mali_l2_cache_terminate(void);
+
+/* Core creation/deletion and power bookkeeping */
+struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t * resource);
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache);
+
+void mali_l2_cache_power_is_enabled_set(struct mali_l2_cache_core *core, mali_bool power_is_enabled);
+mali_bool mali_l2_cache_power_is_enabled_get(struct mali_l2_cache_core * core);
+
+u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache);
+
+/* Performance counter source selection and readout */
+mali_bool mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter);
+mali_bool mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter);
+u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache);
+u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache);
+void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1);
+/* Access to the global list of L2 cache cores */
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index);
+u32 mali_l2_cache_core_get_glob_num_l2_cores(void);
+
+void mali_l2_cache_reset(struct mali_l2_cache_core *cache);
+void mali_l2_cache_reset_all(void);
+
+struct mali_group *mali_l2_cache_get_group(struct mali_l2_cache_core *cache, u32 index);
+
+/* Cache invalidation (whole cache, or a list of page addresses) */
+_mali_osk_errcode_t mali_l2_cache_invalidate_all(struct mali_l2_cache_core *cache);
+mali_bool mali_l2_cache_invalidate_all_conditional(struct mali_l2_cache_core *cache, u32 id);
+void mali_l2_cache_invalidate_all_force(struct mali_l2_cache_core *cache);
+_mali_osk_errcode_t mali_l2_cache_invalidate_pages(struct mali_l2_cache_core *cache, u32 *pages, u32 num_pages);
+void mali_l2_cache_invalidate_pages_conditional(u32 *pages, u32 num_pages);
+
+/* PM reference helpers used around register access */
+mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache);
+void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache);
+
+#endif /* __MALI_KERNEL_L2_CACHE_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_mem_validation.c b/drivers/gpu/arm/mali400/mali/common/mali_mem_validation.c
new file mode 100644 (file)
index 0000000..7281a13
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_mem_validation.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#define MALI_INVALID_MEM_ADDR 0xFFFFFFFF
+
+typedef struct
+{
+       u32 phys_base;        /**< Mali physical base of the memory, page aligned */
+       u32 size;             /**< size in bytes of the memory, multiple of page size */
+} _mali_mem_validation_t;
+
+static _mali_mem_validation_t mali_mem_validator = { MALI_INVALID_MEM_ADDR, MALI_INVALID_MEM_ADDR };
+
+/*
+ * Register the single permitted frame-buffer physical range with the
+ * memory validator.  Fails if a range was already registered or if the
+ * base/size are not CPU page aligned.
+ */
+_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size)
+{
+	/* Only one MEM_VALIDATION range is supported. */
+	if (MALI_INVALID_MEM_ADDR != mali_mem_validator.phys_base)
+	{
+		MALI_PRINT_ERROR(("Failed to add frame buffer memory; another range is already specified\n"));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	/* Base and size must both be CPU page aligned. */
+	if (0 != ((start | size) & (~_MALI_OSK_CPU_PAGE_MASK)))
+	{
+		MALI_PRINT_ERROR(("Failed to add frame buffer memory; incorrect alignment\n"));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	mali_mem_validator.phys_base = start;
+	mali_mem_validator.size = size;
+	MALI_DEBUG_PRINT(2, ("Memory Validator installed for Mali physical address base=0x%08X, size=0x%08X\n",
+	                 mali_mem_validator.phys_base, mali_mem_validator.size));
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Validate that [phys_addr, phys_addr + size) is non-empty, does not wrap
+ * around, is CPU page aligned and lies entirely inside the registered
+ * range.  While no range is registered, mali_mem_validator holds
+ * MALI_INVALID_MEM_ADDR and every request is rejected.
+ * Returns _MALI_OSK_ERR_OK when accepted, _MALI_OSK_ERR_FAULT otherwise.
+ */
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size)
+{
+	if (phys_addr < (phys_addr + size)) /* Don't allow overflow (or zero size) */
+	{
+		/* Both start and size must be CPU page aligned */
+		if ((0 == ( phys_addr & (~_MALI_OSK_CPU_PAGE_MASK))) &&
+			(0 == ( size & (~_MALI_OSK_CPU_PAGE_MASK))))
+		{
+			/* First and last byte of the request must both fall inside the validator range */
+			if ((phys_addr          >= mali_mem_validator.phys_base) &&
+				((phys_addr + (size - 1)) >= mali_mem_validator.phys_base) &&
+				(phys_addr          <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) &&
+				((phys_addr + (size - 1)) <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) )
+			{
+				MALI_DEBUG_PRINT(3, ("Accepted range 0x%08X + size 0x%08X (= 0x%08X)\n", phys_addr, size, (phys_addr + size - 1)));
+				return _MALI_OSK_ERR_OK;
+			}
+		}
+	}
+
+	MALI_PRINT_ERROR(("MALI PHYSICAL RANGE VALIDATION ERROR: The range supplied was: phys_base=0x%08X, size=0x%08X\n", phys_addr, size));
+
+	return _MALI_OSK_ERR_FAULT;
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_mem_validation.h b/drivers/gpu/arm/mali400/mali/common/mali_mem_validation.h
new file mode 100644 (file)
index 0000000..e020429
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MEM_VALIDATION_H__
+#define __MALI_MEM_VALIDATION_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size);
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size);
+
+#endif /* __MALI_MEM_VALIDATION_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_memory.c b/drivers/gpu/arm/mali400/mali/common/mali_memory.c
new file mode 100644 (file)
index 0000000..1fccb86
--- /dev/null
@@ -0,0 +1,1297 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_mem_validation.h"
+#include "mali_memory.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_block_allocator.h"
+#include "mali_kernel_mem_os.h"
+#include "mali_session.h"
+#include "mali_l2_cache.h"
+#include "mali_scheduler.h"
+#if defined(CONFIG_MALI400_UMP)
+#include "ump_kernel_interface.h"
+#endif
+
+/* kernel side OS functions and user-kernel interface */
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_osk_list.h"
+#include "mali_osk_bitops.h"
+
+/**
+ * Per-session memory descriptor mapping table sizes
+ */
+#define MALI_MEM_DESCRIPTORS_INIT 64
+#define MALI_MEM_DESCRIPTORS_MAX 65536
+
+typedef struct dedicated_memory_info
+{
+       u32 base;
+       u32 size;
+       struct dedicated_memory_info * next;
+} dedicated_memory_info;
+
+/* types used for external_memory and ump_memory physical memory allocators, which are using the mali_allocation_engine */
+#if defined(CONFIG_MALI400_UMP)
+typedef struct ump_mem_allocation
+{
+       mali_allocation_engine * engine;
+       mali_memory_allocation * descriptor;
+       u32 initial_offset;
+       u32 size_allocated;
+       ump_dd_handle ump_mem;
+} ump_mem_allocation ;
+#endif
+
+typedef struct external_mem_allocation
+{
+       mali_allocation_engine * engine;
+       mali_memory_allocation * descriptor;
+       u32 initial_offset;
+       u32 size;
+} external_mem_allocation;
+
+/**
+ * @brief Internal function for unmapping memory
+ *
+ * Worker function for unmapping memory from a user-process. We assume that the
+ * session/descriptor's lock was obtained before entry. For example, the
+ * wrapper _mali_ukk_mem_munmap() will lock the descriptor, then call this
+ * function to do the actual unmapping. mali_memory_core_session_end() could
+ * also call this directly (depending on compilation options), having locked
+ * the descriptor.
+ *
+ * This function will fail if it is unable to put the MMU in stall mode (which
+ * might be the case if a page fault is also being processed).
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+static _mali_osk_errcode_t _mali_ukk_mem_munmap_internal( _mali_uk_mem_munmap_s *args );
+
+#if defined(CONFIG_MALI400_UMP)
+static void ump_memory_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result ump_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+#endif /* CONFIG_MALI400_UMP */
+
+
+static void external_memory_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result external_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+
+
+/* nop functions */
+
+/* mali address manager needs to allocate page tables on allocate, write to page table(s) on map, write to page table(s) and release page tables on release */
+static _mali_osk_errcode_t  mali_address_manager_allocate(mali_memory_allocation * descriptor); /* validates the range, allocates memory for the page tables if needed */
+static _mali_osk_errcode_t  mali_address_manager_map(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size);
+static void mali_address_manager_release(mali_memory_allocation * descriptor);
+
+/* MMU variables */
+
+typedef struct mali_mmu_page_table_allocation
+{
+       _mali_osk_list_t list;
+       u32 * usage_map;
+       u32 usage_count;
+       u32 num_pages;
+       mali_page_table_block pages;
+} mali_mmu_page_table_allocation;
+
+typedef struct mali_mmu_page_table_allocations
+{
+       _mali_osk_lock_t *lock;
+       _mali_osk_list_t partial;
+       _mali_osk_list_t full;
+       /* we never hold on to a empty allocation */
+} mali_mmu_page_table_allocations;
+
+static mali_kernel_mem_address_manager mali_address_manager =
+{
+       mali_address_manager_allocate, /* allocate */
+       mali_address_manager_release,  /* release */
+       mali_address_manager_map,      /* map_physical */
+       NULL                           /* unmap_physical not present*/
+};
+
+/* the mmu page table cache */
+static struct mali_mmu_page_table_allocations page_table_cache;
+
+
+static mali_kernel_mem_address_manager process_address_manager =
+{
+       _mali_osk_mem_mapregion_init,  /* allocate */
+       _mali_osk_mem_mapregion_term,  /* release */
+       _mali_osk_mem_mapregion_map,   /* map_physical */
+       _mali_osk_mem_mapregion_unmap  /* unmap_physical */
+};
+
+static _mali_osk_errcode_t mali_mmu_page_table_cache_create(void);
+static void mali_mmu_page_table_cache_destroy(void);
+
+static mali_allocation_engine memory_engine = NULL;
+static mali_physical_memory_allocator * physical_memory_allocators = NULL;
+
+static dedicated_memory_info * mem_region_registrations = NULL;
+
+/* Accessor for the module-wide allocation engine.  Created in
+ * mali_memory_initialize(); NULL before init and after terminate. */
+mali_allocation_engine mali_mem_get_memory_engine(void)
+{
+	return memory_engine;
+}
+
+/* called during module init */
+/* Module init: create the MMU page table cache, then the allocation
+ * engine that ties the Mali and process address managers together. */
+_mali_osk_errcode_t mali_memory_initialize(void)
+{
+	_mali_osk_errcode_t err;
+
+	MALI_DEBUG_PRINT(2, ("Memory system initializing\n"));
+
+	err = mali_mmu_page_table_cache_create();
+	if(_MALI_OSK_ERR_OK != err)
+	{
+		MALI_ERROR(err);
+	}
+
+	/* NOTE(review): if engine creation fails the page table cache is not
+	 * destroyed before returning -- confirm whether the caller runs
+	 * mali_memory_terminate() on this error path. */
+	memory_engine = mali_allocation_engine_create(&mali_address_manager, &process_address_manager);
+	MALI_CHECK_NON_NULL( memory_engine, _MALI_OSK_ERR_FAULT);
+
+	MALI_SUCCESS;
+}
+
+/* called if/when our module is unloaded */
+/* Module teardown: destroy the MMU page table cache, hand back every
+ * dedicated memory region, destroy every physical memory allocator and
+ * finally the allocation engine.  Tolerates a partially initialized
+ * state (empty lists, NULL engine). */
+void mali_memory_terminate(void)
+{
+	MALI_DEBUG_PRINT(2, ("Memory system terminating\n"));
+
+	mali_mmu_page_table_cache_destroy();
+
+	/* Release ownership of all registered dedicated memory regions */
+	while ( NULL != mem_region_registrations)
+	{
+		dedicated_memory_info * m;
+		m = mem_region_registrations;
+		mem_region_registrations = m->next;
+		_mali_osk_mem_unreqregion(m->base, m->size);
+		_mali_osk_free(m);
+	}
+
+	/* Destroy the physical memory allocators in list order */
+	while ( NULL != physical_memory_allocators)
+	{
+		mali_physical_memory_allocator * m;
+		m = physical_memory_allocators;
+		physical_memory_allocators = m->next;
+		m->destroy(m);
+	}
+
+	if (NULL != memory_engine)
+	{
+		mali_allocation_engine_destroy(memory_engine);
+		memory_engine = NULL;
+	}
+}
+
+/**
+ * Per-session memory setup: create the descriptor mapping table
+ * (MALI_MEM_DESCRIPTORS_INIT growing up to MALI_MEM_DESCRIPTORS_MAX),
+ * the session memory lock and the (empty) allocation list head.
+ */
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data * session_data)
+{
+	MALI_DEBUG_PRINT(5, ("Memory session begin\n"));
+
+	/* create descriptor mapping table */
+	session_data->descriptor_mapping = mali_descriptor_mapping_create(MALI_MEM_DESCRIPTORS_INIT, MALI_MEM_DESCRIPTORS_MAX);
+
+	if (NULL == session_data->descriptor_mapping)
+	{
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	session_data->memory_lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK
+					| _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_MEM_SESSION);
+	if (NULL == session_data->memory_lock)
+	{
+		mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+		/* NOTE(review): session_data is allocated by the caller; freeing
+		 * it here and then returning an error looks like a double-free
+		 * hazard -- verify against the caller's error handling. */
+		_mali_osk_free(session_data);
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	/* Init the session's memory allocation list */
+	_MALI_OSK_INIT_LIST_HEAD( &session_data->memory_head );
+
+	MALI_DEBUG_PRINT(5, ("MMU session begin: success\n"));
+	MALI_SUCCESS;
+}
+
+/* Callback for mali_descriptor_mapping_call_for_each() at session end:
+ * releases the engine-side memory behind one leftover descriptor and
+ * frees the descriptor itself.
+ * NOTE(review): map_target (a pointer) is printed with 0x%x; fine on a
+ * 32-bit kernel, but %p would be the portable form -- confirm
+ * MALI_DEBUG_PRINT format semantics before changing. */
+static void descriptor_table_cleanup_callback(int descriptor_id, void* map_target)
+{
+	mali_memory_allocation * descriptor;
+
+	descriptor = (mali_memory_allocation*)map_target;
+
+	MALI_DEBUG_PRINT(3, ("Cleanup of descriptor %d mapping to 0x%x in descriptor table\n", descriptor_id, map_target));
+	MALI_DEBUG_ASSERT(descriptor);
+
+	mali_allocation_engine_release_memory(memory_engine, descriptor);
+	_mali_osk_free(descriptor);
+}
+
+/**
+ * End a session's memory tracking.
+ *
+ * Frees every allocation still present on the session's memory list,
+ * runs the descriptor-table cleanup callback for leftover descriptors,
+ * and finally destroys the session memory lock.
+ *
+ * _mali_ukk_mem_munmap_internal() can fail with _MALI_OSK_ERR_BUSY when
+ * the MMU cannot be stalled (e.g. page fault handling in progress); in
+ * that case the session lock is dropped, we sleep briefly and retry the
+ * whole list.
+ *
+ * @param session_data the session being terminated (NULL is tolerated)
+ */
+void mali_memory_session_end(struct mali_session_data *session_data)
+{
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_BUSY;
+
+	MALI_DEBUG_PRINT(3, ("MMU session end\n"));
+
+	if (NULL == session_data)
+	{
+		MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
+		return;
+	}
+
+	while (err == _MALI_OSK_ERR_BUSY)
+	{
+		/* Lock the session so we can modify the memory list */
+		_mali_osk_lock_wait( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+		err = _MALI_OSK_ERR_OK;
+
+		/* Free all memory engine allocations */
+		if (!_mali_osk_list_empty(&session_data->memory_head))
+		{
+			mali_memory_allocation *descriptor;
+			mali_memory_allocation *temp;
+			_mali_uk_mem_munmap_s unmap_args;
+
+			MALI_DEBUG_PRINT(1, ("Memory found on session usage list during session termination\n"));
+
+			unmap_args.ctx = session_data;
+
+			/* use the 'safe' list iterator, since freeing removes the active block from the list we're iterating */
+			_MALI_OSK_LIST_FOREACHENTRY(descriptor, temp, &session_data->memory_head, mali_memory_allocation, list)
+			{
+				/* Fixed: the format string has three conversions but the
+				 * original passed descriptor->size twice, shifting
+				 * descriptor->mapping out of the printed output. */
+				MALI_DEBUG_PRINT(4, ("Freeing block with mali address 0x%x size %d mapped in user space at 0x%x\n",
+						descriptor->mali_address, descriptor->size, descriptor->mapping)
+						);
+				/* ASSERT that the descriptor's lock references the correct thing */
+				MALI_DEBUG_ASSERT(  descriptor->lock == session_data->memory_lock );
+				/* Therefore, we have already locked the descriptor */
+
+				unmap_args.size = descriptor->size;
+				unmap_args.mapping = descriptor->mapping;
+				unmap_args.cookie = (u32)descriptor;
+
+				/*
+					* This removes the descriptor from the list, and frees the descriptor
+					*
+					* Does not handle the _MALI_OSK_SPECIFIC_INDIRECT_MMAP case, since
+					* the only OS we are aware of that requires indirect MMAP also has
+					* implicit mmap cleanup.
+					*/
+				err = _mali_ukk_mem_munmap_internal( &unmap_args );
+
+				if (err == _MALI_OSK_ERR_BUSY)
+				{
+					_mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+					/*
+						* Reason for this;
+						* We were unable to stall the MMU, probably because we are in page fault handling.
+						* Sleep for a while with the session lock released, then try again.
+						* Abnormal termination of programs with running Mali jobs is a normal reason for this.
+						*/
+					_mali_osk_time_ubusydelay(10);
+					break; /* Will jump back into: "while (err == _MALI_OSK_ERR_BUSY)" */
+				}
+			}
+		}
+	}
+	/* Assert that we really did free everything */
+	MALI_DEBUG_ASSERT( _mali_osk_list_empty(&session_data->memory_head) );
+
+	if (NULL != session_data->descriptor_mapping)
+	{
+		mali_descriptor_mapping_call_for_each(session_data->descriptor_mapping, descriptor_table_cleanup_callback);
+		mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+		session_data->descriptor_mapping = NULL;
+	}
+
+	_mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+	/**
+	 * @note Could the VMA close handler mean that we use the session data after it was freed?
+	 * In which case, would need to refcount the session data, and free on VMA close
+	 */
+
+	/* Free the lock */
+	_mali_osk_lock_term( session_data->memory_lock );
+
+	return;
+}
+
+/*
+ * Register OS-backed memory of the given size as a Mali physical memory
+ * resource.  OS memory has second priority (alloc_order 1); the allocator
+ * is inserted into the ordered global list, last-in-first among equals.
+ */
+_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size)
+{
+	const u32 alloc_order = 1; /* OS memory has second priority */
+	mali_physical_memory_allocator * allocator;
+	mali_physical_memory_allocator ** insert_pos;
+
+	allocator = mali_os_allocator_create(size, 0 /* cpu_usage_adjust */, "Shared Mali GPU memory");
+	if (NULL == allocator)
+	{
+		MALI_DEBUG_PRINT(1, ("Failed to create OS memory allocator\n"));
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	allocator->alloc_order = alloc_order;
+
+	/* Walk to the insertion point: after all allocators of lower order. */
+	for (insert_pos = &physical_memory_allocators;
+	     NULL != *insert_pos && (*insert_pos)->alloc_order < alloc_order;
+	     insert_pos = &((*insert_pos)->next))
+	{
+		/* advancing only */
+	}
+
+	allocator->next = *insert_pos;
+	*insert_pos = allocator;
+
+	MALI_SUCCESS;
+}
+
+/**
+ * Register a dedicated (physically contiguous) memory range as a Mali
+ * physical memory resource.  Requests OS ownership of the region, wraps
+ * it in a block allocator (alloc_order 0 = highest priority) and records
+ * cleanup info so mali_memory_terminate() can release the region again.
+ */
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size)
+{
+	mali_physical_memory_allocator * allocator;
+	mali_physical_memory_allocator ** next_allocator_list;
+	dedicated_memory_info * cleanup_data;
+
+	u32 alloc_order = 0; /* Dedicated range has first priority */
+
+	/* do the low level linux operation first */
+
+	/* Request ownership of the memory */
+	if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(start, size, "Dedicated Mali GPU memory"))
+	{
+		MALI_DEBUG_PRINT(1, ("Failed to request memory region for frame buffer (0x%08X - 0x%08X)\n", start, start + size - 1));
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	/* create generic block allocator object to handle it */
+	allocator = mali_block_allocator_create(start, 0 /* cpu_usage_adjust */, size, "Dedicated Mali GPU memory");
+
+	if (NULL == allocator)
+	{
+		MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
+		_mali_osk_mem_unreqregion(start, size);
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	/* save low level cleanup info */
+	allocator->alloc_order = alloc_order;
+
+	cleanup_data = _mali_osk_malloc(sizeof(dedicated_memory_info));
+
+	if (NULL == cleanup_data)
+	{
+		/* NOTE(review): the region is unrequested here before
+		 * allocator->destroy(); this implies destroy() does not release
+		 * the region itself -- confirm to rule out a double unrequest. */
+		_mali_osk_mem_unreqregion(start, size);
+		allocator->destroy(allocator);
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	cleanup_data->base = start;
+	cleanup_data->size = size;
+
+	/* Push onto the registration list read by mali_memory_terminate() */
+	cleanup_data->next = mem_region_registrations;
+	mem_region_registrations = cleanup_data;
+
+	/* link in the allocator: insertion into ordered list
+	 * resources of the same alloc_order will be Last-in-first */
+	next_allocator_list = &physical_memory_allocators;
+
+	while ( NULL != *next_allocator_list &&
+			(*next_allocator_list)->alloc_order < alloc_order )
+	{
+		next_allocator_list = &((*next_allocator_list)->next);
+	}
+
+	allocator->next = (*next_allocator_list);
+	(*next_allocator_list) = allocator;
+
+	MALI_SUCCESS;
+}
+
+#if defined(CONFIG_MALI400_UMP)
+/**
+ * Physical-memory commit callback for UMP-backed allocations: maps every
+ * physical block of the UMP handle (passed as ctx) into the Mali address
+ * range of descriptor, starting at *offset, and advances *offset past
+ * the mapped area (plus an optional guard page).  On success alloc_info
+ * is filled in with ump_memory_release() as its release callback.
+ * Every failure path unmaps whatever was mapped so far and frees all
+ * local allocations before returning MALI_MEM_ALLOC_INTERNAL_FAILURE.
+ */
+static mali_physical_memory_allocation_result ump_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+	ump_dd_handle ump_mem;
+	u32 nr_blocks;
+	u32 i;
+	ump_dd_physical_block * ump_blocks;
+	ump_mem_allocation *ret_allocation;
+
+	MALI_DEBUG_ASSERT_POINTER(ctx);
+	MALI_DEBUG_ASSERT_POINTER(engine);
+	MALI_DEBUG_ASSERT_POINTER(descriptor);
+	MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+	ret_allocation = _mali_osk_malloc( sizeof( ump_mem_allocation ) );
+	if ( NULL==ret_allocation ) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+	ump_mem = (ump_dd_handle)ctx;
+
+	MALI_DEBUG_PRINT(4, ("In ump_memory_commit\n"));
+
+	nr_blocks = ump_dd_phys_block_count_get(ump_mem);
+
+	MALI_DEBUG_PRINT(4, ("Have %d blocks\n", nr_blocks));
+
+	if (nr_blocks == 0)
+	{
+		MALI_DEBUG_PRINT(1, ("No block count\n"));
+		_mali_osk_free( ret_allocation );
+		return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+	}
+
+	ump_blocks = _mali_osk_malloc(sizeof(*ump_blocks)*nr_blocks );
+	if ( NULL==ump_blocks )
+	{
+		_mali_osk_free( ret_allocation );
+		return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+	}
+
+	if (UMP_DD_INVALID == ump_dd_phys_blocks_get(ump_mem, ump_blocks, nr_blocks))
+	{
+		_mali_osk_free(ump_blocks);
+		_mali_osk_free( ret_allocation );
+		return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+	}
+
+	/* Store away the initial offset for unmapping purposes */
+	ret_allocation->initial_offset = *offset;
+
+	/* Map each UMP physical block consecutively into the Mali range */
+	for(i=0; i<nr_blocks; ++i)
+	{
+		MALI_DEBUG_PRINT(4, ("Mapping in 0x%08x size %d\n", ump_blocks[i].addr , ump_blocks[i].size));
+		if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, ump_blocks[i].addr , 0, ump_blocks[i].size ))
+		{
+			u32 size_allocated = *offset - ret_allocation->initial_offset;
+			MALI_DEBUG_PRINT(1, ("Mapping of external memory failed\n"));
+
+			/* unmap all previous blocks (if any) */
+			mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+
+			_mali_osk_free(ump_blocks);
+			_mali_osk_free(ret_allocation);
+			return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+		}
+		*offset += ump_blocks[i].size;
+	}
+
+	if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+	{
+		/* Map in an extra virtual guard page at the end of the VMA;
+		 * block 0's address is reused as the backing page. */
+		MALI_DEBUG_PRINT(4, ("Mapping in extra guard page\n"));
+		if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, ump_blocks[0].addr , 0, _MALI_OSK_MALI_PAGE_SIZE ))
+		{
+			u32 size_allocated = *offset - ret_allocation->initial_offset;
+			MALI_DEBUG_PRINT(1, ("Mapping of external memory (guard page) failed\n"));
+
+			/* unmap all previous blocks (if any) */
+			mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+
+			_mali_osk_free(ump_blocks);
+			_mali_osk_free(ret_allocation);
+			return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+		}
+		*offset += _MALI_OSK_MALI_PAGE_SIZE;
+	}
+
+	_mali_osk_free( ump_blocks );
+
+	/* Record what was mapped so ump_memory_release() can undo it */
+	ret_allocation->engine = engine;
+	ret_allocation->descriptor = descriptor;
+	ret_allocation->ump_mem = ump_mem;
+	ret_allocation->size_allocated = *offset - ret_allocation->initial_offset;
+
+	alloc_info->ctx = NULL;
+	alloc_info->handle = ret_allocation;
+	alloc_info->next = NULL;
+	alloc_info->release = ump_memory_release;
+
+	return MALI_MEM_ALLOC_FINISHED;
+}
+
+/* Release callback paired with ump_memory_commit(): unmaps the committed
+ * range, frees the bookkeeping struct and drops the UMP reference taken
+ * at attach time.  ctx is unused (commit stores NULL there). */
+static void ump_memory_release(void * ctx, void * handle)
+{
+	ump_dd_handle ump_mem;
+	ump_mem_allocation *allocation;
+
+	allocation = (ump_mem_allocation *)handle;
+
+	MALI_DEBUG_ASSERT_POINTER( allocation );
+
+	ump_mem = allocation->ump_mem;
+
+	MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID!=ump_mem);
+
+	/* At present, this is a no-op. But, it allows the mali_address_manager to
+	 * do unmapping of a subrange in future. */
+	mali_allocation_engine_unmap_physical( allocation->engine,
+										   allocation->descriptor,
+										   allocation->initial_offset,
+										   allocation->size_allocated,
+										   (_mali_osk_mem_mapregion_flags_t)0
+										   );
+	_mali_osk_free( allocation );
+
+
+	ump_dd_reference_release(ump_mem) ;
+	return;
+}
+
+/**
+ * User-kernel entry point: attach UMP memory (identified by
+ * args->secure_id) into the session's Mali address space at
+ * args->mali_address.  On success a descriptor id is returned through
+ * args->cookie for later release via _mali_ukk_release_ump_mem().
+ * Takes a UMP reference that every error path below must release.
+ */
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args )
+{
+	ump_dd_handle ump_mem;
+	mali_physical_memory_allocator external_memory_allocator;
+	struct mali_session_data *session_data;
+	mali_memory_allocation * descriptor;
+	int md;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	session_data = (struct mali_session_data *)args->ctx;
+	MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+	/* check arguments */
+	/* NULL might be a valid Mali address */
+	if ( ! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+	/* size must be a multiple of the system page size */
+	if ( args->size % _MALI_OSK_MALI_PAGE_SIZE ) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+	MALI_DEBUG_PRINT(3,
+	                 ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
+	                  args->secure_id, args->mali_address, args->size));
+
+	/* Resolve the secure id; this takes a UMP reference on success */
+	ump_mem = ump_dd_handle_create_from_secure_id( (int)args->secure_id ) ;
+
+	if ( UMP_DD_HANDLE_INVALID==ump_mem ) MALI_ERROR(_MALI_OSK_ERR_FAULT);
+
+	descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation));
+	if (NULL == descriptor)
+	{
+		ump_dd_reference_release(ump_mem);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	/* Describe a Mali-side-only mapping (no process address space) */
+	descriptor->size = args->size;
+	descriptor->mapping = NULL;
+	descriptor->mali_address = args->mali_address;
+	descriptor->mali_addr_mapping_info = (void*)session_data;
+	descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */
+	descriptor->cache_settings = (u32) MALI_CACHE_STANDARD;
+	descriptor->lock = session_data->memory_lock;
+
+	if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+	{
+		descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE;
+	}
+	_mali_osk_list_init( &descriptor->list );
+
+	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, descriptor, &md))
+	{
+		ump_dd_reference_release(ump_mem);
+		_mali_osk_free(descriptor);
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	/* One-shot allocator whose commit callback maps the UMP blocks */
+	external_memory_allocator.allocate = ump_memory_commit;
+	external_memory_allocator.allocate_page_table_block = NULL;
+	external_memory_allocator.ctx = ump_mem;
+	external_memory_allocator.name = "UMP Memory";
+	external_memory_allocator.next = NULL;
+
+	_mali_osk_lock_wait(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+	if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(memory_engine, descriptor, &external_memory_allocator, NULL))
+	{
+		_mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+		mali_descriptor_mapping_free(session_data->descriptor_mapping, md);
+		ump_dd_reference_release(ump_mem);
+		_mali_osk_free(descriptor);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	_mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+	args->cookie = md;
+
+	MALI_DEBUG_PRINT(5,("Returning from UMP attach\n"));
+
+	/* All OK */
+	MALI_SUCCESS;
+}
+
+
+/** @brief Release UMP memory previously attached to this session.
+ *
+ * Looks up the mali_memory_allocation descriptor via the cookie handed back
+ * by the attach call, removes it from the session's descriptor map, releases
+ * the backing memory while holding the session memory lock, and frees the
+ * descriptor.
+ *
+ * @param args uk arguments; args->ctx is the session, args->cookie the
+ *             descriptor handle returned by the attach ioctl
+ * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_INVALID_ARGS for a bad
+ *         context, _MALI_OSK_ERR_FAULT for an unknown cookie
+ */
+_mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args )
+{
+       mali_memory_allocation * descriptor;
+       struct mali_session_data *session_data;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+       session_data = (struct mali_session_data *)args->ctx;
+       MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+       /* Validate the cookie before removing it from the map. */
+       if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session_data->descriptor_mapping, args->cookie, (void**)&descriptor))
+       {
+               MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release ump memory\n", args->cookie));
+               MALI_ERROR(_MALI_OSK_ERR_FAULT);
+       }
+
+       /* Remove the entry; the returned pointer is the same descriptor. */
+       descriptor = mali_descriptor_mapping_free(session_data->descriptor_mapping, args->cookie);
+
+       if (NULL != descriptor)
+       {
+               /* Release (unmap + drop physical backing) under the session
+                * memory lock, as all release call sites in this file do. */
+               _mali_osk_lock_wait( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+               mali_allocation_engine_release_memory(memory_engine, descriptor);
+
+               _mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+               _mali_osk_free(descriptor);
+       }
+
+       MALI_SUCCESS;
+
+}
+#endif /* CONFIG_MALI400_UMP */
+
+
+/** @brief Physical-memory "commit" callback used for external-range mappings.
+ *
+ * Invoked by the allocation engine through the mali_physical_memory_allocator
+ * set up in _mali_ukk_map_external_mem(). @a ctx points at a u32 pair:
+ * data[0] = physical start address, data[1] = size in bytes (see info[] in
+ * _mali_ukk_map_external_mem()).
+ *
+ * Maps the physical range into the descriptor's Mali virtual range starting
+ * at *offset, optionally followed by one guard page, advances *offset past
+ * what was mapped, and records the span in an external_mem_allocation so
+ * external_memory_release() can undo it later.
+ *
+ * @return MALI_MEM_ALLOC_FINISHED on success,
+ *         MALI_MEM_ALLOC_INTERNAL_FAILURE on allocation or mapping failure
+ */
+static mali_physical_memory_allocation_result external_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+       u32 * data;
+       external_mem_allocation * ret_allocation;
+
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+       MALI_DEBUG_ASSERT_POINTER(engine);
+       MALI_DEBUG_ASSERT_POINTER(descriptor);
+       MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+       /* Bookkeeping record consumed by external_memory_release(). */
+       ret_allocation = _mali_osk_malloc( sizeof(external_mem_allocation) );
+
+       if ( NULL == ret_allocation )
+       {
+               return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+       }
+
+       data = (u32*)ctx;
+
+       ret_allocation->engine = engine;
+       ret_allocation->descriptor = descriptor;
+       ret_allocation->initial_offset = *offset;
+
+       alloc_info->ctx = NULL;
+       alloc_info->handle = ret_allocation;
+       alloc_info->next = NULL;
+       alloc_info->release = external_memory_release;
+
+       MALI_DEBUG_PRINT(5, ("External map: mapping phys 0x%08X at mali virtual address 0x%08X staring at offset 0x%08X length 0x%08X\n", data[0], descriptor->mali_address, *offset, data[1]));
+
+       /* Map phys range data[0]..data[0]+data[1] at the current offset. */
+       if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, data[0], 0, data[1]))
+       {
+               MALI_DEBUG_PRINT(1, ("Mapping of external memory failed\n"));
+               _mali_osk_free(ret_allocation);
+               return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+       }
+       *offset += data[1];
+
+       if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+       {
+               /* Map in an extra virtual guard page at the end of the VMA.
+                * The guard page is backed by the first physical page of the
+                * range (data[0]) again; only one extra Mali page is mapped. */
+               MALI_DEBUG_PRINT(4, ("Mapping in extra guard page\n"));
+               if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, data[0], 0, _MALI_OSK_MALI_PAGE_SIZE))
+               {
+                       u32 size_allocated = *offset - ret_allocation->initial_offset;
+                       MALI_DEBUG_PRINT(1, ("Mapping of external memory (guard page) failed\n"));
+
+                       /* unmap what we previously mapped */
+                       mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+                       _mali_osk_free(ret_allocation);
+                       return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+               }
+               *offset += _MALI_OSK_MALI_PAGE_SIZE;
+       }
+
+       /* Total span (including any guard page) needed for later unmap. */
+       ret_allocation->size = *offset - ret_allocation->initial_offset;
+
+       return MALI_MEM_ALLOC_FINISHED;
+}
+
+/** @brief Release callback paired with external_memory_commit().
+ *
+ * Undoes the Mali-side mapping recorded in the external_mem_allocation
+ * handle and frees the bookkeeping record. @a ctx is unused.
+ */
+static void external_memory_release(void * ctx, void * handle)
+{
+       external_mem_allocation * ext_alloc = (external_mem_allocation *) handle;
+
+       MALI_DEBUG_ASSERT_POINTER( ext_alloc );
+
+       /* Currently a full-range unmap; the address manager may support
+        * unmapping of a subrange in the future. */
+       mali_allocation_engine_unmap_physical( ext_alloc->engine,
+                                              ext_alloc->descriptor,
+                                              ext_alloc->initial_offset,
+                                              ext_alloc->size,
+                                              (_mali_osk_mem_mapregion_flags_t)0 );
+
+       _mali_osk_free( ext_alloc );
+}
+
+/** @brief Map an externally-owned physical range into a session's Mali
+ *         address space.
+ *
+ * Validates the request, builds a mali_memory_allocation descriptor, maps
+ * the range via the external_memory_commit allocator callback, and hands a
+ * descriptor-map cookie back to user space for later unmapping.
+ *
+ * Fixes relative to the previous version:
+ *  - the descriptor-mapping failure path now releases the memory while
+ *    holding the session memory lock, consistent with every other
+ *    mali_allocation_engine_release_memory() call site in this file;
+ *  - the debug print no longer passes (void*) casts to %x (format and
+ *    argument types now match).
+ *
+ * @param args uk arguments: phys_addr, size (page multiple), mali_address,
+ *             flags; on success args->cookie receives the handle
+ * @return _MALI_OSK_ERR_OK on success, otherwise a describing error code
+ */
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args )
+{
+       mali_physical_memory_allocator external_memory_allocator;
+       struct mali_session_data *session_data;
+       u32 info[2];
+       mali_memory_allocation * descriptor;
+       int md;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+       session_data = (struct mali_session_data *)args->ctx;
+       MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+       /* Allocator whose commit callback maps the range described by info[]. */
+       external_memory_allocator.allocate = external_memory_commit;
+       external_memory_allocator.allocate_page_table_block = NULL;
+       external_memory_allocator.ctx = &info[0];
+       external_memory_allocator.name = "External Memory";
+       external_memory_allocator.next = NULL;
+
+       /* check arguments */
+       /* NULL might be a valid Mali address */
+       if ( ! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+       /* size must be a multiple of the system page size */
+       if ( args->size % _MALI_OSK_MALI_PAGE_SIZE ) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+       MALI_DEBUG_PRINT(3,
+               ("Requested to map physical memory 0x%08x-0x%08x into virtual memory 0x%08x\n",
+               args->phys_addr,
+               args->phys_addr + args->size - 1,
+               args->mali_address)
+       );
+
+       /* Validate the mali physical range */
+       if (_MALI_OSK_ERR_OK != mali_mem_validation_check(args->phys_addr, args->size))
+       {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       info[0] = args->phys_addr;
+       info[1] = args->size;
+
+       descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation));
+       if (NULL == descriptor) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+       descriptor->size = args->size;
+       descriptor->mapping = NULL;
+       descriptor->mali_address = args->mali_address;
+       descriptor->mali_addr_mapping_info = (void*)session_data;
+       descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */
+       descriptor->cache_settings = (u32)MALI_CACHE_STANDARD;
+       descriptor->lock = session_data->memory_lock;
+       if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+       {
+               descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE;
+       }
+       _mali_osk_list_init( &descriptor->list );
+
+       _mali_osk_lock_wait(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+       if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(memory_engine, descriptor, &external_memory_allocator, NULL))
+       {
+               _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+               _mali_osk_free(descriptor);
+               MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+       }
+
+       _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+       if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, descriptor, &md))
+       {
+               /* Undo the mapping; take the session memory lock around the
+                * release, matching the other release call sites. */
+               _mali_osk_lock_wait(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+               mali_allocation_engine_release_memory(memory_engine, descriptor);
+               _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+               _mali_osk_free(descriptor);
+               MALI_ERROR(_MALI_OSK_ERR_FAULT);
+       }
+
+       args->cookie = md;
+
+       MALI_DEBUG_PRINT(5,("Returning from range_map_external_memory\n"));
+
+       /* All OK */
+       MALI_SUCCESS;
+}
+
+
+/** @brief Unmap an external memory range previously mapped with
+ *         _mali_ukk_map_external_mem().
+ *
+ * Resolves args->cookie to its descriptor, detaches it from the session's
+ * descriptor map, releases the mapping under the session memory lock, and
+ * frees the descriptor.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT for an unknown
+ *         cookie, _MALI_OSK_ERR_INVALID_ARGS for a bad context
+ */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args )
+{
+       struct mali_session_data *session_data;
+       mali_memory_allocation * descriptor;
+       void* removed;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       session_data = (struct mali_session_data *)args->ctx;
+       MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+       if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session_data->descriptor_mapping, args->cookie, (void**)&descriptor))
+       {
+               MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to unmap external memory\n", args->cookie));
+               MALI_ERROR(_MALI_OSK_ERR_FAULT);
+       }
+
+       removed = mali_descriptor_mapping_free(session_data->descriptor_mapping, args->cookie);
+       if (NULL == removed)
+       {
+               /* Already gone from the map; nothing left to release. */
+               MALI_SUCCESS;
+       }
+
+       _mali_osk_lock_wait( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+       mali_allocation_engine_release_memory(memory_engine, descriptor);
+       _mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+       _mali_osk_free(descriptor);
+
+       MALI_SUCCESS;
+}
+
+/** @brief Report the Mali virtual address space layout to user space.
+ *
+ * Layout: (0-1GB unused)(1GB-3GB usable by Mali)(3GB-4GB unused).
+ */
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args )
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+       args->mali_address_base = 1024UL * 1024UL * 1024UL;     /* starting at 1GB */
+       args->memory_size = 2UL * 1024UL * 1024UL * 1024UL;     /* 2GB address space */
+       MALI_SUCCESS;
+}
+
+/** @brief Tear-down counterpart of _mali_ukk_init_mem(); validates the
+ *         arguments but has no per-session state to release here.
+ */
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args )
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       if (NULL == args->ctx)
+       {
+               MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+       }
+       MALI_SUCCESS;
+}
+
+/* Reserve MMU page tables covering the descriptor's Mali virtual range,
+ * extended by one page when a guard page was requested. */
+static _mali_osk_errcode_t mali_address_manager_allocate(mali_memory_allocation * descriptor)
+{
+       struct mali_session_data *session;
+       u32 bytes_to_map;
+
+       MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+       session = (struct mali_session_data *)descriptor->mali_addr_mapping_info;
+
+       bytes_to_map = descriptor->size;
+       if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+       {
+               bytes_to_map += _MALI_OSK_MALI_PAGE_SIZE;
+       }
+
+       return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_address, bytes_to_map);
+}
+
+/* Remove the descriptor's range from the session page directory.
+ * Safe to call more than once on the same descriptor: the first release
+ * poisons mali_address with 0xffffffff so subsequent calls return early. */
+static void mali_address_manager_release(mali_memory_allocation * descriptor)
+{
+       const u32 invalid_addr = 0xffffffff;
+       struct mali_session_data *session;
+
+       MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+       if (invalid_addr == descriptor->mali_address)
+       {
+               return; /* already released */
+       }
+
+       session = (struct mali_session_data *)descriptor->mali_addr_mapping_info;
+       mali_mmu_pagedir_unmap(session->page_directory, descriptor->mali_address, descriptor->size);
+
+       descriptor->mali_address = invalid_addr;
+}
+
+/* Write physical page addresses into the session's page tables, starting
+ * at the descriptor's Mali address plus the given byte offset. */
+static _mali_osk_errcode_t mali_address_manager_map(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size)
+{
+       struct mali_session_data *session;
+       u32 virt;
+
+       MALI_DEBUG_ASSERT_POINTER(descriptor);
+       MALI_DEBUG_ASSERT_POINTER(phys_addr);
+
+       session = (struct mali_session_data *)descriptor->mali_addr_mapping_info;
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       virt = descriptor->mali_address + offset;
+       MALI_DEBUG_PRINT(7, ("Mali map: mapping 0x%08X to Mali address 0x%08X length 0x%08X\n", *phys_addr, virt, size));
+
+       mali_mmu_pagedir_update(session->page_directory, virt, *phys_addr, size, descriptor->cache_settings);
+
+       MALI_SUCCESS;
+}
+
+/* This handler registered to mali_mmap for MMU builds */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args )
+{
+       struct mali_session_data *session_data;
+       mali_memory_allocation * descriptor;
+
+       /* validate input */
+       if (NULL == args) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: args was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }
+
+       /* Unpack arguments */
+       session_data = (struct mali_session_data *)args->ctx;
+
+       /* validate input */
+       if (NULL == session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_FAULT); }
+
+       descriptor = (mali_memory_allocation*) _mali_osk_calloc( 1, sizeof(mali_memory_allocation) );
+       if (NULL == descriptor) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: descriptor was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_NOMEM); }
+
+       descriptor->size = args->size;
+       descriptor->mali_address = args->phys_addr;
+       descriptor->mali_addr_mapping_info = (void*)session_data;
+
+       descriptor->process_addr_mapping_info = args->ukk_private; /* save to be used during physical manager callback */
+       descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE;
+       descriptor->cache_settings = (u32) args->cache_settings ;
+       descriptor->lock = session_data->memory_lock;
+       _mali_osk_list_init( &descriptor->list );
+
+       _mali_osk_lock_wait(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+       if (0 == mali_allocation_engine_allocate_memory(memory_engine, descriptor, physical_memory_allocators, &session_data->memory_head))
+       {
+               /* We do not FLUSH nor TLB_ZAP on MMAP, since we do both of those on job start*/
+               _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+               args->mapping = descriptor->mapping;
+               args->cookie = (u32)descriptor;
+
+               MALI_DEBUG_PRINT(7, ("MMAP OK\n"));
+               MALI_SUCCESS;
+       }
+       else
+       {
+               _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+               /* OOM, but not a fatal error */
+               MALI_DEBUG_PRINT(4, ("Memory allocation failure, OOM\n"));
+               _mali_osk_free(descriptor);
+               /* Linux will free the CPU address allocation, userspace client the Mali address allocation */
+               MALI_ERROR(_MALI_OSK_ERR_FAULT);
+       }
+}
+
+/** @brief Core of the munmap handler.
+ *
+ * Called from _mali_ukk_mem_munmap() with the descriptor's lock (the session
+ * memory lock) held. Unmaps the range from the Mali virtual address space,
+ * optionally force-invalidates all L2 caches, zaps the session's active MMUs,
+ * then frees the physical backing and the descriptor itself.
+ *
+ * @param args args->cookie carries the mali_memory_allocation pointer stored
+ *             by _mali_ukk_mem_mmap()
+ * @return _MALI_OSK_ERR_OK (the caller's retry loop also anticipates
+ *         _MALI_OSK_ERR_BUSY from deeper layers)
+ */
+static _mali_osk_errcode_t _mali_ukk_mem_munmap_internal( _mali_uk_mem_munmap_s *args )
+{
+       struct mali_session_data *session_data;
+       mali_memory_allocation * descriptor;
+
+       descriptor = (mali_memory_allocation *)args->cookie;
+       MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+       /** @note args->context unused; we use the memory_session from the cookie */
+       /* args->mapping and args->size are also discarded. They are only necessary
+          for certain do_munmap implementations. However, they could be used to check the
+          descriptor at this point. */
+
+       session_data = (struct mali_session_data *)descriptor->mali_addr_mapping_info;
+       MALI_DEBUG_ASSERT_POINTER(session_data);
+
+       /* Unmapping the memory from the mali virtual address space.
+          It is allowed to call this function severeal times, which might happen if zapping below fails. */
+       mali_allocation_engine_release_pt1_mali_pagetables_unmap(memory_engine, descriptor);
+
+#ifdef MALI_UNMAP_FLUSH_ALL_MALI_L2
+       {
+               /* Optional build-time behaviour: force-invalidate every powered
+                * L2 cache so no stale lines reference the unmapped range. */
+               u32 i;
+               u32 number_of_l2_ccores = mali_l2_cache_core_get_glob_num_l2_cores();
+               for (i = 0; i < number_of_l2_ccores; i++)
+               {
+                       struct mali_l2_cache_core *core;
+                       core = mali_l2_cache_core_get_glob_l2_core(i);
+                       if (mali_l2_cache_power_is_enabled_get(core) )
+                       {
+                               mali_l2_cache_invalidate_all_force(core);
+                       }
+               }
+       }
+#endif
+
+       /* Invalidate MMU TLBs of all groups currently running jobs for this session. */
+       mali_scheduler_zap_all_active(session_data);
+
+       /* Removes the descriptor from the session's memory list, releases physical memory, releases descriptor */
+       mali_allocation_engine_release_pt2_physical_memory_free(memory_engine, descriptor);
+
+       _mali_osk_free(descriptor);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Handler for unmapping memory for MMU builds */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args )
+{
+       mali_memory_allocation * descriptor;
+       _mali_osk_lock_t *descriptor_lock;
+       _mali_osk_errcode_t err;
+
+       descriptor = (mali_memory_allocation *)args->cookie;
+       MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+       /** @note args->context unused; we use the memory_session from the cookie */
+       /* args->mapping and args->size are also discarded. They are only necessary
+       for certain do_munmap implementations. However, they could be used to check the
+       descriptor at this point. */
+
+       MALI_DEBUG_ASSERT_POINTER((struct mali_session_data *)descriptor->mali_addr_mapping_info);
+
+       descriptor_lock = descriptor->lock; /* should point to the session data lock... */
+
+       err = _MALI_OSK_ERR_BUSY;
+       while (err == _MALI_OSK_ERR_BUSY)
+       {
+               if (descriptor_lock)
+               {
+                       _mali_osk_lock_wait( descriptor_lock, _MALI_OSK_LOCKMODE_RW );
+               }
+
+               err = _mali_ukk_mem_munmap_internal( args );
+
+               if (descriptor_lock)
+               {
+                       _mali_osk_lock_signal( descriptor_lock, _MALI_OSK_LOCKMODE_RW );
+               }
+
+               if (err == _MALI_OSK_ERR_BUSY)
+               {
+                       /*
+                        * Reason for this;
+                        * We where unable to stall the MMU, probably because we are in page fault handling.
+                        * Sleep for a while with the session lock released, then try again.
+                        * Abnormal termination of programs with running Mali jobs is a normal reason for this.
+                        */
+                       _mali_osk_time_ubusydelay(10);
+               }
+       }
+
+       return err;
+}
+
+/** @brief Report accumulated memory usage from the chain of physical
+ *         memory allocators. */
+u32 _mali_ukk_report_memory_usage(void)
+{
+       u32 usage;
+
+       usage = mali_allocation_engine_memory_usage(physical_memory_allocators);
+       return usage;
+}
+
+/** @brief Allocate one MMU page table page (MALI_MMU_PAGE_SIZE bytes).
+ *
+ * Pages are served from a cache of larger allocations. A partially used
+ * allocation is preferred; otherwise a new allocation is created and put on
+ * the partial or full list. Per-page usage within an allocation is tracked
+ * in a bitmap (usage_map) of unsigned longs.
+ *
+ * Fix relative to the previous version: the usage_map size expression
+ * "(num_pages + BITS_PER_LONG - 1) & ~(BITS_PER_LONG-1) / BITS_PER_LONG"
+ * applied '/' before '&' (C operator precedence), over-allocating the
+ * bitmap by roughly a factor of BITS_PER_LONG. It now rounds the page
+ * count up to whole unsigned longs as intended.
+ *
+ * @param table_page receives the Mali (GPU physical) address of the page
+ * @param mapping    receives the CPU kernel mapping of the page
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM on failure
+ */
+_mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping)
+{
+       _mali_osk_lock_wait(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+       if (!_mali_osk_list_empty(&page_table_cache.partial))
+       {
+               /* Hand out a free page from a partially used allocation. */
+               mali_mmu_page_table_allocation * alloc = _MALI_OSK_LIST_ENTRY(page_table_cache.partial.next, mali_mmu_page_table_allocation, list);
+               int page_number = _mali_osk_find_first_zero_bit(alloc->usage_map, alloc->num_pages);
+               MALI_DEBUG_PRINT(6, ("Partial page table allocation found, using page offset %d\n", page_number));
+               _mali_osk_set_nonatomic_bit(page_number, alloc->usage_map);
+               alloc->usage_count++;
+               if (alloc->num_pages == alloc->usage_count)
+               {
+                       /* full, move alloc to full list*/
+                       _mali_osk_list_move(&alloc->list, &page_table_cache.full);
+               }
+               _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+               *table_page = (MALI_MMU_PAGE_SIZE * page_number) + alloc->pages.phys_base;
+               *mapping =  (mali_io_address)((MALI_MMU_PAGE_SIZE * page_number) + (u32)alloc->pages.mapping);
+               MALI_DEBUG_PRINT(4, ("Page table allocated for VA=0x%08X, MaliPA=0x%08X\n", *mapping, *table_page ));
+               MALI_SUCCESS;
+       }
+       else
+       {
+               mali_mmu_page_table_allocation * alloc;
+               u32 usage_map_longs;
+               /* no free pages, allocate a new one */
+
+               alloc = (mali_mmu_page_table_allocation *)_mali_osk_calloc(1, sizeof(mali_mmu_page_table_allocation));
+               if (NULL == alloc)
+               {
+                       _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+                       *table_page = MALI_INVALID_PAGE;
+                       MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+               }
+
+               _MALI_OSK_INIT_LIST_HEAD(&alloc->list);
+
+               if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_page_tables(memory_engine, &alloc->pages, physical_memory_allocators))
+               {
+                       MALI_DEBUG_PRINT(1, ("No more memory for page tables\n"));
+                       _mali_osk_free(alloc);
+                       _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+                       *table_page = MALI_INVALID_PAGE;
+                       *mapping = NULL;
+                       MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+               }
+
+               /* create the usage map: one bit per page, rounded up to whole
+                * unsigned longs */
+               alloc->num_pages = alloc->pages.size / MALI_MMU_PAGE_SIZE;
+               alloc->usage_count = 1;
+               MALI_DEBUG_PRINT(3, ("New page table cache expansion, %d pages in new cache allocation\n", alloc->num_pages));
+               usage_map_longs = (alloc->num_pages + BITS_PER_LONG - 1) / BITS_PER_LONG;
+               alloc->usage_map = _mali_osk_calloc(1, usage_map_longs * sizeof(unsigned long));
+               if (NULL == alloc->usage_map)
+               {
+                       MALI_DEBUG_PRINT(1, ("Failed to allocate memory to describe MMU page table cache usage\n"));
+                       alloc->pages.release(&alloc->pages);
+                       _mali_osk_free(alloc);
+                       _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+                       *table_page = MALI_INVALID_PAGE;
+                       *mapping = NULL;
+                       MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+               }
+
+               /* Page 0 of the new allocation is handed out right away. */
+               _mali_osk_set_nonatomic_bit(0, alloc->usage_map);
+
+               if (alloc->num_pages > 1)
+               {
+                       _mali_osk_list_add(&alloc->list, &page_table_cache.partial);
+               }
+               else
+               {
+                       _mali_osk_list_add(&alloc->list, &page_table_cache.full);
+               }
+
+               _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+               *table_page = alloc->pages.phys_base; /* return the first page */
+               *mapping = alloc->pages.mapping; /* Mapping for first page */
+               MALI_DEBUG_PRINT(4, ("Page table allocated: VA=0x%08X, MaliPA=0x%08X\n", *mapping, *table_page ));
+               MALI_SUCCESS;
+       }
+}
+
+/** @brief Return an MMU page table page to the cache.
+ *
+ * Finds the cache allocation containing physical address @a pa (first the
+ * partial list, then the full list), clears its usage bit, zeroes the page
+ * content, and either frees the whole allocation when it becomes empty or
+ * moves it from the full to the partial list.
+ *
+ * @param pa Mali physical address of the page, as returned by
+ *           mali_mmu_get_table_page(); must be page aligned
+ */
+void mali_mmu_release_table_page(u32 pa)
+{
+       mali_mmu_page_table_allocation * alloc, * temp_alloc;
+
+       MALI_DEBUG_PRINT_IF(1, pa & 4095, ("Bad page address 0x%x given to mali_mmu_release_table_page\n", (void*)pa));
+
+       MALI_DEBUG_PRINT(4, ("Releasing table page 0x%08X to the cache\n", pa));
+
+       _mali_osk_lock_wait(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+       /* find the entry this address belongs to */
+       /* first check the partial list */
+       _MALI_OSK_LIST_FOREACHENTRY(alloc, temp_alloc, &page_table_cache.partial, mali_mmu_page_table_allocation, list)
+       {
+               u32 start = alloc->pages.phys_base;
+               u32 last = start + (alloc->num_pages - 1) * MALI_MMU_PAGE_SIZE;
+               if (pa >= start && pa <= last)
+               {
+                       /* Releasing a page that is not marked in use is a caller bug. */
+                       MALI_DEBUG_ASSERT(0 != _mali_osk_test_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map));
+                       _mali_osk_clear_nonatomic_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map);
+                       alloc->usage_count--;
+
+                       /* Zero the page so a future reuse starts from a clean table. */
+                       _mali_osk_memset((void*)( ((u32)alloc->pages.mapping) + (pa - start) ), 0, MALI_MMU_PAGE_SIZE);
+
+                       if (0 == alloc->usage_count)
+                       {
+                               /* empty, release whole page alloc */
+                               _mali_osk_list_del(&alloc->list);
+                               alloc->pages.release(&alloc->pages);
+                               _mali_osk_free(alloc->usage_map);
+                               _mali_osk_free(alloc);
+                       }
+                       _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+                       MALI_DEBUG_PRINT(4, ("(partial list)Released table page 0x%08X to the cache\n", pa));
+                       return;
+               }
+       }
+
+       /* the check the full list */
+       _MALI_OSK_LIST_FOREACHENTRY(alloc, temp_alloc, &page_table_cache.full, mali_mmu_page_table_allocation, list)
+       {
+               u32 start = alloc->pages.phys_base;
+               u32 last = start + (alloc->num_pages - 1) * MALI_MMU_PAGE_SIZE;
+               if (pa >= start && pa <= last)
+               {
+                       _mali_osk_clear_nonatomic_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map);
+                       alloc->usage_count--;
+
+                       /* Zero the page so a future reuse starts from a clean table. */
+                       _mali_osk_memset((void*)( ((u32)alloc->pages.mapping) + (pa - start) ), 0, MALI_MMU_PAGE_SIZE);
+
+
+                       if (0 == alloc->usage_count)
+                       {
+                               /* empty, release whole page alloc */
+                               _mali_osk_list_del(&alloc->list);
+                               alloc->pages.release(&alloc->pages);
+                               _mali_osk_free(alloc->usage_map);
+                               _mali_osk_free(alloc);
+                       }
+                       else
+                       {
+                               /* transfer to partial list */
+                               _mali_osk_list_move(&alloc->list, &page_table_cache.partial);
+                       }
+
+                       _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+                       MALI_DEBUG_PRINT(4, ("(full list)Released table page 0x%08X to the cache\n", pa));
+                       return;
+               }
+       }
+
+       /* Address not found in either list — debug-log and drop it. */
+       MALI_DEBUG_PRINT(1, ("pa 0x%x not found in the page table cache\n", (void*)pa));
+
+       _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/* Set up the (initially empty) page table page cache and its lock. */
+static _mali_osk_errcode_t mali_mmu_page_table_cache_create(void)
+{
+       page_table_cache.lock = _mali_osk_lock_init(
+               _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE,
+               0, _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE);
+       MALI_CHECK_NON_NULL( page_table_cache.lock, _MALI_OSK_ERR_FAULT );
+
+       _MALI_OSK_INIT_LIST_HEAD(&page_table_cache.partial);
+       _MALI_OSK_INIT_LIST_HEAD(&page_table_cache.full);
+
+       MALI_SUCCESS;
+}
+
+/** @brief Tear down the page table page cache.
+ *
+ * Releases every allocation on the partial and full lists and destroys the
+ * cache lock. Entries still marked in use indicate a leak elsewhere and are
+ * reported via debug prints, then freed anyway.
+ */
+static void mali_mmu_page_table_cache_destroy(void)
+{
+       mali_mmu_page_table_allocation * alloc, *temp;
+
+       _MALI_OSK_LIST_FOREACHENTRY(alloc, temp, &page_table_cache.partial, mali_mmu_page_table_allocation, list)
+       {
+               MALI_DEBUG_PRINT_IF(1, 0 != alloc->usage_count, ("Destroying page table cache while pages are tagged as in use. %d allocations still marked as in use.\n", alloc->usage_count));
+               _mali_osk_list_del(&alloc->list);
+               alloc->pages.release(&alloc->pages);
+               _mali_osk_free(alloc->usage_map);
+               _mali_osk_free(alloc);
+       }
+
+       /* Allocations on the full list are by definition fully in use. */
+       MALI_DEBUG_PRINT_IF(1, !_mali_osk_list_empty(&page_table_cache.full), ("Page table cache full list contains one or more elements \n"));
+
+       _MALI_OSK_LIST_FOREACHENTRY(alloc, temp, &page_table_cache.full, mali_mmu_page_table_allocation, list)
+       {
+               MALI_DEBUG_PRINT(1, ("Destroy alloc 0x%08X with usage count %d\n", (u32)alloc, alloc->usage_count));
+               _mali_osk_list_del(&alloc->list);
+               alloc->pages.release(&alloc->pages);
+               _mali_osk_free(alloc->usage_map);
+               _mali_osk_free(alloc);
+       }
+
+       _mali_osk_lock_term(page_table_cache.lock);
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_memory.h b/drivers/gpu/arm/mali400/mali/common/mali_memory.h
new file mode 100644 (file)
index 0000000..d2cef59
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_H__
+#define __MALI_MEMORY_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+
+/** @brief Initialize Mali memory subsystem
+ *
+ * Allocate and initialize internal data structures. Must be called before
+ * allocating Mali memory.
+ *
+ * @return On success _MALI_OSK_ERR_OK, otherwise an error code describing the failure.
+ */
+_mali_osk_errcode_t mali_memory_initialize(void);
+
+/** @brief Terminate Mali memory system
+ *
+ * Clean up and release internal data structures.
+ */
+void mali_memory_terminate(void);
+
+/** @brief Start new Mali memory session
+ *
+ * Allocate and prepare session specific memory allocation data. The
+ * session page directory, lock, and descriptor map are set up.
+ *
+ * @param mali_session_data pointer to the session data structure
+ */
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *mali_session_data);
+
+/** @brief Close a Mali memory session
+ *
+ * Release session specific memory allocation related data.
+ *
+ * @param mali_session_data pointer to the session data structure
+ */
+void mali_memory_session_end(struct mali_session_data *mali_session_data);
+
+/** @brief Allocate a page table page
+ *
+ * Allocate a page for use as a page directory or page table. The page is
+ * mapped into kernel space.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise an error code
+ * @param table_page GPU pointer to the allocated page
+ * @param mapping CPU pointer to the mapping of the allocated page
+ */
+_mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping);
+
+/** @brief Release a page table page
+ *
+ * Release a page table page allocated through \a mali_mmu_get_table_page
+ *
+ * @param pa the GPU address of the page to release
+ */
+void mali_mmu_release_table_page(u32 pa);
+
+
+/** @brief Parse resource and prepare the OS memory allocator
+ *
+ * @param size Maximum size to allocate for Mali GPU.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size);
+
+/** @brief Parse resource and prepare the dedicated memory allocator
+ *
+ * @param start Physical start address of dedicated Mali GPU memory.
+ * @param size Size of dedicated Mali GPU memory.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size);
+
+mali_allocation_engine mali_mem_get_memory_engine(void);
+
+#endif /* __MALI_MEMORY_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_mmu.c b/drivers/gpu/arm/mali400/mali/common/mali_mmu.c
new file mode 100644 (file)
index 0000000..382a17e
--- /dev/null
@@ -0,0 +1,444 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+#include "mali_osk_list.h"
+#include "mali_ukk.h"
+
+#include "mali_mmu.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_mmu_page_directory.h"
+
+/**
+ * Size of the MMU register window in bytes.
+ * 0x24 = 36 bytes = nine 32-bit registers; see the mali_mmu_register
+ * enum in mali_mmu.h (DTE_ADDR at 0x00 through INT_STATUS at 0x20).
+ */
+#define MALI_MMU_REGISTERS_SIZE 0x24
+
+/**
+ * MMU commands
+ * These are the commands that can be written to
+ * MALI_MMU_REGISTER_COMMAND to control the MMU unit.
+ */
+typedef enum mali_mmu_command
+{
+       MALI_MMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
+       MALI_MMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
+       MALI_MMU_COMMAND_ENABLE_STALL = 0x02, /**< Enable stall on page fault */
+       MALI_MMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
+       MALI_MMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
+       MALI_MMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
+       MALI_MMU_COMMAND_HARD_RESET = 0x06 /**< Reset the MMU back to power-on settings */
+} mali_mmu_command;
+
+static void mali_mmu_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data);
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu);
+
+/* page fault queue flush helper pages
+ * note that the mapping pointers are currently unused outside of the initialization functions */
+static u32 mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+static u32 mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+static u32 mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+
+/* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
+static u32 mali_empty_page_directory = MALI_INVALID_PAGE;
+
+/** @brief Allocate the driver-global MMU helper pages.
+ *
+ * Sets up the shared empty page directory (activated on any MMU with no
+ * session bound) and the page-fault flush page directory/table/data page.
+ * On failure, anything already allocated is released again.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise an error code
+ */
+_mali_osk_errcode_t mali_mmu_initialize(void)
+{
+       /* allocate the helper pages */
+       /* mali_allocate_empty_page() returns 0 on failure, not MALI_INVALID_PAGE */
+       mali_empty_page_directory = mali_allocate_empty_page();
+       if(0 == mali_empty_page_directory)
+       {
+               mali_empty_page_directory = MALI_INVALID_PAGE;
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       if (_MALI_OSK_ERR_OK != mali_create_fault_flush_pages(&mali_page_fault_flush_page_directory,
+                                       &mali_page_fault_flush_page_table, &mali_page_fault_flush_data_page))
+       {
+               /* Undo the empty page directory allocation before failing */
+               mali_free_empty_page(mali_empty_page_directory);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/** @brief Release the driver-global MMU helper pages.
+ *
+ * Counterpart of mali_mmu_initialize(); frees the empty page directory
+ * and the page-fault flush pages.
+ */
+void mali_mmu_terminate(void)
+{
+       MALI_DEBUG_PRINT(3, ("Mali MMU: terminating\n"));
+
+       /* Free global helper pages */
+       mali_free_empty_page(mali_empty_page_directory);
+
+       /* Free the page fault flush pages */
+       mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory,
+                                   &mali_page_fault_flush_page_table, &mali_page_fault_flush_data_page);
+}
+
+/** @brief Create an MMU core object for the given resource.
+ *
+ * Maps the MMU register window, attaches the core to @a group and, unless
+ * the MMU is virtual, resets the hardware and installs the IRQ handlers.
+ * On any failure the partially built object is torn down again in reverse
+ * order (IRQ is never installed in that case) and NULL is returned.
+ *
+ * @param resource   register window and IRQ description for this MMU
+ * @param group      the group this MMU is added to
+ * @param is_virtual MALI_TRUE for a virtual (broadcast) MMU: hardware
+ *                   reset and IRQ setup are skipped for those
+ * @return pointer to the new MMU core, or NULL on failure
+ */
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual)
+{
+       struct mali_mmu_core* mmu = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(resource);
+
+       MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));
+
+       /* calloc leaves mmu->irq == NULL, which mali_mmu_delete() relies on */
+       mmu = _mali_osk_calloc(1,sizeof(struct mali_mmu_core));
+       if (NULL != mmu)
+       {
+               if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE))
+               {
+                       if (_MALI_OSK_ERR_OK == mali_group_add_mmu_core(group, mmu))
+                       {
+                               if (is_virtual)
+                               {
+                                       /* Skip reset and IRQ setup for virtual MMU */
+                                       return mmu;
+                               }
+
+                               if (_MALI_OSK_ERR_OK == mali_mmu_reset(mmu))
+                               {
+                                       /* Setup IRQ handlers (which will do IRQ probing if needed) */
+                                       mmu->irq = _mali_osk_irq_init(resource->irq,
+                                                                     mali_group_upper_half_mmu,
+                                                                     group,
+                                                                     mali_mmu_probe_trigger,
+                                                                     mali_mmu_probe_ack,
+                                                                     mmu,
+                                                                     "mali_mmu_irq_handlers");
+                                       if (NULL != mmu->irq)
+                                       {
+                                               return mmu;
+                                       }
+                                       else
+                                       {
+                                               MALI_PRINT_ERROR(("Mali MMU: Failed to setup interrupt handlers for MMU %s\n", mmu->hw_core.description));
+                                       }
+                               }
+                               /* Reset or IRQ setup failed: detach from the group again */
+                               mali_group_remove_mmu_core(group);
+                       }
+                       else
+                       {
+                               MALI_PRINT_ERROR(("Mali MMU: Failed to add core %s to group\n", mmu->hw_core.description));
+                       }
+                       mali_hw_core_delete(&mmu->hw_core);
+               }
+
+               _mali_osk_free(mmu);
+       }
+       else
+       {
+               MALI_PRINT_ERROR(("Failed to allocate memory for MMU\n"));
+       }
+
+       return NULL;
+}
+
+/** @brief Destroy an MMU core object created by mali_mmu_create().
+ *
+ * Tears down the IRQ handlers (if any were installed; a virtual MMU has
+ * none), unmaps the register window and frees the object.
+ *
+ * @param mmu the MMU core to delete; must not be NULL
+ */
+void mali_mmu_delete(struct mali_mmu_core *mmu)
+{
+       if (NULL != mmu->irq)
+       {
+               _mali_osk_irq_term(mmu->irq);
+       }
+
+       mali_hw_core_delete(&mmu->hw_core);
+       _mali_osk_free(mmu);
+}
+
+/** @brief Issue the ENABLE_PAGING command and wait for it to take effect.
+ *
+ * Busy-polls the status register up to MALI_REG_POLL_COUNT_SLOW times for
+ * the PAGING_ENABLED bit; logs an error if the bit never appears.
+ *
+ * @param mmu the MMU to enable address translation on
+ */
+static void mali_mmu_enable_paging(struct mali_mmu_core *mmu)
+{
+       int i;
+
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+
+       for (i = 0; i < MALI_REG_POLL_COUNT_SLOW; ++i)
+       {
+               if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_PAGING_ENABLED)
+               {
+                       break;
+               }
+       }
+       /* i only reaches the poll limit if the bit never showed up */
+       if (MALI_REG_POLL_COUNT_SLOW == i)
+       {
+               MALI_PRINT_ERROR(("Enable paging request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+       }
+}
+
+/** @brief Issue the ENABLE_STALL command and wait for the stall to engage.
+ *
+ * Returns MALI_TRUE immediately when paging is disabled (the MMU is then
+ * implicitly stalled) and MALI_FALSE when a page fault is active, since a
+ * stall cannot be requested in page-fault state.
+ *
+ * @param mmu the MMU to stall
+ * @return MALI_TRUE if the stall engaged, MALI_FALSE otherwise
+ */
+mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu)
+{
+       int i;
+       u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+       if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED) )
+       {
+               MALI_DEBUG_PRINT(4, ("MMU stall is implicit when Paging is not enebled.\n"));
+               return MALI_TRUE;
+       }
+
+       if ( mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
+       {
+               MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it is in pagefault state.\n"));
+               return MALI_FALSE;
+       }
+
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+
+       for (i = 0; i < MALI_REG_POLL_COUNT_SLOW; ++i)
+       {
+               mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+               /* Done when stall (or a page fault) is reported and the
+                * "stall not active" bit has cleared */
+               if (mmu_status & (MALI_MMU_STATUS_BIT_STALL_ACTIVE|MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) &&
+                   (0 == (mmu_status & MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE)))
+               {
+                       break;
+               }
+               /* Give up if paging got disabled while we were waiting */
+               if (0 == (mmu_status & ( MALI_MMU_STATUS_BIT_PAGING_ENABLED )))
+               {
+                       break;
+               }
+       }
+       if (MALI_REG_POLL_COUNT_SLOW == i)
+       {
+               MALI_PRINT_ERROR(("Enable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+               return MALI_FALSE;
+       }
+
+       /* A page fault may have raced with the stall request */
+       if ( mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
+       {
+               MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it has a pagefault.\n"));
+               return MALI_FALSE;
+       }
+
+       return MALI_TRUE;
+}
+
+/** @brief Issue the DISABLE_STALL command and wait for the stall to release.
+ *
+ * Silently returns when paging is disabled (nothing to unstall) or when a
+ * page fault is active (the stall cannot be dropped in page-fault state).
+ *
+ * @param mmu the MMU to release from stall
+ */
+void mali_mmu_disable_stall(struct mali_mmu_core *mmu)
+{
+       int i;
+       u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+       if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED ))
+       {
+               MALI_DEBUG_PRINT(3, ("MMU disable skipped since it was not enabled.\n"));
+               return;
+       }
+       if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)
+       {
+               MALI_DEBUG_PRINT(2, ("Aborting MMU disable stall request since it is in pagefault state.\n"));
+               return;
+       }
+
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+
+       for (i = 0; i < MALI_REG_POLL_COUNT_SLOW; ++i)
+       {
+               u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+               if ( 0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) )
+               {
+                       break;
+               }
+               if ( status &  MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
+               {
+                       break;
+               }
+               /* BUG FIX: test the freshly read 'status' here. The original
+                * code re-tested the stale pre-loop 'mmu_status', which was
+                * already known to have PAGING_ENABLED set, so this exit
+                * condition could never fire if paging was disabled while
+                * polling. */
+               if ( 0 == (status & MALI_MMU_STATUS_BIT_PAGING_ENABLED ))
+               {
+                       break;
+               }
+       }
+       if (MALI_REG_POLL_COUNT_SLOW == i)
+       {
+               MALI_DEBUG_PRINT(1,("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+       }
+}
+
+/** @brief Acknowledge a page fault: issue PAGE_FAULT_DONE to leave page-fault mode.
+ *
+ * @param mmu the MMU currently in page-fault state
+ */
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu)
+{
+       MALI_DEBUG_PRINT(4, ("Mali MMU: %s: Leaving page fault mode\n", mmu->hw_core.description));
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE);
+}
+
+/** @brief Hard-reset the MMU and wait for completion.
+ *
+ * Writes a marker value (0xCAFEBABE) into DTE_ADDR, issues HARD_RESET and
+ * polls DTE_ADDR until the hardware has cleared it back to 0, which signals
+ * the reset is complete.
+ *
+ * @param mmu the MMU to reset
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if the poll times out
+ */
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu)
+{
+       int i;
+
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, 0xCAFEBABE);
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_HARD_RESET);
+
+       for (i = 0; i < MALI_REG_POLL_COUNT_SLOW; ++i)
+       {
+               if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR) == 0)
+               {
+                       break;
+               }
+       }
+       if (MALI_REG_POLL_COUNT_SLOW == i)
+       {
+               MALI_PRINT_ERROR(("Reset request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/** @brief Reset an MMU to a known state with the empty page directory active.
+ *
+ * Stalls the MMU, hard-resets it, unmasks the page-fault and bus-error
+ * interrupts, activates the global empty page directory, re-enables paging
+ * and finally releases the stall.
+ *
+ * @param mmu the MMU to reset; must not be NULL
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if the raw reset failed
+ */
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu)
+{
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+       /* NOTE(review): stall_success is only consumed by MALI_DEBUG_ASSERT,
+        * so non-debug builds may warn about a set-but-unused variable —
+        * confirm against the build flags. */
+       mali_bool stall_success;
+       MALI_DEBUG_ASSERT_POINTER(mmu);
+
+       stall_success = mali_mmu_enable_stall(mmu);
+
+       /* The stall can not fail in current hw-state */
+       MALI_DEBUG_ASSERT(stall_success);
+
+       MALI_DEBUG_PRINT(3, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->hw_core.description));
+
+       if (_MALI_OSK_ERR_OK == mali_mmu_raw_reset(mmu))
+       {
+               mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+               /* no session is active, so just activate the empty page directory */
+               mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory);
+               mali_mmu_enable_paging(mmu);
+               err = _MALI_OSK_ERR_OK;
+       }
+       mali_mmu_disable_stall(mmu);
+
+       return err;
+}
+
+/** @brief Invalidate the whole MMU TLB, stalling around the zap when possible.
+ *
+ * The ZAP_CACHE command is issued regardless of whether the stall was
+ * obtained; when the stall failed (MMU in page-fault state) the stall is
+ * deliberately not released and MALI_FALSE is returned.
+ *
+ * @param mmu the MMU whose page table cache is zapped
+ * @return MALI_TRUE if the stall/zap/unstall sequence completed, MALI_FALSE otherwise
+ */
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu)
+{
+       mali_bool stall_success = mali_mmu_enable_stall(mmu);
+
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+
+       if (MALI_FALSE == stall_success)
+       {
+               /* False means that it is in Pagefault state. Not possible to disable_stall then */
+               return MALI_FALSE;
+       }
+
+       mali_mmu_disable_stall(mmu);
+       return MALI_TRUE;
+}
+
+/** @brief Invalidate the whole MMU TLB without stalling first.
+ *
+ * @param mmu the MMU whose page table cache is zapped
+ */
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu)
+{
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+}
+
+
+/** @brief Invalidate a single TLB line.
+ *
+ * Writes the page directory index of @a mali_address to ZAP_ONE_LINE so the
+ * cached translation covering that address is dropped.
+ *
+ * @param mmu          the MMU to invalidate on
+ * @param mali_address GPU virtual address whose mapping should be invalidated
+ */
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address)
+{
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_ZAP_ONE_LINE, MALI_MMU_PDE_ENTRY(mali_address));
+}
+
+/** @brief Switch the MMU to a new page directory and zap the TLB.
+ *
+ * The caller must already hold the MMU in stalled or page-fault state;
+ * DTE_ADDR may only be reprogrammed in one of those states.
+ *
+ * @param mmu            the MMU to reprogram
+ * @param page_directory GPU physical address of the new page directory
+ */
+static void mali_mmu_activate_address_space(struct mali_mmu_core *mmu, u32 page_directory)
+{
+       /* The MMU must be in stalled or page fault mode, for this writing to work */
+       MALI_DEBUG_ASSERT( 0 != ( mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)
+                         & (MALI_MMU_STATUS_BIT_STALL_ACTIVE|MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) ) );
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, page_directory);
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+
+}
+
+/** @brief Activate a session's page directory on an MMU.
+ *
+ * Stalls the MMU, switches DTE_ADDR to the directory owned by @a pagedir
+ * (zapping the TLB) and releases the stall again.
+ *
+ * @param mmu     the MMU to reprogram; must not be NULL
+ * @param pagedir page directory to activate
+ * @return MALI_TRUE on success, MALI_FALSE if the MMU could not be stalled
+ */
+mali_bool mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir)
+{
+       mali_bool stalled;
+
+       MALI_DEBUG_ASSERT_POINTER(mmu);
+
+       MALI_DEBUG_PRINT(5, ("Asked to activate page directory 0x%x on MMU %s\n", pagedir, mmu->hw_core.description));
+
+       /* Guard clause: bail out if the MMU refuses to stall (pagefault state) */
+       stalled = mali_mmu_enable_stall(mmu);
+       if (MALI_FALSE == stalled)
+       {
+               return MALI_FALSE;
+       }
+
+       mali_mmu_activate_address_space(mmu, pagedir->page_directory);
+       mali_mmu_disable_stall(mmu);
+
+       return MALI_TRUE;
+}
+
+/** @brief Activate the global empty page directory on an MMU.
+ *
+ * Used when no session is bound to the MMU; all translations then fault.
+ * May only be called when the core is idle, so the stall cannot fail.
+ *
+ * @param mmu the MMU to reprogram; must not be NULL
+ */
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core* mmu)
+{
+       /* NOTE(review): stall_success is only consumed by MALI_DEBUG_ASSERT,
+        * so non-debug builds may warn about a set-but-unused variable —
+        * confirm against the build flags. */
+       mali_bool stall_success;
+
+       MALI_DEBUG_ASSERT_POINTER(mmu);
+       MALI_DEBUG_PRINT(3, ("Activating the empty page directory on MMU %s\n", mmu->hw_core.description));
+
+       stall_success = mali_mmu_enable_stall(mmu);
+       /* This function can only be called when the core is idle, so it could not fail. */
+       MALI_DEBUG_ASSERT( stall_success );
+       mali_mmu_activate_address_space(mmu, mali_empty_page_directory);
+       mali_mmu_disable_stall(mmu);
+}
+
+/** @brief Activate the page-fault flush page directory on an MMU.
+ *
+ * Used during page-fault handling, so the MMU may already be in page-fault
+ * state; in that case mali_mmu_enable_stall() legitimately fails and the
+ * stall is only released afterwards when it actually succeeded.
+ *
+ * @param mmu the MMU to reprogram; must not be NULL
+ */
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core* mmu)
+{
+       mali_bool stall_success;
+       MALI_DEBUG_ASSERT_POINTER(mmu);
+
+       MALI_DEBUG_PRINT(3, ("Activating the page fault flush page directory on MMU %s\n", mmu->hw_core.description));
+       stall_success = mali_mmu_enable_stall(mmu);
+       /* This function is expect to fail the stalling, since it might be in PageFault mode when it is called */
+       mali_mmu_activate_address_space(mmu, mali_page_fault_flush_page_directory);
+       if ( MALI_TRUE==stall_success ) mali_mmu_disable_stall(mmu);
+}
+
+/* IRQ-probe trigger: raise both MMU interrupts by writing the raw-status
+ * register, so the probe framework can verify the IRQ line fires. */
+static void mali_mmu_probe_trigger(void *data)
+{
+       struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+}
+
+/* IRQ-probe acknowledge: check that both interrupts raised by
+ * mali_mmu_probe_trigger() are visible in INT_STATUS, clear each one that
+ * is, and report success only when both were seen. */
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data)
+{
+       struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+       u32 int_stat;
+
+       int_stat = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+
+       MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat));
+       if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT)
+       {
+               MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n"));
+               mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+       }
+       else
+       {
+               MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n"));
+       }
+
+       if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR)
+       {
+               MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n"));
+               mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+       }
+       else
+       {
+               MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
+       }
+
+       /* The probe only passes when both interrupt sources were asserted */
+       if ( (int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
+                        (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR))
+       {
+               return _MALI_OSK_ERR_OK;
+       }
+
+       return _MALI_OSK_ERR_FAULT;
+}
+
+#if 0
+/* Debug-only helper kept for manual bring-up; compiled out by the #if 0. */
+void mali_mmu_print_state(struct mali_mmu_core *mmu)
+{
+       MALI_DEBUG_PRINT(2, ("MMU: State of %s is 0x%08x\n", mmu->hw_core.description, mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+}
+#endif
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_mmu.h b/drivers/gpu/arm/mali400/mali/common/mali_mmu.h
new file mode 100644 (file)
index 0000000..3410c0d
--- /dev/null
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_H__
+#define __MALI_MMU_H__
+
+#include "mali_osk.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_hw_core.h"
+
+/* Forward declaration from mali_group.h */
+struct mali_group;
+
+/**
+ * MMU register numbers
+ * Used in the register read/write routines.
+ * Offsets are byte offsets from the start of the MMU register window.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_mmu_register {
+       MALI_MMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
+       MALI_MMU_REGISTER_STATUS = 0x0004, /**< Status of the MMU */
+       MALI_MMU_REGISTER_COMMAND = 0x0008, /**< Command register, used to control the MMU */
+       MALI_MMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
+       MALI_MMU_REGISTER_ZAP_ONE_LINE = 0x010, /**< Used to invalidate the mapping of a single page from the MMU */
+       MALI_MMU_REGISTER_INT_RAWSTAT = 0x0014, /**< Raw interrupt status, all interrupts visible */
+       MALI_MMU_REGISTER_INT_CLEAR = 0x0018, /**< Indicate to the MMU that the interrupt has been received */
+       MALI_MMU_REGISTER_INT_MASK = 0x001C, /**< Enable/disable types of interrupts */
+       MALI_MMU_REGISTER_INT_STATUS = 0x0020 /**< Interrupt status based on the mask */
+} mali_mmu_register;
+
+/**
+ * MMU interrupt register bits
+ * Each cause of the interrupt is reported
+ * through the (raw) interrupt status registers.
+ * Multiple interrupts can be pending, so multiple bits
+ * can be set at once.
+ */
+typedef enum mali_mmu_interrupt
+{
+       MALI_MMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
+       MALI_MMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
+} mali_mmu_interrupt;
+
+/**
+ * Bits of the MMU status register (MALI_MMU_REGISTER_STATUS).
+ */
+typedef enum mali_mmu_status_bits
+{
+       MALI_MMU_STATUS_BIT_PAGING_ENABLED      = 1 << 0,
+       MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE   = 1 << 1,
+       MALI_MMU_STATUS_BIT_STALL_ACTIVE        = 1 << 2,
+       MALI_MMU_STATUS_BIT_IDLE                = 1 << 3,
+       MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
+       MALI_MMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
+       MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE    = 1 << 31,
+} mali_mmu_status_bits;
+
+/**
+ * Definition of the MMU struct
+ * Used to track a MMU unit in the system.
+ * Contains information about the mapping of the registers
+ * and the IRQ handler (NULL for virtual MMUs, which have no IRQ).
+ */
+struct mali_mmu_core
+{
+       struct mali_hw_core hw_core; /**< Common for all HW cores */
+       _mali_osk_irq_t *irq;        /**< IRQ handler */
+};
+
+_mali_osk_errcode_t mali_mmu_initialize(void);
+
+void mali_mmu_terminate(void);
+
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual);
+void mali_mmu_delete(struct mali_mmu_core *mmu);
+
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu);
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu);
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu);
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address);
+
+mali_bool mali_mmu_activate_page_directory(struct mali_mmu_core* mmu, struct mali_page_directory *pagedir);
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core* mmu);
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core* mmu);
+
+/**
+ * Issues the enable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to enable paging for
+ * @return MALI_TRUE if HW stall was successfully engaged, otherwise MALI_FALSE (req timed out)
+ */
+mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu);
+
+/**
+ * Issues the disable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to enable paging for
+ */
+void mali_mmu_disable_stall(struct mali_mmu_core *mmu);
+
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu);
+
+/*** Register reading/writing functions ***/
+
+/** @brief Read the masked interrupt status register. */
+MALI_STATIC_INLINE u32 mali_mmu_get_int_status(struct mali_mmu_core *mmu)
+{
+       return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+}
+
+/** @brief Read the raw (unmasked) interrupt status register. */
+MALI_STATIC_INLINE u32 mali_mmu_get_rawstat(struct mali_mmu_core *mmu)
+{
+       return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
+}
+
+/** @brief Disable all MMU interrupts by clearing the interrupt mask. */
+MALI_STATIC_INLINE void mali_mmu_mask_all_interrupts(struct mali_mmu_core *mmu)
+{
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, 0);
+}
+
+/** @brief Read the MMU status register (mali_mmu_status_bits). */
+MALI_STATIC_INLINE u32 mali_mmu_get_status(struct mali_mmu_core *mmu)
+{
+       return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+}
+
+/** @brief Read the logical address of the last page fault. */
+MALI_STATIC_INLINE u32 mali_mmu_get_page_fault_addr(struct mali_mmu_core *mmu)
+{
+       return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_PAGE_FAULT_ADDR);
+}
+
+#endif /* __MALI_MMU_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_mmu_page_directory.c b/drivers/gpu/arm/mali400/mali/common/mali_mmu_page_directory.c
new file mode 100644 (file)
index 0000000..6a67fff
--- /dev/null
@@ -0,0 +1,522 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_memory.h"
+#include "mali_l2_cache.h"
+#include "mali_group.h"
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data);
+
+/** @brief Allocate and zero-fill one page for use as an empty directory/table.
+ *
+ * @return GPU physical address of the page, or 0 if allocation failed
+ */
+u32 mali_allocate_empty_page(void)
+{
+       _mali_osk_errcode_t err;
+       mali_io_address mapping;
+       u32 address;
+
+       if(_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&address, &mapping))
+       {
+               /* Allocation failed */
+               return 0;
+       }
+
+       MALI_DEBUG_ASSERT_POINTER( mapping );
+
+       err = fill_page(mapping, 0);
+       if (_MALI_OSK_ERR_OK != err)
+       {
+               /* BUG FIX: the page was just released below, so the original
+                * code handed the caller a stale address anyway. Report the
+                * failure (0) instead, matching the allocation-failure path
+                * above and the 0-check in mali_mmu_initialize(). */
+               mali_mmu_release_table_page(address);
+               return 0;
+       }
+       return address;
+}
+
+/** @brief Release a page obtained from mali_allocate_empty_page().
+ *
+ * @param address GPU physical address of the page; MALI_INVALID_PAGE is ignored
+ */
+void mali_free_empty_page(u32 address)
+{
+       /* Silently ignore pages that were never allocated */
+       if (MALI_INVALID_PAGE == address)
+       {
+               return;
+       }
+
+       mali_mmu_release_table_page(address);
+}
+
+/** @brief Build the three-page hierarchy used to flush the page-fault queue.
+ *
+ * Allocates a data page, a page table pointing every PTE at the data page,
+ * and a page directory pointing every PDE at the page table. On failure,
+ * anything already allocated is released and the out-parameters are reset
+ * to MALI_INVALID_PAGE; nothing is written to an out-parameter that was
+ * never allocated.
+ *
+ * @param page_directory out: GPU address of the flush page directory
+ * @param page_table     out: GPU address of the flush page table
+ * @param data_page      out: GPU address of the flush data page
+ * @return _MALI_OSK_ERR_OK on success, otherwise the failing allocation's error
+ */
+_mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, u32 *page_table, u32 *data_page)
+{
+       _mali_osk_errcode_t err;
+       mali_io_address page_directory_mapping;
+       mali_io_address page_table_mapping;
+       mali_io_address data_page_mapping;
+
+       err = mali_mmu_get_table_page(data_page, &data_page_mapping);
+       if (_MALI_OSK_ERR_OK == err)
+       {
+               err = mali_mmu_get_table_page(page_table, &page_table_mapping);
+               if (_MALI_OSK_ERR_OK == err)
+               {
+                       err = mali_mmu_get_table_page(page_directory, &page_directory_mapping);
+                       if (_MALI_OSK_ERR_OK == err)
+                       {
+                               /* Wire up the hierarchy: PD -> PT -> data page */
+                               fill_page(data_page_mapping, 0);
+                               fill_page(page_table_mapping, *data_page | MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT);
+                               fill_page(page_directory_mapping, *page_table | MALI_MMU_FLAGS_PRESENT);
+                               MALI_SUCCESS;
+                       }
+                       /* Unwind in reverse allocation order */
+                       mali_mmu_release_table_page(*page_table);
+                       *page_table = MALI_INVALID_PAGE;
+               }
+               mali_mmu_release_table_page(*data_page);
+               *data_page = MALI_INVALID_PAGE;
+       }
+       return err;
+}
+
+/** @brief Release the page-fault flush pages created by mali_create_fault_flush_pages().
+ *
+ * Each page that is valid is released and its slot reset to
+ * MALI_INVALID_PAGE, in the order: directory, table, data page.
+ *
+ * @param page_directory in/out: flush page directory address
+ * @param page_table     in/out: flush page table address
+ * @param data_page      in/out: flush data page address
+ */
+void mali_destroy_fault_flush_pages(u32 *page_directory, u32 *page_table, u32 *data_page)
+{
+       u32 *pages[3];
+       int i;
+
+       /* Same release order as the original open-coded version */
+       pages[0] = page_directory;
+       pages[1] = page_table;
+       pages[2] = data_page;
+
+       for (i = 0; i < 3; i++)
+       {
+               if (MALI_INVALID_PAGE != *pages[i])
+               {
+                       mali_mmu_release_table_page(*pages[i]);
+                       *pages[i] = MALI_INVALID_PAGE;
+               }
+       }
+}
+
+/** @brief Fill an entire mapped page with one 32-bit value.
+ *
+ * Writes @a data to every word of the page using relaxed I/O writes,
+ * followed by a single memory barrier. Always returns success.
+ *
+ * @param mapping CPU mapping of the page to fill; must not be NULL
+ * @param data    32-bit value to replicate across the page
+ */
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
+{
+       int i;
+       MALI_DEBUG_ASSERT_POINTER( mapping );
+
+       for(i = 0; i < MALI_MMU_PAGE_SIZE/4; i++)
+       {
+#ifdef CONFIG_SLP_MALI_DBG
+               _mali_osk_mem_iowrite32_relaxed_cpu( mapping, i * sizeof(u32), data);
+#else
+               _mali_osk_mem_iowrite32_relaxed( mapping, i * sizeof(u32), data);
+#endif
+       }
+       _mali_osk_mem_barrier();
+       MALI_SUCCESS;
+}
+
+/** @brief Ensure page tables exist for a GPU virtual address range.
+ *
+ * For every page directory entry covering [mali_address, mali_address+size),
+ * allocates a page table if none is present yet and marks the PDE present,
+ * otherwise just bumps that PDE's usage count. Counts are decremented again
+ * by mali_mmu_pagedir_unmap().
+ *
+ * NOTE(review): if an allocation fails mid-range, usage counts already
+ * taken for earlier PDEs are not rolled back — confirm callers treat this
+ * as fatal for the session.
+ *
+ * @param pagedir      the page directory to populate
+ * @param mali_address start of the GPU virtual range
+ * @param size         size of the range in bytes
+ * @return _MALI_OSK_ERR_OK on success, otherwise the allocation error
+ */
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+       const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+       const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+       _mali_osk_errcode_t err;
+       mali_io_address pde_mapping;
+       u32 pde_phys;
+       int i;
+
+       for(i = first_pde; i <= last_pde; i++)
+       {
+#ifdef CONFIG_SLP_MALI_DBG
+               if(0 == (_mali_osk_mem_ioread32_cpu(pagedir->page_directory_mapped, i*sizeof(u32))&MALI_MMU_FLAGS_PRESENT))
+#else
+               if(0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)) & MALI_MMU_FLAGS_PRESENT))
+#endif
+               {
+                       /* Page table not present */
+                       MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+                       MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);
+
+                       err = mali_mmu_get_table_page(&pde_phys, &pde_mapping);
+                       if(_MALI_OSK_ERR_OK != err)
+                       {
+                               MALI_PRINT_ERROR(("Failed to allocate page table page.\n"));
+                               return err;
+                       }
+                       pagedir->page_entries_mapped[i] = pde_mapping;
+
+                       /* Update PDE, mark as present */
+#ifdef CONFIG_SLP_MALI_DBG
+                       _mali_osk_mem_iowrite32_relaxed_cpu(pagedir->page_directory_mapped, i*sizeof(u32),
+                                       pde_phys | MALI_MMU_FLAGS_PRESENT);
+#else
+                       _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i*sizeof(u32),
+                                       pde_phys | MALI_MMU_FLAGS_PRESENT);
+#endif
+
+                       MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+                       pagedir->page_entries_usage_count[i] = 1;
+               }
+               else
+               {
+                       /* Page table already present: just take another reference */
+                       pagedir->page_entries_usage_count[i]++;
+               }
+       }
+       /* Make all relaxed PDE writes visible before returning */
+       _mali_osk_write_mem_barrier();
+
+       MALI_SUCCESS;
+}
+
+/** @brief Clear the PTEs covering a GPU virtual range within one page table.
+ *
+ * Uses relaxed writes; the caller is responsible for issuing the write
+ * memory barrier afterwards (mali_mmu_pagedir_unmap() does).
+ *
+ * @param page_table   CPU mapping of the page table
+ * @param mali_address start of the GPU virtual range
+ * @param size         size of the range in bytes (must stay within this table)
+ */
+MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size)
+{
+       int i;
+       const int first_pte = MALI_MMU_PTE_ENTRY(mali_address);
+       const int last_pte = MALI_MMU_PTE_ENTRY(mali_address + size - 1);
+
+       for (i = first_pte; i <= last_pte; i++)
+       {
+#ifdef CONFIG_SLP_MALI_DBG
+               _mali_osk_mem_iowrite32_relaxed_cpu(page_table, i * sizeof(u32), 0);
+#else
+               _mali_osk_mem_iowrite32_relaxed(page_table, i * sizeof(u32), 0);
+#endif
+       }
+}
+
+/** @brief Tear down the mapping of a GPU virtual range.
+ *
+ * For every page directory entry covering the range, drops one usage
+ * reference (taken by mali_mmu_pagedir_map()). A page table whose count
+ * reaches zero is detached from the PDE and released outright; otherwise
+ * only the PTEs inside the range are zeroed. Unless
+ * MALI_UNMAP_FLUSH_ALL_MALI_L2 is defined, the physical pages touched
+ * (partially zeroed page tables, plus the page directory if a PDE was
+ * cleared) are invalidated from the Mali L2 caches.
+ *
+ * @param pagedir      the page directory to unmap from
+ * @param mali_address start of the GPU virtual range
+ * @param size         size of the range in bytes
+ * @return always _MALI_OSK_ERR_OK
+ */
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+       const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+       const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+       u32 left = size;
+       int i;
+#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
+       mali_bool pd_changed = MALI_FALSE;
+       u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
+       u32 num_pages_inv = 0;
+#endif
+
+       /* For all page directory entries in range. */
+       for (i = first_pde; i <= last_pde; i++)
+       {
+               u32 size_in_pde, offset;
+
+               MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
+               MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);
+
+               /* Offset into page table, 0 if mali_address is 4MiB aligned */
+               offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
+               /* Portion of the range that falls inside this PDE */
+               if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset)
+               {
+                       size_in_pde = left;
+               }
+               else
+               {
+                       size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
+               }
+
+               pagedir->page_entries_usage_count[i]--;
+
+               /* If entire page table is unused, free it */
+               if (0 == pagedir->page_entries_usage_count[i])
+               {
+                       u32 page_address;
+                       MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
+                       /* last reference removed, no need to zero out each PTE  */
+
+#ifdef CONFIG_SLP_MALI_DBG
+                       page_address = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32_cpu(pagedir->page_directory_mapped, i*sizeof(u32)));
+#else
+                       page_address = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)));
+#endif
+                       pagedir->page_entries_mapped[i] = NULL;
+#ifdef CONFIG_SLP_MALI_DBG
+                       _mali_osk_mem_iowrite32_relaxed_cpu(pagedir->page_directory_mapped, i*sizeof(u32), 0);
+#else
+                       _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i*sizeof(u32), 0);
+#endif
+
+                       mali_mmu_release_table_page(page_address);
+#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
+                       pd_changed = MALI_TRUE;
+#endif
+               }
+               else
+               {
+#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
+                       /* Remember this partially zeroed page table for L2 invalidation */
+                       pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
+                       num_pages_inv++;
+                       MALI_DEBUG_ASSERT(num_pages_inv<3);
+#endif
+
+                       /* If part of the page table is still in use, zero the relevant PTEs */
+                       mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
+               }
+
+               left -= size_in_pde;
+               mali_address += size_in_pde;
+       }
+       /* Make all relaxed PDE/PTE writes visible before the L2 invalidation */
+       _mali_osk_write_mem_barrier();
+
+#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
+       /* L2 pages invalidation */
+       if (MALI_TRUE == pd_changed)
+       {
+               pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
+               num_pages_inv++;
+               /* NOTE(review): with two partially-zapped page tables AND a
+                * freed page table in the same range, num_pages_inv reaches 3
+                * here. The array write (index 2) is still in bounds, but this
+                * debug assert would fire — confirm whether it should be <=3. */
+               MALI_DEBUG_ASSERT(num_pages_inv<3);
+       }
+
+       if (_MALI_PRODUCT_ID_MALI200 != mali_kernel_core_get_product_id())
+       {
+               mali_l2_cache_invalidate_pages_conditional(pages_to_invalidate, num_pages_inv);
+       }
+#endif
+
+       MALI_SUCCESS;
+}
+
+/**
+ * Allocate and initialise an empty Mali MMU page directory.
+ *
+ * Obtains one table page for the directory itself and zero-fills it via
+ * fill_page(), so no virtual range is mapped yet.
+ *
+ * @return the new page directory, or NULL if either allocation fails.
+ */
+struct mali_page_directory *mali_mmu_pagedir_alloc(void)
+{
+       struct mali_page_directory *pagedir;
+
+       pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
+       if(NULL == pagedir)
+       {
+               return NULL;
+       }
+
+       if(_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&pagedir->page_directory, &pagedir->page_directory_mapped))
+       {
+               /* Could not get a backing page; undo the descriptor allocation */
+               _mali_osk_free(pagedir);
+               return NULL;
+       }
+
+       /* Zero page directory */
+       fill_page(pagedir->page_directory_mapped, 0);
+
+       return pagedir;
+}
+
+/**
+ * Free a page directory and every page table page it still references.
+ *
+ * Scans all PDEs; entries marked MALI_MMU_FLAGS_PRESENT have their page
+ * table page released and the PDE cleared, then the page directory page
+ * itself and the descriptor struct are freed. The _cpu accessor variants
+ * are used when CONFIG_SLP_MALI_DBG is enabled.
+ */
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
+{
+       const int num_page_table_entries = sizeof(pagedir->page_entries_mapped) / sizeof(pagedir->page_entries_mapped[0]);
+       int i;
+
+       /* Free referenced page tables and zero PDEs. */
+       for (i = 0; i < num_page_table_entries; i++)
+       {
+#ifdef CONFIG_SLP_MALI_DBG
+               if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32_cpu(pagedir->page_directory_mapped, sizeof(u32)*i)&MALI_MMU_FLAGS_PRESENT))
+               {
+                       mali_mmu_release_table_page( _mali_osk_mem_ioread32_cpu(pagedir->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+                       _mali_osk_mem_iowrite32_relaxed_cpu(pagedir->page_directory_mapped, i * sizeof(u32), 0);
+               }
+#else
+               if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT))
+               {
+                       mali_mmu_release_table_page( _mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+                       _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
+               }
+#endif
+       }
+       /* Flush the relaxed PDE clears before releasing the directory page */
+       _mali_osk_write_mem_barrier();
+
+       /* Free the page directory page. */
+       mali_mmu_release_table_page(pagedir->page_directory);
+
+       _mali_osk_free(pagedir);
+}
+
+
+/**
+ * Back the already-mapped virtual range [mali_address, mali_address+size)
+ * with physical pages starting at phys_address, writing one PTE per 4 KiB
+ * page (the caller guarantees the range is contiguous and page aligned, and
+ * that the covering page tables exist).
+ *
+ * cache_settings selects the PTE permission/cache bits; any value other than
+ * MALI_CACHE_GP_READ_ALLOCATE or MALI_CACHE_STANDARD logs an error and is
+ * treated as standard read/write.
+ */
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, mali_memory_cache_settings cache_settings)
+{
+       u32 end_address = mali_address + size;
+       u32 permission_bits;
+
+       switch ( cache_settings )
+       {
+               case MALI_CACHE_GP_READ_ALLOCATE:
+               MALI_DEBUG_PRINT(5, ("Map L2 GP_Read_allocate\n"));
+               permission_bits = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
+               break;
+
+               case MALI_CACHE_STANDARD:
+               MALI_DEBUG_PRINT(5, ("Map L2 Standard\n"));
+               /* fall through */
+               default:
+               if ( MALI_CACHE_STANDARD != cache_settings) MALI_PRINT_ERROR(("Wrong cache settings\n"));
+               permission_bits = MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT;
+       }
+
+       /* Map physical pages into MMU page tables */
+       for ( ; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, phys_address += MALI_MMU_PAGE_SIZE)
+       {
+               MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
+#ifdef CONFIG_SLP_MALI_DBG
+               _mali_osk_mem_iowrite32_relaxed_cpu( pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY( mali_address)],
+                               MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
+                               phys_address | permission_bits);
+#else
+               _mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
+                               MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
+                               phys_address | permission_bits);
+#endif
+       }
+       /* Make the relaxed PTE writes visible before returning */
+       _mali_osk_write_mem_barrier();
+}
+
+/**
+ * Return the physical page-table address stored in PDE @index of @pagedir,
+ * with the MALI_MMU_FLAGS_MASK flag bits stripped off.
+ */
+u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
+{
+#ifdef CONFIG_SLP_MALI_DBG
+       return (_mali_osk_mem_ioread32_cpu( pagedir->page_directory_mapped, index*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+#else
+       return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, index*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+#endif
+}
+
+/* For instrumented builds: bookkeeping used while serialising MMU register
+ * writes and page table contents into a user-supplied buffer. */
+struct dump_info
+{
+       u32 buffer_left;           /* bytes still available in 'buffer' */
+       u32 register_writes_size;  /* bytes accumulated by writereg() records */
+       u32 page_table_dump_size;  /* bytes accumulated by dump_page() records */
+       u32 *buffer;               /* current write position; NULL = size-only pass */
+};
+
+/**
+ * Record one MMU register write as a (register, value) pair in the dump.
+ *
+ * When info is non-NULL the record size (two u32 words) is accounted in
+ * register_writes_size; when info->buffer is also non-NULL the pair is
+ * stored and buffer_left reduced. Returns _MALI_OSK_ERR_NOMEM if the buffer
+ * cannot hold two more words. 'comment' is purely descriptive for callers
+ * and is not serialised.
+ */
+static _mali_osk_errcode_t writereg(u32 where, u32 what, const char *comment, struct dump_info *info)
+{
+       if (NULL != info)
+       {
+               info->register_writes_size += sizeof(u32)*2; /* two 32-bit words */
+
+               if (NULL != info->buffer)
+               {
+                       /* check that we have enough space */
+                       if (info->buffer_left < sizeof(u32)*2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+                       *info->buffer = where;
+                       info->buffer++;
+
+                       *info->buffer = what;
+                       info->buffer++;
+
+                       info->buffer_left -= sizeof(u32)*2;
+               }
+       }
+
+       MALI_SUCCESS;
+}
+
+/**
+ * Append one 4 KiB page, prefixed by its physical address, to the dump.
+ *
+ * With info->buffer == NULL only page_table_dump_size is accumulated
+ * (size-only pass); otherwise the address word and page contents are copied
+ * into the buffer, returning _MALI_OSK_ERR_NOMEM when space runs out.
+ */
+static _mali_osk_errcode_t dump_page(mali_io_address page, u32 phys_addr, struct dump_info * info)
+{
+       if (NULL != info)
+       {
+               /* 4096 for the page and 4 bytes for the address */
+               const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4;
+               const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE;
+               const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4;
+
+               info->page_table_dump_size += dump_size_in_bytes;
+
+               if (NULL != info->buffer)
+               {
+                       if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+                       *info->buffer = phys_addr;
+                       info->buffer++;
+
+                       _mali_osk_memcpy(info->buffer, page, page_size_in_bytes);
+                       info->buffer += page_size_in_elements;
+
+                       info->buffer_left -= dump_size_in_bytes;
+               }
+       }
+
+       MALI_SUCCESS;
+}
+
+/**
+ * Dump the page directory page followed by every mapped page table page
+ * (all 1024 PDE slots are scanned). Propagates the first error returned by
+ * dump_page(); a NULL page_directory_mapped dumps nothing and succeeds.
+ */
+static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info * info)
+{
+       MALI_DEBUG_ASSERT_POINTER(pagedir);
+       MALI_DEBUG_ASSERT_POINTER(info);
+
+       if (NULL != pagedir->page_directory_mapped)
+       {
+               int i;
+
+               MALI_CHECK_NO_ERROR(
+                       dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info)
+                       );
+
+               for (i = 0; i < 1024; i++)
+               {
+                       if (NULL != pagedir->page_entries_mapped[i])
+                       {
+                               /* Physical address of the page table comes from the PDE,
+                                * with the flag bits masked off. */
+                               MALI_CHECK_NO_ERROR(
+                                   dump_page(pagedir->page_entries_mapped[i],
+#ifdef CONFIG_SLP_MALI_DBG
+                                       _mali_osk_mem_ioread32_cpu(pagedir->page_directory_mapped,
+                                       i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info)
+#else
+                                       _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+                                       i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info)
+#endif
+                               );
+                       }
+               }
+       }
+
+       MALI_SUCCESS;
+}
+
+/**
+ * Record the register writes needed to point an MMU at this page directory
+ * and enable paging (register offsets 0x0 and 0x8).
+ */
+static _mali_osk_errcode_t dump_mmu_registers(struct mali_page_directory *pagedir, struct dump_info * info)
+{
+       MALI_CHECK_NO_ERROR(writereg(0x00000000, pagedir->page_directory,
+                                    "set the page directory address", info));
+       MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap???", info));
+       MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info));
+       MALI_SUCCESS;
+}
+
+/**
+ * U/K call: compute the buffer size required for a full MMU dump by running
+ * the dump routines in size-only mode (info.buffer == NULL). The result is
+ * returned in args->size.
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args )
+{
+       struct dump_info info = { 0, 0, 0, NULL };
+       struct mali_session_data * session_data;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+       session_data = (struct mali_session_data *)(args->ctx);
+
+       MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+       MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+       args->size = info.register_writes_size + info.page_table_dump_size;
+       MALI_SUCCESS;
+}
+
+/**
+ * U/K call: serialise the MMU register writes and page tables of the
+ * session's page directory into the caller-supplied buffer (args->buffer /
+ * args->size), and report the position and size of the two sections back to
+ * user space.
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args )
+{
+       struct dump_info info = { 0, 0, 0, NULL };
+       struct mali_session_data * session_data;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       MALI_CHECK_NON_NULL(args->buffer, _MALI_OSK_ERR_INVALID_ARGS);
+
+       session_data = (struct mali_session_data *)(args->ctx);
+
+       info.buffer_left = args->size;
+       info.buffer = args->buffer;
+
+       args->register_writes = info.buffer;
+       MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+
+       args->page_table_dump = info.buffer;
+       MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+
+       args->register_writes_size = info.register_writes_size;
+       args->page_table_dump_size = info.page_table_dump_size;
+
+       MALI_SUCCESS;
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_mmu_page_directory.h b/drivers/gpu/arm/mali400/mali/common/mali_mmu_page_directory.h
new file mode 100644 (file)
index 0000000..67225b1
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_PAGE_DIRECTORY_H__
+#define __MALI_MMU_PAGE_DIRECTORY_H__
+
+#include "mali_osk.h"
+
+/**
+ * Size of an MMU page in bytes
+ */
+#define MALI_MMU_PAGE_SIZE 0x1000
+
+/*
+ * Size of the address space referenced by a page table page
+ */
+#define MALI_MMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */
+
+/**
+ * Page directory index from address
+ * Calculates the page directory index from the given address
+ */
+#define MALI_MMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
+
+/**
+ * Page table index from address
+ * Calculates the page table index from the given address
+ */
+#define MALI_MMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
+
+/**
+ * Extract the memory address from an PDE/PTE entry
+ */
+#define MALI_MMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
+
+#define MALI_INVALID_PAGE ((u32)(~0))
+
+/**
+ * Flag bits carried in the low 9 bits of a PDE/PTE entry
+ * (see MALI_MMU_FLAGS_MASK).
+ */
+typedef enum mali_mmu_entry_flags
+{
+       MALI_MMU_FLAGS_PRESENT = 0x01,
+       MALI_MMU_FLAGS_READ_PERMISSION = 0x02,
+       MALI_MMU_FLAGS_WRITE_PERMISSION = 0x04,
+       MALI_MMU_FLAGS_OVERRIDE_CACHE  = 0x8,
+       MALI_MMU_FLAGS_WRITE_CACHEABLE  = 0x10,
+       MALI_MMU_FLAGS_WRITE_ALLOCATE  = 0x20,
+       MALI_MMU_FLAGS_WRITE_BUFFERABLE  = 0x40,
+       MALI_MMU_FLAGS_READ_CACHEABLE  = 0x80,
+       MALI_MMU_FLAGS_READ_ALLOCATE  = 0x100,
+       MALI_MMU_FLAGS_MASK = 0x1FF,
+} mali_mmu_entry_flags;
+
+
+#define MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE ( \
+MALI_MMU_FLAGS_PRESENT | \
+       MALI_MMU_FLAGS_READ_PERMISSION |  \
+       MALI_MMU_FLAGS_WRITE_PERMISSION | \
+       MALI_MMU_FLAGS_OVERRIDE_CACHE | \
+       MALI_MMU_FLAGS_WRITE_CACHEABLE | \
+       MALI_MMU_FLAGS_WRITE_BUFFERABLE | \
+       MALI_MMU_FLAGS_READ_CACHEABLE | \
+       MALI_MMU_FLAGS_READ_ALLOCATE )
+
+
+struct mali_page_directory
+{
+       u32 page_directory; /**< Physical address of the memory session's page directory */
+       mali_io_address page_directory_mapped; /**< Pointer to the mapped version of the page directory into the kernel's address space */
+
+       mali_io_address page_entries_mapped[1024]; /**< Pointers to the page tables which exist in the page directory, mapped into the kernel's address space */
+       u32   page_entries_usage_count[1024]; /**< Tracks usage count of the page table pages, so they can be released on the last reference */
+};
+
+/* Map Mali virtual address space (i.e. ensure page tables exist for the virtual range)  */
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+
+/* Back virtual address space with actual pages. Assumes input is contiguous and 4k aligned. */
+/* NOTE(review): the definition in mali_mmu_page_directory.c declares the last
+ * parameter as mali_memory_cache_settings rather than u32 — confirm the two
+ * declarations agree (an enum/u32 mismatch between prototype and definition
+ * should be reconciled). */
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, u32 cache_settings);
+
+/* Read back the physical (flag-stripped) address stored in PDE 'index'. */
+u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index);
+
+u32 mali_allocate_empty_page(void);
+void mali_free_empty_page(u32 address);
+_mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, u32 *page_table, u32 *data_page);
+void mali_destroy_fault_flush_pages(u32 *page_directory, u32 *page_table, u32 *data_page);
+
+struct mali_page_directory *mali_mmu_pagedir_alloc(void);
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir);
+
+#endif /* __MALI_MMU_PAGE_DIRECTORY_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_osk.h b/drivers/gpu/arm/mali400/mali/common/mali_osk.h
new file mode 100644 (file)
index 0000000..aa6d5fc
--- /dev/null
@@ -0,0 +1,1804 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk.h
+ * Defines the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_H__
+#define __MALI_OSK_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_osk_miscellaneous OSK Miscellaneous functions, constants and types
+ * @{ */
+
+/* Define integer types used by OSK. Note: these currently clash with Linux so we only define them if not defined already */
+#ifndef __KERNEL__
+       typedef unsigned char      u8;
+       typedef signed char        s8;
+       typedef unsigned short     u16;
+       typedef signed short       s16;
+       typedef unsigned int       u32;
+       typedef signed int         s32;
+       typedef unsigned long long u64;
+       #define BITS_PER_LONG (sizeof(long)*8)
+#else
+       /* Ensure Linux types u32, etc. are defined */
+       #include <linux/types.h>
+#endif
+
+/** @brief Mali Boolean type which uses MALI_TRUE and MALI_FALSE
+  */
+       typedef unsigned long mali_bool;
+
+#ifndef MALI_TRUE
+       #define MALI_TRUE ((mali_bool)1)
+#endif
+
+#ifndef MALI_FALSE
+       #define MALI_FALSE ((mali_bool)0)
+#endif
+
+#define MALI_HW_CORE_NO_COUNTER     ((u32)-1)
+
+/**
+ * @brief OSK Error codes
+ *
+ * Each OS may use its own set of error codes, and may require that the
+ * User/Kernel interface take certain error code. This means that the common
+ * error codes need to be sufficiently rich to pass the correct error code
+ * through from the OSK to U/K layer, across all OSs.
+ *
+ * The result is that some error codes will appear redundant on some OSs.
+ * Under all OSs, the OSK layer must translate native OS error codes to
+ * _mali_osk_errcode_t codes. Similarly, the U/K layer must translate from
+ * _mali_osk_errcode_t codes to native OS error codes.
+ */
+typedef enum
+{
+    _MALI_OSK_ERR_OK = 0, /**< Success. */
+    _MALI_OSK_ERR_FAULT = -1, /**< General non-success */
+    _MALI_OSK_ERR_INVALID_FUNC = -2, /**< Invalid function requested through User/Kernel interface (e.g. bad IOCTL number) */
+    _MALI_OSK_ERR_INVALID_ARGS = -3, /**< Invalid arguments passed through User/Kernel interface */
+    _MALI_OSK_ERR_NOMEM = -4, /**< Insufficient memory */
+    _MALI_OSK_ERR_TIMEOUT = -5, /**< Timeout occurred */
+    _MALI_OSK_ERR_RESTARTSYSCALL = -6, /**< Special: On certain OSs, must report when an interruptable mutex is interrupted. Ignore otherwise. */
+    _MALI_OSK_ERR_ITEM_NOT_FOUND = -7, /**< Table Lookup failed */
+    _MALI_OSK_ERR_BUSY = -8, /**< Device/operation is busy. Try again later */
+       _MALI_OSK_ERR_UNSUPPORTED = -9, /**< Optional part of the interface used, and is unsupported */
+} _mali_osk_errcode_t;
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_wq OSK work queues
+ * @{ */
+
+/** @brief Private type for work objects */
+typedef struct _mali_osk_wq_work_t_struct _mali_osk_wq_work_t;
+
+/** @brief Work queue handler function
+ *
+ * This function type is called when the work is scheduled by the work queue,
+ * e.g. as an IRQ bottom-half handler.
+ *
+ * Refer to \ref _mali_osk_wq_schedule_work() for more information on the
+ * work-queue and work handlers.
+ *
+ * @param arg resource-specific data
+ */
+typedef void (*_mali_osk_wq_work_handler_t)( void * arg );
+
+/* @} */ /* end group _mali_osk_wq */
+
+/** @defgroup _mali_osk_irq OSK IRQ handling
+ * @{ */
+
+/** @brief Private type for IRQ handling objects */
+typedef struct _mali_osk_irq_t_struct _mali_osk_irq_t;
+
+/** @brief Optional function to trigger an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data */
+typedef void  (*_mali_osk_irq_trigger_t)( void * arg );
+
+/** @brief Optional function to acknowledge an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was successful, or a suitable _mali_osk_errcode_t on failure. */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)( void * arg );
+
+/** @brief IRQ 'upper-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the initial handling of a
+ * resource's IRQ. This maps on to the concept of an ISR that does the minimum
+ * work necessary before handing off to an IST.
+ *
+ * The communication of the resource-specific data from the ISR to the IST is
+ * handled by the OSK implementation.
+ *
+ * On most systems, the IRQ upper-half handler executes in IRQ context.
+ * Therefore, the system may have restrictions about what can be done in this
+ * context
+ *
+ * If an IRQ upper-half handler requires more work to be done than can be
+ * achieved in an IRQ context, then it may defer the work with
+ * _mali_osk_wq_schedule_work(). Refer to \ref _mali_osk_wq_create_work() for
+ * more information.
+ *
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was correctly handled, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+typedef _mali_osk_errcode_t  (*_mali_osk_irq_uhandler_t)( void * arg );
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @defgroup _mali_osk_atomic OSK Atomic counters
+ * @{ */
+
+/** @brief Public type of atomic counters
+ *
+ * This is public for allocation on stack. On systems that support it, this is just a single 32-bit value.
+ * On others, it could be encapsulating an object stored elsewhere.
+ *
+ * Regardless of implementation, the \ref _mali_osk_atomic functions \b must be used
+ * for all accesses to the variable's value, even if atomicity is not required.
+ * Do not access u.val or u.obj directly.
+ */
+typedef struct
+{
+    union
+    {
+        u32 val;
+        void *obj;
+    } u;
+} _mali_osk_atomic_t;
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+
+/** @brief OSK Mutual Exclusion Lock ordered list
+ *
+ * This lists the various types of locks in the system and is used to check
+ * that locks are taken in the correct order.
+ *
+ * Holding more than one lock of the same order at the same time is not
+ * allowed.
+ *
+ */
+typedef enum
+{
+       _MALI_OSK_LOCK_ORDER_LAST = 0,
+
+       _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS,
+       _MALI_OSK_LOCK_ORDER_PM_EXECUTE,
+       _MALI_OSK_LOCK_ORDER_UTILIZATION,
+       _MALI_OSK_LOCK_ORDER_L2_COUNTER,
+       _MALI_OSK_LOCK_ORDER_PROFILING,
+       _MALI_OSK_LOCK_ORDER_L2_COMMAND,
+       _MALI_OSK_LOCK_ORDER_PM_CORE_STATE,
+       _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED,
+       _MALI_OSK_LOCK_ORDER_SCHEDULER,
+       _MALI_OSK_LOCK_ORDER_GROUP,
+       _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL,
+       _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP,
+       _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE,
+       _MALI_OSK_LOCK_ORDER_MEM_INFO,
+       _MALI_OSK_LOCK_ORDER_MEM_SESSION,
+       _MALI_OSK_LOCK_ORDER_SESSIONS,
+
+       _MALI_OSK_LOCK_ORDER_FIRST
+} _mali_osk_lock_order_t;
+
+
+/** @brief OSK Mutual Exclusion Lock flags type
+ *
+ * Flags are supplied at the point where the Lock is initialized. Each flag can
+ * be combined with others using bitwise OR, '|'.
+ *
+ * The flags must be sufficiently rich to cope with all our OSs. This means
+ * that on some OSs, certain flags can be completely ignored. We define a
+ * number of terms that are significant across all OSs:
+ *
+ * - Sleeping/non-sleeping mutexes. Sleeping mutexes can block on waiting, and so
+ * schedule out the current thread. This is significant on OSs where there are
+ * situations in which the current thread must not be put to sleep. On OSs
+ * without this restriction, sleeping and non-sleeping mutexes can be treated
+ * as the same (if that is required).
+ * - Interruptable/non-interruptable mutexes. For sleeping mutexes, it may be
+ * possible for the sleep to be interrupted for a reason other than the thread
+ * being able to obtain the lock. OSs behaving in this way may provide a
+ * mechanism to control whether sleeping mutexes can be interrupted. On OSs
+ * that do not support the concept of interruption, \b or they do not support
+ * control of mutex interruption, then interruptable mutexes may be treated
+ * as non-interruptable.
+ *
+ * Some constraints apply to the lock type flags:
+ *
+ * - Spinlocks are by nature, non-interruptable. Hence, they must always be
+ * combined with the NONINTERRUPTABLE flag, because it is meaningless to ask
+ * for a spinlock that is interruptable (and this highlights its
+ * non-interruptable-ness). For example, on certain OSs they should be used when
+ * you must not sleep.
+ * - Reader/writer is an optimization hint, and any type of lock can be
+ * reader/writer. Since this is an optimization hint, the implementation need
+ * not respect this for any/all types of lock. For example, on certain OSs,
+ * there's no interruptable reader/writer mutex. If such a thing were requested
+ * on that OS, the fact that interruptable was requested takes priority over the
+ * reader/writer-ness, because reader/writer-ness is not necessary for correct
+ * operation.
+ * - Any lock can use the order parameter.
+ * - A onelock is an optimization hint specific to certain OSs. It can be
+ * specified when it is known that only one lock will be held by the thread,
+ * and so can provide faster mutual exclusion. This can be safely ignored if
+ * such optimization is not required/present.
+ *
+ * The absence of any flags (the value 0) results in a sleeping-mutex, which is interruptable.
+ */
+typedef enum
+{
+       _MALI_OSK_LOCKFLAG_SPINLOCK = 0x1,          /**< Specifically, don't sleep on those architectures that require it */
+       _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE = 0x2,  /**< The mutex cannot be interrupted, e.g. delivery of signals on those architectures where this is required */
+       _MALI_OSK_LOCKFLAG_READERWRITER = 0x4,      /**< Optimise for readers/writers */
+       _MALI_OSK_LOCKFLAG_ORDERED = 0x8,           /**< Use the order parameter; otherwise use automatic ordering */
+       _MALI_OSK_LOCKFLAG_ONELOCK = 0x10,          /**< Each thread can only hold one lock at a time */
+       _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ = 0x20,    /**<  IRQ version of spinlock */
+       /** @enum _mali_osk_lock_flags_t
+        *
+        * Flags from 0x10000--0x80000000 are RESERVED for User-mode */
+
+} _mali_osk_lock_flags_t;
+
+/** @brief Mutual Exclusion Lock Mode Optimization hint
+ *
+ * The lock mode is used to implement the read/write locking of locks specified
+ * as _MALI_OSK_LOCKFLAG_READERWRITER. In this case, the RO mode can be used
+ * to allow multiple concurrent readers, but no writers. The RW mode is used for
+ * writers, and so will wait for all readers to release the lock (if any present).
+ * Further readers and writers will wait until the writer releases the lock.
+ *
+ * The mode is purely an optimization hint: for example, it is permissible for
+ * all locks to behave in RW mode, regardless of that supplied.
+ *
+ * It is an error to attempt to use locks in anything other that RW mode when
+ * _MALI_OSK_LOCKFLAG_READERWRITER is not supplied.
+ *
+ */
+typedef enum
+{
+       _MALI_OSK_LOCKMODE_UNDEF = -1,  /**< Undefined lock mode. For internal use only */
+       _MALI_OSK_LOCKMODE_RW    = 0x0, /**< Read-write mode, default. All readers and writers are mutually-exclusive */
+       _MALI_OSK_LOCKMODE_RO,          /**< Read-only mode, to support multiple concurrent readers, but mutual exclusion in the presence of writers. */
+       /** @enum _mali_osk_lock_mode_t
+        *
+        * Lock modes 0x40--0x7F are RESERVED for User-mode */
+} _mali_osk_lock_mode_t;
+
+/** @brief Private type for Mutual Exclusion lock objects */
+typedef struct _mali_osk_lock_t_struct _mali_osk_lock_t;
+
+#ifdef DEBUG
+/** @brief Macro for asserting that the current thread holds a given lock
+ */
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) MALI_DEBUG_ASSERT(_mali_osk_lock_get_owner(l) == _mali_osk_get_tid());
+
+/** @brief returns a lock's owner (thread id) if debugging is enabled
+ */
+u32 _mali_osk_lock_get_owner( _mali_osk_lock_t *lock );
+#else
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) do {} while(0)
+#endif
+
+/** @} */ /* end group _mali_osk_lock */
+
+/** @defgroup _mali_osk_low_level_memory OSK Low-level Memory Operations
+ * @{ */
+
+/**
+ * @brief Private data type for use in IO accesses to/from devices.
+ *
+ * This represents some range that is accessible from the device. Examples
+ * include:
+ * - Device Registers, which could be readable and/or writeable.
+ * - Memory that the device has access to, for storing configuration structures.
+ *
+ * Access to this range must be made through the _mali_osk_mem_ioread32() and
+ * _mali_osk_mem_iowrite32() functions.
+ */
+typedef struct _mali_io_address * mali_io_address;
+
+/** @defgroup _MALI_OSK_CPU_PAGE CPU Physical page size macros.
+ *
+ * The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The CPU Physical Page Size has been assumed to be the same as the Mali
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** CPU Page Order, as log to base 2 of the Page size. @see _MALI_OSK_CPU_PAGE_SIZE */
+#define _MALI_OSK_CPU_PAGE_ORDER ((u32)12)
+/** CPU Page Size, in bytes.               */
+#define _MALI_OSK_CPU_PAGE_SIZE (((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER))
+/** CPU Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_CPU_PAGE_MASK (~((((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_CPU_PAGE */
+
+/** @defgroup _MALI_OSK_MALI_PAGE Mali Physical Page size macros
+ *
+ * Mali Physical page size macros. The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The Mali Physical Page Size has been assumed to be the same as the CPU
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** Mali Page Order, as log to base 2 of the Page size. @see _MALI_OSK_MALI_PAGE_SIZE */
+#define _MALI_OSK_MALI_PAGE_ORDER ((u32)12)
+/** Mali Page Size, in bytes.               */
+#define _MALI_OSK_MALI_PAGE_SIZE (((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER))
+/** Mali Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_MALI_PAGE_MASK (~((((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_MALI_PAGE*/
+
+/** @brief flags for mapping a user-accessible memory range
+ *
+ * Where a function with prefix '_mali_osk_mem_mapregion' accepts flags as one
+ * of the function parameters, it will use one of these. These allow per-page
+ * control over mappings. Compare with the mali_memory_allocation_flag type,
+ * which acts over an entire range
+ *
+ * These may be OR'd together with bitwise OR (|), but must be cast back into
+ * the type after OR'ing.
+ */
+typedef enum
+{
+       _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR = 0x1, /**< Physical address is OS Allocated */
+} _mali_osk_mem_mapregion_flags_t;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+/** @defgroup _mali_osk_notification OSK Notification Queues
+ * @{ */
+
+/** @brief Private type for notification queue objects */
+typedef struct _mali_osk_notification_queue_t_struct _mali_osk_notification_queue_t;
+
+/** @brief Public notification data object type */
+typedef struct _mali_osk_notification_t_struct
+{
+       u32 notification_type;   /**< The notification type */
+       u32 result_buffer_size; /**< Size of the result buffer to copy to user space */
+       void * result_buffer;   /**< Buffer containing any type specific data */
+} _mali_osk_notification_t;
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @defgroup _mali_osk_timer OSK Timer Callbacks
+ * @{ */
+
+/** @brief Function to call when a timer expires
+ *
+ * When a timer expires, this function is called. Note that on many systems,
+ * a timer callback will be executed in IRQ context. Therefore, restrictions
+ * may apply on what can be done inside the timer callback.
+ *
+ * If a timer requires more work to be done than can be achieved in an IRQ
+ * context, then it may defer the work with a work-queue. For example, it may
+ * use \ref _mali_osk_wq_schedule_work() to make use of a bottom-half handler
+ * to carry out the remaining work.
+ *
+ * Stopping the timer with \ref _mali_osk_timer_del() blocks on completion of
+ * the callback. Therefore, the callback may not obtain any mutexes also held
+ * by any callers of _mali_osk_timer_del(). Otherwise, a deadlock may occur.
+ *
+ * @param arg Function-specific data */
+typedef void (*_mali_osk_timer_callback_t)(void * arg );
+
+/** @brief Private type for Timer Callback Objects */
+typedef struct _mali_osk_timer_t_struct _mali_osk_timer_t;
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** @brief Public List objects.
+ *
+ * To use, add a _mali_osk_list_t member to the structure that may become part
+ * of a list. When traversing the _mali_osk_list_t objects, use the
+ * _MALI_OSK_CONTAINER_OF() macro to recover the structure from its
+ * _mali_osk_list_t member
+ *
+ * Each structure may have multiple _mali_osk_list_t members, so that the
+ * structure is part of multiple lists. When traversing lists, ensure that the
+ * correct _mali_osk_list_t member is used, because type-checking will be
+ * lost by the compiler.
+ */
+typedef struct _mali_osk_list_s
+{
+       struct _mali_osk_list_s *next;
+       struct _mali_osk_list_s *prev;
+} _mali_osk_list_t;
+
+/** @brief Initialize a list to be a head of an empty list
+ * @param exp the list to initialize. */
+#define _MALI_OSK_INIT_LIST_HEAD(exp) _mali_osk_list_init(exp)
+
+/** @brief Define a list variable, which is uninitialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD(exp)      _mali_osk_list_t exp
+
+/** @brief Define a list variable, which is initialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD_STATIC_INIT(exp) _mali_osk_list_t exp = { &exp, &exp }
+
+/** @brief Find the containing structure of another structure
+ *
+ * This is the reverse of the operation 'offsetof'. This means that the
+ * following condition is satisfied:
+ *
+ *   ptr == _MALI_OSK_CONTAINER_OF( &ptr->member, type, member )
+ *
+ * When ptr is of type 'type'.
+ *
+ * Its purpose is to recover a larger structure that has wrapped a smaller one.
+ *
+ * @note no type or memory checking occurs to ensure that a wrapper structure
+ * does in fact exist, and that it is being recovered with respect to the
+ * correct member.
+ *
+ * @param ptr the pointer to the member that is contained within the larger
+ * structure
+ * @param type the type of the structure that contains the member
+ * @param member the name of the member in the structure that ptr points to.
+ * @return a pointer to a \a type object which contains \a member, as pointed
+ * to by \a ptr.
+ */
+#define _MALI_OSK_CONTAINER_OF(ptr, type, member) \
+             ((type *)( ((char *)ptr) - offsetof(type,member) ))
+
+/** @brief Find the containing structure of a list
+ *
+ * When traversing a list, this is used to recover the containing structure,
+ * given that it contains a _mali_osk_list_t member.
+ *
+ * Each list must be of structures of one type, and must link the same members
+ * together, otherwise it will not be possible to correctly recover the
+ * structures that the lists link.
+ *
+ * @note no type or memory checking occurs to ensure that a structure does in
+ * fact exist for the list entry, and that it is being recovered with respect
+ * to the correct list member.
+ *
+ * @param ptr the pointer to the _mali_osk_list_t member in this structure
+ * @param type the type of the structure that contains the member
+ * @param member the member of the structure that ptr points to.
+ * @return a pointer to a \a type object which contains the _mali_osk_list_t
+ * \a member, as pointed to by the _mali_osk_list_t \a *ptr.
+ */
+#define _MALI_OSK_LIST_ENTRY(ptr, type, member) \
+            _MALI_OSK_CONTAINER_OF(ptr, type, member)
+
+/** @brief Enumerate a list safely
+ *
+ * With this macro, lists can be enumerated in a 'safe' manner. That is,
+ * entries can be deleted from the list without causing an error during
+ * enumeration. To achieve this, a 'temporary' pointer is required, which must
+ * be provided to the macro.
+ *
+ * Use it like a 'for()', 'while()' or 'do()' construct, and so it must be
+ * followed by a statement or compound-statement which will be executed for
+ * each list entry.
+ *
+ * Upon loop completion, providing that an early out was not taken in the
+ * loop body, then it is guaranteed that ptr->member == list, even if the loop
+ * body never executed.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY(ptr, tmp, list, type, member)         \
+        for (ptr = _MALI_OSK_LIST_ENTRY((list)->next, type, member),      \
+             tmp = _MALI_OSK_LIST_ENTRY(ptr->member.next, type, member); \
+             &ptr->member != (list);                                    \
+             ptr = tmp, tmp = _MALI_OSK_LIST_ENTRY(tmp->member.next, type, member))
+/** @} */ /* end group _mali_osk_list */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief resource description struct
+ *
+ * Platform independent representation of a Mali HW resource
+ */
+typedef struct _mali_osk_resource
+{
+       const char * description;       /**< short description of the resource */
+       u32 base;                       /**< Physical base address of the resource, as seen by Mali resources. */
+       u32 irq;                        /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */
+} _mali_osk_resource_t;
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+#include "mali_kernel_memory_engine.h"   /* include for mali_memory_allocation and mali_physical_memory_allocation type */
+
+/** @addtogroup _mali_osk_wq
+ * @{ */
+
+/** @brief Initialize work queues (for deferred work)
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_wq_init(void);
+
+/** @brief Terminate work queues (for deferred work)
+ */
+void _mali_osk_wq_term(void);
+
+/** @brief Create work in the work queue
+ *
+ * Creates a work object which can be scheduled in the work queue. When
+ * scheduled, \a handler will be called with \a data as the argument.
+ *
+ * Refer to \ref _mali_osk_wq_schedule_work() for details on how work
+ * is scheduled in the queue.
+ *
+ * The returned pointer must be freed with \ref _mali_osk_wq_delete_work()
+ * when no longer needed.
+ */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work( _mali_osk_wq_work_handler_t handler, void *data );
+
+/** @brief Delete a work object
+ *
+ * This will flush the work queue to ensure that the work handler will not
+ * be called after deletion.
+ */
+void _mali_osk_wq_delete_work( _mali_osk_wq_work_t *work );
+
+/** @brief Cause a queued, deferred call of the work handler
+ *
+ * _mali_osk_wq_schedule_work provides a mechanism for enqueuing deferred calls
+ * to the work handler. After calling \ref _mali_osk_wq_schedule_work(), the
+ * work handler will be scheduled to run at some point in the future.
+ *
+ * Typically this is called by the IRQ upper-half to defer further processing of
+ * IRQ-related work to the IRQ bottom-half handler. This is necessary for work
+ * that cannot be done in an IRQ context by the IRQ upper-half handler. Timer
+ * callbacks also use this mechanism, because they are treated as though they
+ * operate in an IRQ context. Refer to \ref _mali_osk_timer_t for more
+ * information.
+ *
+ * Code that operates in a kernel-process context (with no IRQ context
+ * restrictions) may also enqueue deferred calls to the IRQ bottom-half. The
+ * advantage over direct calling is that deferred calling allows the caller and
+ * IRQ bottom half to hold the same mutex, with a guarantee that they will not
+ * deadlock just by using this mechanism.
+ *
+ * _mali_osk_wq_schedule_work() places deferred call requests on a queue, to
+ * allow for more than one thread to make a deferred call. Therefore, if it is
+ * called 'K' times, then the IRQ bottom-half will be scheduled 'K' times too.
+ * 'K' is a number that is implementation-specific.
+ *
+ * _mali_osk_wq_schedule_work() is guaranteed to not block on:
+ * - enqueuing a deferred call request.
+ * - the completion of the work handler.
+ *
+ * This is to prevent deadlock. For example, if _mali_osk_wq_schedule_work()
+ * blocked, then it would cause a deadlock when the following two conditions
+ * hold:
+ * - The work handler callback (of type _mali_osk_wq_work_handler_t) locks
+ * a mutex
+ * - And, at the same time, the caller of _mali_osk_wq_schedule_work() also
+ * holds the same mutex
+ *
+ * @note care must be taken to not overflow the queue that
+ * _mali_osk_wq_schedule_work() operates on. Code must be structured to
+ * ensure that the number of requests made to the queue is bounded. Otherwise,
+ * work will be lost.
+ *
+ * The queue that _mali_osk_wq_schedule_work implements is a FIFO of N-writer,
+ * 1-reader type. The writers are the callers of _mali_osk_wq_schedule_work
+ * (all OSK-registered IRQ upper-half handlers in the system, watchdog timers,
+ * callers from a Kernel-process context). The reader is a single thread that
+ * handles all OSK-registered work.
+ *
+ * @param work a pointer to the _mali_osk_wq_work_t object corresponding to the
+ * work to begin processing.
+ */
+void _mali_osk_wq_schedule_work( _mali_osk_wq_work_t *work );
+
+/** @brief Flush the work queue
+ *
+ * This will flush the OSK work queue, ensuring all work in the queue has
+ * completed before returning.
+ *
+ * Since this blocks on the completion of work in the work-queue, the
+ * caller of this function \b must \b not hold any mutexes that are taken by
+ * any registered work handler. To do so may cause a deadlock.
+ *
+ */
+void _mali_osk_wq_flush(void);
+
+
+/** @} */ /* end group _mali_osk_wq */
+
+/** @addtogroup _mali_osk_irq
+ * @{ */
+
+/** @brief Initialize IRQ handling for a resource
+ *
+ * Registers an interrupt handler \a uhandler for the given IRQ number \a irqnum.
+ * \a data will be passed as argument to the handler when an interrupt occurs.
+ *
+ * If \a irqnum is -1, _mali_osk_irq_init will probe for the IRQ number using
+ * the supplied \a trigger_func and \a ack_func. These functions will also
+ * receive \a data as their argument.
+ *
+ * @param irqnum The IRQ number that the resource uses, as seen by the CPU.
+ * The value -1 has a special meaning which indicates the use of probing, and
+ * trigger_func and ack_func must be non-NULL.
+ * @param uhandler The interrupt handler, corresponding to a ISR handler for
+ * the resource
+ * @param int_data resource specific data, which will be passed to uhandler
+ * @param trigger_func Optional: a function to trigger the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param ack_func Optional: a function to acknowledge the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param probe_data resource-specific data, which will be passed to
+ * (if present) trigger_func and ack_func
+ * @param description textual description of the IRQ resource.
+ * @return on success, a pointer to a _mali_osk_irq_t object, which represents
+ * the IRQ handling on this resource. NULL on failure.
+ */
+_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description );
+
+/** @brief Terminate IRQ handling on a resource.
+ *
+ * This will disable the interrupt from the device, and then waits for any
+ * currently executing IRQ handlers to complete.
+ *
+ * @note If work is deferred to an IRQ bottom-half handler through
+ * \ref _mali_osk_wq_schedule_work(), be sure to flush any remaining work
+ * with \ref _mali_osk_wq_flush() or (implicitly) with \ref _mali_osk_wq_delete_work()
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ handling is to be terminated.
+ */
+void _mali_osk_irq_term( _mali_osk_irq_t *irq );
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @addtogroup _mali_osk_atomic
+ * @{ */
+
+/** @brief Decrement an atomic counter
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom );
+
+/** @brief Decrement an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after decrement */
+u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom );
+
+/** @brief Increment an atomic counter
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom );
+
+/** @brief Increment an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter */
+u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom );
+
+/** @brief Initialize an atomic counter
+ *
+ * @note the parameter required is a u32, and so signed integers should be
+ * cast to u32.
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the value to initialize the atomic counter.
+ * @return _MALI_OSK_ERR_OK on success, otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val );
+
+/** @brief Read a value from an atomic counter
+ *
+ * This can only be safely used to determine the value of the counter when it
+ * is guaranteed that other threads will not be modifying the counter. This
+ * makes its usefulness limited.
+ *
+ * @param atom pointer to an atomic counter
+ */
+u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom );
+
+/** @brief Terminate an atomic counter
+ *
+ * @param atom pointer to an atomic counter
+ */
+void _mali_osk_atomic_term( _mali_osk_atomic_t *atom );
+/** @} */  /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_memory OSK Memory Allocation
+ * @{ */
+
+/** @brief Allocate zero-initialized memory.
+ *
+ * Returns a buffer capable of containing at least \a n elements of \a size
+ * bytes each. The buffer is initialized to zero.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider to use _mali_osk_vmalloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * @param n Number of elements to allocate
+ * @param size Size of each element
+ * @return On success, the zero-initialized buffer allocated. NULL on failure
+ */
+void *_mali_osk_calloc( u32 n, u32 size );
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider to use _mali_osk_vmalloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_malloc( u32 size );
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_malloc() and _mali_osk_calloc()
+ * must be freed before the application exits. Otherwise,
+ * a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_free( void *ptr );
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * This function is potentially slower than _mali_osk_malloc() and _mali_osk_calloc(),
+ * but do support bigger sizes.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_valloc( u32 size );
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_valloc() must be freed before the
+ * application exits. Otherwise a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_vfree( void *ptr );
+
+/** @brief Copies memory.
+ *
+ * Copies the \a len bytes from the buffer pointed by the parameter \a src
+ * directly to the buffer pointed by \a dst.
+ *
+ * It is an error for \a src to overlap \a dst anywhere in \a len bytes.
+ *
+ * @param dst Pointer to the destination array where the content is to be
+ * copied.
+ * @param src Pointer to the source of data to be copied.
+ * @param len Number of bytes to copy.
+ * @return \a dst is always passed through unmodified.
+ */
+void *_mali_osk_memcpy( void *dst, const void *src, u32 len );
+
+/** @brief Fills memory.
+ *
+ * Sets the first \a n bytes of the block of memory pointed to by \a s to
+ * the specified value
+ * @param s Pointer to the block of memory to fill.
+ * @param c Value to be set, passed as u32. Only the 8 Least Significant Bits (LSB)
+ * are used.
+ * @param n Number of bytes to be set to the value.
+ * @return \a s is always passed through unmodified
+ */
+void *_mali_osk_memset( void *s, u32 c, u32 n );
+/** @} */ /* end group _mali_osk_memory */
+
+
+/** @brief Checks the amount of memory allocated
+ *
+ * Checks that not more than \a max_allocated bytes are allocated.
+ *
+ * Some OS bring up an interactive out of memory dialogue when the
+ * system runs out of memory. This can stall non-interactive
+ * apps (e.g. automated test runs). This function can be used to
+ * not trigger the OOM dialogue by keeping allocations
+ * within a certain limit.
+ *
+ * @return MALI_TRUE when \a max_allocated bytes are not in use yet. MALI_FALSE
+ * when at least \a max_allocated bytes are in use.
+ */
+mali_bool _mali_osk_mem_check_allocated( u32 max_allocated );
+
+/** @addtogroup _mali_osk_lock
+ * @{ */
+
+/** @brief Initialize a Mutual Exclusion Lock
+ *
+ * Locks are created in the signalled (unlocked) state.
+ *
+ * initial must be zero, since there is currently no means of expressing
+ * whether a reader/writer lock should be initially locked as a reader or
+ * writer. This would require some encoding to be used.
+ *
+ * 'Automatic' ordering means that locks must be obtained in the order that
+ * they were created. For all locks that can be held at the same time, they must
+ * either all provide the order parameter, or they all must use 'automatic'
+ * ordering - because there is no way of mixing 'automatic' and 'manual'
+ * ordering.
+ *
+ * @param flags flags combined with bitwise OR ('|'), or zero. There are
+ * restrictions on which flags can be combined, @see _mali_osk_lock_flags_t.
+ * @param initial For future expansion into semaphores. SBZ.
+ * @param order The locking order of the mutex. That is, locks obtained by the
+ * same thread must have been created with an increasing order parameter, for
+ * deadlock prevention. Setting to zero causes 'automatic' ordering to be used.
+ * @return On success, a pointer to a _mali_osk_lock_t object. NULL on failure.
+ */
+_mali_osk_lock_t *_mali_osk_lock_init( _mali_osk_lock_flags_t flags, u32 initial, u32 order );
+
+/** @brief Wait for a lock to be signalled (obtained)
+
+ * After a thread has successfully waited on the lock, the lock is obtained by
+ * the thread, and is marked as unsignalled. The thread releases the lock by
+ * signalling it.
+ *
+ * In the case of Reader/Writer locks, multiple readers can obtain a lock in
+ * the absence of writers, which is a performance optimization (providing that
+ * the readers never write to the protected resource).
+ *
+ * To prevent deadlock, locks must always be obtained in the same order.
+ *
+ * For locks marked as _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, it is a
+ * programming error for the function to exit without obtaining the lock. This
+ * means that the error code must only be checked for interruptible locks.
+ *
+ * @param lock the lock to wait upon (obtain).
+ * @param mode the mode in which the lock should be obtained. Unless the lock
+ * was created with _MALI_OSK_LOCKFLAG_READERWRITER, this must be
+ * _MALI_OSK_LOCKMODE_RW.
+ * @return On success, _MALI_OSK_ERR_OK. For interruptible locks, a suitable
+ * _mali_osk_errcode_t will be returned on failure, and the lock will not be
+ * obtained. In this case, the error code must be propagated up to the U/K
+ * interface.
+ */
+_mali_osk_errcode_t _mali_osk_lock_wait( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode);
+
+
+/** @brief Signal (release) a lock
+ *
+ * Locks may only be signalled by the thread that originally waited upon the
+ * lock.
+ *
+ * @note In the OSU, a flag exists to allow any thread to signal a
+ * lock. Such functionality is not present in the OSK.
+ *
+ * @param lock the lock to signal (release).
+ * @param mode the mode in which the lock should be obtained. This must match
+ * the mode in which the lock was waited upon.
+ */
+void _mali_osk_lock_signal( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode );
+
+/** @brief Terminate a lock
+ *
+ * This terminates a lock and frees all associated resources.
+ *
+ * It is a programming error to terminate the lock when it is held (unsignalled)
+ * by a thread.
+ *
+ * @param lock the lock to terminate.
+ */
+void _mali_osk_lock_term( _mali_osk_lock_t *lock );
+/** @} */ /* end group _mali_osk_lock */
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Issue a memory barrier
+ *
+ * This defines an arbitrary memory barrier operation, which forces an ordering constraint
+ * on memory read and write operations.
+ */
+void _mali_osk_mem_barrier( void );
+
+/** @brief Issue a write memory barrier
+ *
+ * This defines a write memory barrier operation which forces an ordering constraint
+ * on memory write operations.
+ */
+void _mali_osk_write_mem_barrier( void );
+
+/** @brief Map a physically contiguous region into kernel space
+ *
+ * This is primarily used for mapping in registers from resources, and Mali-MMU
+ * page tables. The mapping is only visible from kernel-space.
+ *
+ * Access has to go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @param phys CPU-physical base address of the memory to map in. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * map in
+ * @param description A textual description of the memory being mapped in.
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure.
+ */
+mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description );
+
+/** @brief Unmap a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_mapioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt an unmap twice
+ * - unmap only part of a range obtained through _mali_osk_mem_mapioregion
+ * - unmap more than the range obtained through  _mali_osk_mem_mapioregion
+ * - unmap an address range that was not successfully mapped using
+ * _mali_osk_mem_mapioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in. This must be aligned to the system's page size, which is assumed
+ * to be 4K
+ * @param size The number of bytes that were originally mapped in.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address mapping );
+
+/** @brief Allocate and Map a physically contiguous region into kernel space
+ *
+ * This is used for allocating physically contiguous regions (such as Mali-MMU
+ * page tables) and mapping them into kernel space. The mapping is only
+ * visible from kernel-space.
+ *
+ * The alignment of the returned memory is guaranteed to be at least
+ * _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * Access must go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @note This function is primarily to provide support for OSs that are
+ * incapable of separating the tasks 'allocate physically contiguous memory'
+ * and 'map it into kernel space'
+ *
+ * @param[out] phys CPU-physical base address of memory that was allocated.
+ * (*phys) will be guaranteed to be aligned to at least
+ * _MALI_OSK_CPU_PAGE_SIZE on success.
+ *
+ * @param[in] size the number of bytes of physically contiguous memory to
+ * allocate. This must be a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure, and (*phys) is unmodified.
+ */
+mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size );
+
+/** @brief Free a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_allocioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt a free twice on the same ioregion
+ * - free only part of a range obtained through _mali_osk_mem_allocioregion
+ * - free more than the range obtained through  _mali_osk_mem_allocioregion
+ * - free an address range that was not successfully mapped using
+ * _mali_osk_mem_allocioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in, which was aligned to _MALI_OSK_CPU_PAGE_SIZE.
+ * @param size The number of bytes that were originally mapped in, which was
+ * a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address mapping );
+
+/** @brief Request a region of physically contiguous memory
+ *
+ * This is used to ensure exclusive access to a region of physically contiguous
+ * memory.
+ *
+ * It is acceptable to implement this as a stub. However, it is then the job
+ * of the System Integrator to ensure that no other device driver will be using
+ * the physical address ranges used by Mali, while the Mali device driver is
+ * loaded.
+ *
+ * @param phys CPU-physical base address of the memory to request. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * request.
+ * @param description A textual description of the memory being requested.
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description );
+
+/** @brief Un-request a region of physically contiguous memory
+ *
+ * This is used to release a region of physically contiguous memory previously
+ * requested through _mali_osk_mem_reqregion, so that other device drivers may
+ * use it. This will be called at time of Mali device driver termination.
+ *
+ * It is a programming error to attempt to:
+ * - unrequest a region twice
+ * - unrequest only part of a range obtained through _mali_osk_mem_reqregion
+ * - unrequest more than the range obtained through  _mali_osk_mem_reqregion
+ * - unrequest an address range that was not successfully requested using
+ * _mali_osk_mem_reqregion
+ *
+ * @param phys CPU-physical base address of the memory to un-request. This must
+ * be aligned to the system's page size, which is assumed to be 4K
+ * @param size the number of bytes of physically contiguous address space to
+ * un-request.
+ */
+void _mali_osk_mem_unreqregion( u32 phys, u32 size );
+
+/** @brief Read from a location currently mapped in through
+ * _mali_osk_mem_mapioregion
+ *
+ * This reads a 32-bit word from a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to read from memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to read from
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @return the 32-bit word from the specified location.
+ */
+u32 _mali_osk_mem_ioread32( volatile mali_io_address mapping, u32 offset );
+#ifdef CONFIG_SLP_MALI_DBG
+u32 _mali_osk_mem_ioread32_cpu( volatile mali_io_address mapping, u32 offset );
+#endif
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion without memory barriers
+ *
+ * This writes a 32-bit word to a 32-bit aligned location without using a memory barrier.
+ * It is a programming error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32_relaxed( volatile mali_io_address addr, u32 offset, u32 val );
+#ifdef CONFIG_SLP_MALI_DBG
+void _mali_osk_mem_iowrite32_relaxed_cpu( volatile mali_io_address addr, u32 offset, u32 val );
+#endif
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion with write memory barrier
+ *
+ * This writes a 32-bit word to a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32( volatile mali_io_address mapping, u32 offset, u32 val );
+
+/** @brief Flush all CPU caches
+ *
+ * This should only be implemented if flushing of the cache is required for
+ * memory mapped in through _mali_osk_mem_mapregion.
+ */
+void _mali_osk_cache_flushall( void );
+
+/** @brief Flush any caches necessary for the CPU and MALI to have the same view of a range of uncached mapped memory
+ *
+ * This should only be implemented if your OS doesn't do a full cache flush (inner & outer)
+ * after allocating uncached mapped memory.
+ *
+ * Some OS do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This is required for MALI to have the correct view of the memory.
+ */
+void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size );
+
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+/** @addtogroup _mali_osk_notification
+ *
+ * User space notification framework
+ *
+ * Communication with user space of asynchronous events is performed through a
+ * synchronous call to the \ref u_k_api.
+ *
+ * Since the events are asynchronous, the events have to be queued until a
+ * synchronous U/K API call can be made by user-space. A U/K API call might also
+ * be received before any event has happened. Therefore the notifications the
+ * different subsystems wants to send to user space has to be queued for later
+ * reception, or a U/K API call has to be blocked until an event has occurred.
+ *
+ * Typical uses of notifications are after running of jobs on the hardware or
+ * when changes to the system is detected that needs to be relayed to user
+ * space.
+ *
+ * After an event has occurred user space has to be notified using some kind of
+ * message. The notification framework supports sending messages to waiting
+ * threads or queueing of messages until a U/K API call is made.
+ *
+ * The notification queue is a FIFO. There are no restrictions on the numbers
+ * of readers or writers in the queue.
+ *
+ * A message contains what user space needs to identify how to handle an
+ * event. This includes a type field and a possible type specific payload.
+ *
+ * A notification to user space is represented by a
+ * \ref _mali_osk_notification_t object. A sender gets hold of such an object
+ * using _mali_osk_notification_create(). The buffer given by the
+ * _mali_osk_notification_t::result_buffer field in the object is used to store
+ * any type specific data. The other fields are internal to the queue system
+ * and should not be touched.
+ *
+ * @{ */
+
+/** @brief Create a notification object
+ *
+ * Returns a notification object which can be added to the queue of
+ * notifications pending for user space transfer.
+ *
+ * The implementation will initialize all members of the
+ * \ref _mali_osk_notification_t object. In particular, the
+ * _mali_osk_notification_t::result_buffer member will be initialized to point
+ * to \a size bytes of storage, and that storage will be suitably aligned for
+ * storage of any structure. That is, the created buffer meets the same
+ * requirements as _mali_osk_malloc().
+ *
+ * The notification object must be deleted when not in use. Use
+ * _mali_osk_notification_delete() for deleting it.
+ *
+ * @note You \b must \b not call _mali_osk_free() on a \ref _mali_osk_notification_t,
+ * object, or on a _mali_osk_notification_t::result_buffer. You must only use
+ * _mali_osk_notification_delete() to free the resources associated with a
+ * \ref _mali_osk_notification_t object.
+ *
+ * @param type The notification type
+ * @param size The size of the type specific buffer to send
+ * @return Pointer to a notification object with a suitable buffer, or NULL on error.
+ */
+_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size );
+
+/** @brief Delete a notification object
+ *
+ * This must be called to reclaim the resources of a notification object. This
+ * includes:
+ * - The _mali_osk_notification_t::result_buffer
+ * - The \ref _mali_osk_notification_t itself.
+ *
+ * A notification object \b must \b not be used after it has been deleted by
+ * _mali_osk_notification_delete().
+ *
+ * In addition, the notification object may not be deleted while it is in a
+ * queue. That is, if it has been placed on a queue with
+ * _mali_osk_notification_queue_send(), then it must not be deleted until
+ * it has been received by a call to _mali_osk_notification_queue_receive().
+ * Otherwise, the queue may be corrupted.
+ *
+ * @param object the notification object to delete.
+ */
+void _mali_osk_notification_delete( _mali_osk_notification_t *object );
+
+/** @brief Create a notification queue
+ *
+ * Creates a notification queue which can be used to queue messages for user
+ * delivery and get queued messages from
+ *
+ * The queue is a FIFO, and has no restrictions on the numbers of readers or
+ * writers.
+ *
+ * When the queue is no longer in use, it must be terminated with
+ * \ref _mali_osk_notification_queue_term(). Failure to do so will result in a
+ * memory leak.
+ *
+ * @return Pointer to a new notification queue or NULL on error.
+ */
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void );
+
+/** @brief Destroy a notification queue
+ *
+ * Destroys a notification queue and frees associated resources from the queue.
+ *
+ * A notification queue \b must \b not be destroyed in the following cases:
+ * - while there are \ref _mali_osk_notification_t objects in the queue.
+ * - while there are writers currently acting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_send() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_send() on the queue in the future.
+ * - while there are readers currently waiting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_receive() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_receive() on the queue in the future.
+ *
+ * Therefore, all \ref _mali_osk_notification_t objects must be flushed and
+ * deleted by the code that makes use of the notification queues, since only
+ * they know the structure of the _mali_osk_notification_t::result_buffer
+ * (even if it may only be a flat structure).
+ *
+ * @note Since the queue is a FIFO, the code using notification queues may
+ * create its own 'flush' type of notification, to assist in flushing the
+ * queue.
+ *
+ * Once the queue has been destroyed, it must not be used again.
+ *
+ * @param queue The queue to destroy
+ */
+void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue );
+
+/** @brief Schedule notification for delivery
+ *
+ * When a \ref _mali_osk_notification_t object has been created successfully
+ * and set up, it may be added to the queue of objects waiting for user space
+ * transfer.
+ *
+ * The sending will not block if the queue is full.
+ *
+ * A \ref _mali_osk_notification_t object \b must \b not be put on two different
+ * queues at the same time, or enqueued twice onto a single queue before
+ * reception. However, it is acceptable for it to be requeued \em after reception
+ * from a call to _mali_osk_notification_queue_receive(), even onto the same queue.
+ *
+ * Again, requeuing must also not enqueue onto two different queues at the same
+ * time, or enqueue onto the same queue twice before reception.
+ *
+ * @param queue The notification queue to add this notification to
+ * @param object The entry to add
+ */
+void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object );
+
+/** @brief Receive a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the thread will sleep until one becomes ready.
+ * Therefore, notifications may not be received into an
+ * IRQ or 'atomic' context (that is, a context where sleeping is disallowed).
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_RESTARTSYSCALL if the sleep was interrupted.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
+
+/** @brief Dequeues a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the function call will return an error code.
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if queue was empty.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @addtogroup _mali_osk_timer
+ *
+ * Timers use the OS's representation of time, which are 'ticks'. This is to
+ * prevent aliasing problems between the internal timer time, and the time
+ * asked for.
+ *
+ * @{ */
+
+/** @brief Initialize a timer
+ *
+ * Allocates resources for a new timer, and initializes them. This does not
+ * start the timer.
+ *
+ * @return a pointer to the allocated timer object, or NULL on failure.
+ */
+_mali_osk_timer_t *_mali_osk_timer_init(void);
+
+/** @brief Start a timer
+ *
+ * It is an error to start a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * It is an error to use this to start an already started timer.
+ *
+ * The timer will expire in \a ticks_to_expire ticks, at which point, the
+ * callback function will be invoked with the callback-specific data,
+ * as registered by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to start
+ * @param ticks_to_expire the amount of time in ticks for the timer to run
+ * before triggering.
+ */
+void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire );
+
+/** @brief Modify a timer
+ *
+ * Set the relative time at which a timer will expire, and start it if it is
+ * stopped. If \a ticks_to_expire 0 the timer fires immediately.
+ *
+ * It is an error to modify a timer without setting the callback via
+ *  _mali_osk_timer_setcallback().
+ *
+ * The timer will expire at \a ticks_to_expire from the time of the call, at
+ * which point, the callback function will be invoked with the
+ * callback-specific data, as set by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to modify, and start if necessary
+ * @param ticks_to_expire the \em relative time in ticks at which this timer
+ * should trigger.
+ *
+ */
+void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 ticks_to_expire);
+
+/** @brief Stop a timer, and block on its completion.
+ *
+ * Stop the timer. When the function returns, it is guaranteed that the timer's
+ * callback will not be running on any CPU core.
+ *
+ * Since stopping the timer blocks on completion of the callback, the callback
+ * may not obtain any mutexes that the caller holds. Otherwise, a deadlock will
+ * occur.
+ *
+ * @note While the callback itself is guaranteed to not be running, work
+ * enqueued on the work-queue by the timer (with
+ * \ref _mali_osk_wq_schedule_work()) may still run. The timer callback and
+ * work handler must take this into account.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ *
+ */
+void _mali_osk_timer_del( _mali_osk_timer_t *tim );
+
+/** @brief Stop a timer.
+ *
+ * Stop the timer. When the function returns, the timer's callback may still be
+ * running on any CPU core.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ */
+void _mali_osk_timer_del_async( _mali_osk_timer_t *tim );
+
+/** @brief Check if timer is pending.
+ *
+ * Check if timer is active.
+ *
+ * @param tim the timer to check
+ * @return MALI_TRUE if the timer is active, MALI_FALSE if it is not active
+ */
+mali_bool _mali_osk_timer_pending( _mali_osk_timer_t *tim);
+
+/** @brief Set a timer's callback parameters.
+ *
+ * This must be called at least once before a timer is started/modified.
+ *
+ * After a timer has been stopped or expires, the callback remains set. This
+ * means that restarting the timer will call the same function with the same
+ * parameters on expiry.
+ *
+ * @param tim the timer to set callback on.
+ * @param callback Function to call when timer expires
+ * @param data Function-specific data to supply to the function on expiry.
+ */
+void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data );
+
+/** @brief Terminate a timer, and deallocate resources.
+ *
+ * The timer must first be stopped by calling _mali_osk_timer_del().
+ *
+ * It is a programming error for _mali_osk_timer_term() to be called on:
+ * - timer that is currently running
+ * - a timer that is currently executing its callback.
+ *
+ * @param tim the timer to deallocate.
+ */
+void _mali_osk_timer_term( _mali_osk_timer_t *tim );
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @defgroup _mali_osk_time OSK Time functions
+ *
+ * \ref _mali_osk_time use the OS's representation of time, which are
+ * 'ticks'. This is to prevent aliasing problems between the internal timer
+ * time, and the time asked for.
+ *
+ * OS tick time is measured as a u32. The time stored in a u32 may either be
+ * an absolute time, or a time delta between two events. Whilst it is valid to
+ * use math operators to \em change the tick value represented as a u32, it
+ * is often only meaningful to do such operations on time deltas, rather than
+ * on absolute time. However, it is meaningful to add/subtract time deltas to
+ * absolute times.
+ *
+ * Conversion between tick time and milliseconds (ms) may not be loss-less,
+ * and are \em implementation \em dependent.
+ *
+ * Code using OS time must take this into account, since:
+ * - a small OS time may (or may not) be rounded
+ * - a large time may (or may not) overflow
+ *
+ * @{ */
+
+/** @brief Return whether ticka occurs after tickb
+ *
+ * Some OSs handle tick 'rollover' specially, and so can be more robust against
+ * tick counters rolling-over. This function must therefore be called to
+ * determine if a time (in ticks) really occurs after another time (in ticks).
+ *
+ * @param ticka ticka
+ * @param tickb tickb
+ * @return non-zero if ticka represents a time that occurs after tickb.
+ * Zero otherwise.
+ */
+int    _mali_osk_time_after( u32 ticka, u32 tickb );
+
+/** @brief Convert milliseconds to OS 'ticks'
+ *
+ * @param ms time interval in milliseconds
+ * @return the corresponding time interval in OS ticks.
+ */
+u32    _mali_osk_time_mstoticks( u32 ms );
+
+/** @brief Convert OS 'ticks' to milliseconds
+ *
+ * @param ticks time interval in OS ticks.
+ * @return the corresponding time interval in milliseconds
+ */
+u32    _mali_osk_time_tickstoms( u32 ticks );
+
+
+/** @brief Get the current time in OS 'ticks'.
+ * @return the current time in OS 'ticks'.
+ */
+u32    _mali_osk_time_tickcount( void );
+
+/** @brief Cause a microsecond delay
+ *
+ * The delay will have microsecond resolution, and is necessary for correct
+ * operation of the driver. At worst, the delay will be \b at least \a usecs
+ * microseconds, and so may be (significantly) more.
+ *
+ * This function may be implemented as a busy-wait, which is the most sensible
+ * implementation. On OSs where there are situations in which a thread must not
+ * sleep, this is definitely implemented as a busy-wait.
+ *
+ * @param usecs the number of microseconds to wait for.
+ */
+void _mali_osk_time_ubusydelay( u32 usecs );
+
+/** @brief Return time in nano seconds, since any given reference.
+ *
+ * @return Time in nano seconds
+ */
+u64 _mali_osk_time_get_ns( void );
+
+
+/** @} */ /* end group _mali_osk_time */
+
+/** @defgroup _mali_osk_math OSK Math
+ * @{ */
+
+/** @brief Count Leading Zeros (Little-endian)
+ *
+ * @note This function must be implemented to support the reference
+ * implementation of _mali_osk_find_first_zero_bit, as defined in
+ * mali_osk_bitops.h.
+ *
+ * @param val 32-bit words to count leading zeros on
+ * @return the number of leading zeros.
+ */
+u32 _mali_osk_clz( u32 val );
+/** @} */ /* end group _mali_osk_math */
+
+/** @defgroup _mali_osk_wait_queue OSK Wait Queue functionality
+ * @{ */
+/** @brief Private type for wait queue objects */
+typedef struct _mali_osk_wait_queue_t_struct _mali_osk_wait_queue_t;
+
+/** @brief Initialize an empty Wait Queue */
+_mali_osk_wait_queue_t* _mali_osk_wait_queue_init( void );
+
+/** @brief Sleep  if condition is false
+ *
+ * @param queue the queue to use
+ * @param condition function pointer to a boolean function
+ *
+ * Put thread to sleep if the given \a condition function returns false. When
+ * being asked to wake up again, the condition will be re-checked and the
+ * thread only woken up if the condition is now true.
+ */
+void _mali_osk_wait_queue_wait_event( _mali_osk_wait_queue_t *queue, mali_bool (*condition)(void) );
+
+/** @brief Wake up all threads in wait queue if their respective conditions are
+ * true
+ *
+ * @param queue the queue whose threads should be woken up
+ *
+ * Wake up all threads in wait queue \a queue whose condition is now true.
+ */
+void _mali_osk_wait_queue_wake_up( _mali_osk_wait_queue_t *queue );
+
+/** @brief terminate a wait queue
+ *
+ * @param queue the queue to terminate.
+ */
+void _mali_osk_wait_queue_term( _mali_osk_wait_queue_t *queue );
+/** @} */ /* end group _mali_osk_wait_queue */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Output a device driver debug message.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_dbgmsg( const char *fmt, ... );
+
+/** @brief Print fmt into buf.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param buf a pointer to the result buffer
+ * @param size the total number of bytes allowed to write to \a buf
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ * @return The number of bytes written to \a buf
+ */
+u32 _mali_osk_snprintf( char *buf, u32 size, const char *fmt, ... );
+
+/** @brief Abnormal process abort.
+ *
+ * Terminates the caller-process if this function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h.
+ *
+ * This function will never return - because to continue from a Debug assert
+ * could cause even more problems, and hinder debugging of the initial problem.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_abort(void);
+
+/** @brief Sets breakpoint at point where function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h,
+ * to assist in debugging. If debugging at this level is not required, then this
+ * function may be implemented as a stub.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_break(void);
+
+/** @brief Return an identifier for the calling process.
+ *
+ * @return Identifier for the calling process.
+ */
+u32 _mali_osk_get_pid(void);
+
+/** @brief Return an identifier for the calling thread.
+ *
+ * @return Identifier for the calling thread.
+ */
+u32 _mali_osk_get_tid(void);
+
+/** @brief Enable OS controlled runtime power management
+ */
+void _mali_osk_pm_dev_enable(void);
+
+/** @brief Disable OS controlled runtime power management
+ */
+void _mali_osk_pm_dev_disable(void);
+
+
+/** @brief Take a reference to the power manager system for the Mali device.
+ *
+ * When function returns successfully, Mali is ON.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_dec() to release this reference.
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_add(void);
+
+
+/** @brief Release the reference to the power manager system for the Mali device.
+ *
+ * When reference count reach zero, the cores can be off.
+ *
+ * @note This must be used to release references taken with \a _mali_osk_pm_dev_ref_add().
+ */
+void _mali_osk_pm_dev_ref_dec(void);
+
+
+/** @brief Take a reference to the power manager system for the Mali device.
+ *
+ * Will leave the cores powered off if they are already powered off.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_dec() to release this reference.
+ *
+ * @return MALI_TRUE if the Mali GPU is powered on, otherwise MALI_FALSE.
+ */
+mali_bool _mali_osk_pm_dev_ref_add_no_power_on(void);
+
+
+/** @brief Release the reference to the power manager system for the Mali device.
+ *
+ * When reference count reach zero, the cores can be off.
+ *
+ * @note This must be used to release references taken with \a _mali_osk_pm_dev_ref_add_no_power_on().
+ */
+void _mali_osk_pm_dev_ref_dec_no_power_on(void);
+
+/** @} */ /* end group  _mali_osk_miscellaneous */
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#include "mali_osk_specific.h"           /* include any per-os specifics */
+
+/* Check standard inlines */
+#ifndef MALI_STATIC_INLINE
+       #error MALI_STATIC_INLINE not defined on your OS
+#endif
+
+#ifndef MALI_NON_STATIC_INLINE
+       #error MALI_NON_STATIC_INLINE not defined on your OS
+#endif
+
+#endif /* __MALI_OSK_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_osk_bitops.h b/drivers/gpu/arm/mali400/mali/common/mali_osk_bitops.h
new file mode 100644 (file)
index 0000000..843ff20
--- /dev/null
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_bitops.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_BITOPS_H__
+#define __MALI_OSK_BITOPS_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+MALI_STATIC_INLINE void _mali_internal_clear_bit( u32 bit, u32 *addr )
+{
+       MALI_DEBUG_ASSERT( bit < 32 );
+       MALI_DEBUG_ASSERT( NULL != addr );
+
+       (*addr) &= ~(1 << bit);
+}
+
+MALI_STATIC_INLINE void _mali_internal_set_bit( u32 bit, u32 *addr )
+{
+       MALI_DEBUG_ASSERT( bit < 32 );
+       MALI_DEBUG_ASSERT( NULL != addr );
+
+       (*addr) |= (1 << bit);
+}
+
+MALI_STATIC_INLINE u32 _mali_internal_test_bit( u32 bit, u32 value )
+{
+       MALI_DEBUG_ASSERT( bit < 32 );
+       return value & (1 << bit);
+}
+
+MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit( u32 value )
+{
+       u32 inverted;
+       u32 negated;
+       u32 isolated;
+       u32 leading_zeros;
+
+       /* Begin with xxx...x0yyy...y, where ys are 1, number of ys is in range  0..31 */
+       inverted = ~value; /* zzz...z1000...0 */
+       /* Using count_trailing_zeros on inverted value -
+        * See ARM System Developers Guide for details of count_trailing_zeros */
+
+       /* Isolate the zero: it is preceded by a run of 1s, so add 1 to it */
+       negated = (u32)-inverted ; /* -a == ~a + 1 (mod 2^n) for n-bit numbers */
+    /* negated = xxx...x1000...0 */
+
+       isolated = negated & inverted ; /* xxx...x1000...0 & zzz...z1000...0, zs are ~xs */
+       /* And so the first zero bit is in the same position as the 1 == number of 1s that preceded it
+        * Note that the output is zero if value was all 1s */
+
+       leading_zeros = _mali_osk_clz( isolated );
+
+       return 31 - leading_zeros;
+}
+
+
+/** @defgroup _mali_osk_bitops OSK Non-atomic Bit-operations
+ * @{ */
+
+/**
+ * These bit-operations do not work atomically, and so locks must be used if
+ * atomicity is required.
+ *
+ * Reference implementations for Little Endian are provided, and so it should
+ * not normally be necessary to re-implement these. Efficient bit-twiddling
+ * techniques are used where possible, implemented in portable C.
+ *
+ * Note that these reference implementations rely on _mali_osk_clz() being
+ * implemented.
+ */
+
+/** @brief Clear a bit in a sequence of 32-bit words
+ * @param nr bit number to clear, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit( u32 nr, u32 *addr )
+{
+       addr += nr >> 5; /* find the correct word */
+       nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+       _mali_internal_clear_bit( nr, addr );
+}
+
+/** @brief Set a bit in a sequence of 32-bit words
+ * @param nr bit number to set, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit( u32 nr, u32 *addr )
+{
+       addr += nr >> 5; /* find the correct word */
+       nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+       _mali_internal_set_bit( nr, addr );
+}
+
+/** @brief Test a bit in a sequence of 32-bit words
+ * @param nr bit number to test, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ * @return zero if bit was clear, non-zero if set. Do not rely on the return
+ * value being related to the actual word under test.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_test_bit( u32 nr, u32 *addr )
+{
+       addr += nr >> 5; /* find the correct word */
+       nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+       return _mali_internal_test_bit( nr, *addr );
+}
+
+/* Return maxbit if not found */
+/** @brief Find the first zero bit in a sequence of 32-bit words
+ * @param addr starting point for search.
+ * @param maxbit the maximum number of bits to search
+ * @return the number of the first zero bit found, or maxbit if none were found
+ * in the specified range.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_find_first_zero_bit( const u32 *addr, u32 maxbit )
+{
+       u32 total;
+
+       for ( total = 0; total < maxbit; total += 32, ++addr )
+       {
+               int result;
+               result = _mali_internal_find_first_zero_bit( *addr );
+
+               /* non-negative signifies the bit was found */
+               if ( result >= 0 )
+               {
+                       total += (u32)result;
+                       break;
+               }
+       }
+
+       /* Now check if we reached maxbit or above */
+       if ( total >= maxbit )
+       {
+               total = maxbit;
+       }
+
+       return total; /* either the found bit nr, or maxbit if not found */
+}
+/** @} */ /* end group _mali_osk_bitops */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_BITOPS_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_osk_list.h b/drivers/gpu/arm/mali400/mali/common/mali_osk_list.h
new file mode 100644 (file)
index 0000000..67046b3
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_list.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_LIST_H__
+#define __MALI_OSK_LIST_H__
+
+#include "mali_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+MALI_STATIC_INLINE void __mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+    /* Splice new_entry between prev and next: first wire up the new
+     * element's own links, then repoint its neighbours at it. */
+    new_entry->prev = prev;
+    new_entry->next = next;
+    prev->next = new_entry;
+    next->prev = new_entry;
+}
+
+MALI_STATIC_INLINE void __mali_osk_list_del(_mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+    /* Unlink whatever lies between prev and next by joining the two
+     * neighbours directly to each other. */
+    prev->next = next;
+    next->prev = prev;
+}
+
+/** @addtogroup _mali_osk_list
+ * @{ */
+
+/** Reference implementations of Doubly-linked Circular Lists are provided.
+ * There is often no need to re-implement these.
+ *
+ * @note The implementation may differ subtly from any lists the OS provides.
+ * For this reason, these lists should not be mixed with OS-specific lists
+ * inside the OSK/UKK implementation. */
+
+/** @brief Initialize a list element.
+ *
+ * All list elements must be initialized before use.
+ *
+ * Do not use on any list element that is present in a list without using
+ * _mali_osk_list_del first, otherwise this will break the list.
+ *
+ * @param list the list element to initialize
+ */
+MALI_STATIC_INLINE void _mali_osk_list_init( _mali_osk_list_t *list )
+{
+    /* An empty circular list is a single element linked to itself in
+     * both directions. */
+    list->prev = list;
+    list->next = list;
+}
+
+/** @brief Insert a single list element after an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the first element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ *
+ * @param newlist the list element to insert
+ * @param list the list in which to insert. The new element will be the next
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_add( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+{
+    /* Insert immediately after the head, i.e. new_entry becomes the
+     * first element of 'list'. */
+    __mali_osk_list_add(new_entry, list, list->next);
+}
+
+/** @brief Insert a single list element before an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the last element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ * @param newlist the list element to insert
+ * @param list the list in which to insert. The new element will be the previous
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_addtail( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+{
+    /* Insert immediately before the head, i.e. new_entry becomes the
+     * last element of 'list'. */
+    __mali_osk_list_add(new_entry, list->prev, list);
+}
+
+/** @brief Remove a single element from a list
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be uninitialized, and so should not be traversed. It must be
+ * initialized before further use.
+ *
+ * @param list the list element to remove.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_del( _mali_osk_list_t *list )
+{
+    /* Unlink 'list' from its neighbours. Note: the element's own
+     * next/prev pointers are left stale - re-init before reuse. */
+    __mali_osk_list_del(list->prev, list->next);
+}
+
+/** @brief Remove a single element from a list, and re-initialize it
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will initialized, and so can be used as normal.
+ *
+ * @param list the list element to remove and initialize.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_delinit( _mali_osk_list_t *list )
+{
+    /* Unlink from the current list, then make the removed element a
+     * valid (empty, self-linked) list again so it can be reused. */
+    _mali_osk_list_del(list);
+    _mali_osk_list_init(list);
+}
+
+/** @brief Determine whether a list is empty.
+ *
+ * An empty list is one that contains a single element that points to itself.
+ *
+ * @param list the list to check.
+ * @return non-zero if the list is empty, and zero otherwise.
+ */
+MALI_STATIC_INLINE mali_bool _mali_osk_list_empty( _mali_osk_list_t *list )
+{
+    /* An empty list's head links back to itself (see _mali_osk_list_init) */
+    return list->next == list;
+}
+
+/** @brief Move a list element from one list to another.
+ *
+ * The list element must be initialized.
+ *
+ * As an example, moving a list item to the head of a new list causes this item
+ * to be the first element in the new list.
+ *
+ * @param move the list element to move
+ * @param list the new list into which the element will be inserted, as the next
+ * element in the list.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move( _mali_osk_list_t *move_entry, _mali_osk_list_t *list )
+{
+    /* Unlink from the originating list... */
+    __mali_osk_list_del(move_entry->prev, move_entry->next);
+    /* ...then insert as the first element of the destination list. */
+    _mali_osk_list_add(move_entry, list);
+}
+
+/** @brief Move an entire list
+ *
+ * The list element must be initialized.
+ *
+ * Allows you to move a list from one list head to another list head
+ *
+ * @param old_list The existing list head
+ * @param new_list The new list head (must be an empty list)
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move_list( _mali_osk_list_t *old_list, _mali_osk_list_t *new_list )
+{
+       /* Destination must be empty - its current links are overwritten. */
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(new_list));
+       if (!_mali_osk_list_empty(old_list))
+       {
+               /* Adopt the old head's links... */
+               new_list->next = old_list->next;
+               new_list->prev = old_list->prev;
+               /* ...point the first and last elements at the new head... */
+               new_list->next->prev = new_list;
+               new_list->prev->next = new_list;
+               /* ...and leave the old head as a valid empty list. */
+               old_list->next = old_list;
+               old_list->prev = old_list;
+       }
+}
+/** @} */ /* end group _mali_osk_list */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_LIST_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_osk_mali.h b/drivers/gpu/arm/mali400/mali/common/mali_osk_mali.h
new file mode 100644 (file)
index 0000000..c8d5825
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.h
+ * Defines the OS abstraction layer which is specific for the Mali kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_MALI_H__
+#define __MALI_OSK_MALI_H__
+
+#include <mali_osk.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Struct with device specific configuration data
+ */
+struct _mali_osk_device_data
+{
+       /* Dedicated GPU memory range (physical). */
+       u32 dedicated_mem_start;
+       u32 dedicated_mem_size;
+
+       /* Shared GPU memory */
+       u32 shared_mem_size;
+
+       /* Frame buffer memory to be accessible by Mali GPU (physical) */
+       u32 fb_start;
+       u32 fb_size;
+
+       /* Report GPU utilization in this interval (specified in ms) */
+       u32 utilization_interval;
+
+       /* Function that will receive periodic GPU utilization numbers */
+       /* NOTE(review): the argument appears to be a utilization figure;
+        * its units/scale are defined by the utilization reporter - confirm
+        * against mali_kernel_utilization before relying on the value. */
+       void (*utilization_handler)(unsigned int);
+};
+
+/** @brief Find Mali GPU HW resource
+ *
+ * @param addr Address of Mali GPU resource to find
+ * @param res Storage for resource information if resource is found.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if resource is not found
+ */
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res);
+
+
+/** @brief Find Mali GPU HW base address
+ *
+ * @return 0 if resources are found, otherwise the Mali GPU component with lowest address.
+ */
+u32 _mali_osk_resource_base_address(void);
+
+/** @brief Retrieve the Mali GPU specific data
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_device_data_get(struct _mali_osk_device_data *data);
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Initialize a user-space accessible memory range
+ *
+ * This initializes a virtual address range such that it is reserved for the
+ * current process, but does not map any physical pages into this range.
+ *
+ * This function may initialize or adjust any members of the
+ * mali_memory_allocation \a descriptor supplied, before the physical pages are
+ * mapped in with _mali_osk_mem_mapregion_map().
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * The \a descriptor's process_addr_mapping_info member can be modified to
+ * allocate OS-specific information. Note that on input, this will be a
+ * ukk_private word from the U/K inteface, as inserted by _mali_ukk_mem_mmap().
+ * This is used to pass information from the U/K interface to the OSK interface,
+ * if necessary. The precise usage of the process_addr_mapping_info member
+ * depends on the U/K implementation of _mali_ukk_mem_mmap().
+ *
+ * Therefore, the U/K implementation of _mali_ukk_mem_mmap() and the OSK
+ * implementation of  _mali_osk_mem_mapregion_init() must agree on the meaning and
+ * usage of the ukk_private word and process_addr_mapping_info member.
+ *
+ * Refer to \ref u_k_api for more information on the U/K interface.
+ *
+ * On successful return, \a descriptor's mapping member will be correct for
+ * use with _mali_osk_mem_mapregion_term() and _mali_osk_mem_mapregion_map().
+ *
+ * @param descriptor the mali_memory_allocation to initialize.
+ */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_init( mali_memory_allocation * descriptor );
+
+/** @brief Terminate a user-space accessible memory range
+ *
+ * This terminates a virtual address range reserved in the current user process,
+ * where none, some or all of the virtual address ranges have mappings to
+ * physical pages.
+ *
+ * It will unmap any physical pages that had been mapped into a reserved
+ * virtual address range for the current process, and then releases the virtual
+ * address range. Any extra book-keeping information or resources allocated
+ * during _mali_osk_mem_mapregion_init() will also be released.
+ *
+ * The \a descriptor itself is not freed - this must be handled by the caller of
+ * _mali_osk_mem_mapregion_term().
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * @param descriptor the mali_memory_allocation to terminate.
+ */
+void _mali_osk_mem_mapregion_term( mali_memory_allocation * descriptor );
+
+/** @brief Map physical pages into a user process's virtual address range
+ *
+ * This is used to map a number of physically contigous pages into a
+ * user-process's virtual address range, which was previously reserved by a
+ * call to _mali_osk_mem_mapregion_init().
+ *
+ * This need not provide a mapping for the entire virtual address range
+ * reserved for \a descriptor - it may be used to map single pages per call.
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * The function may supply \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC.
+ * In this case, \a size must be set to \ref _MALI_OSK_CPU_PAGE_SIZE, and the function
+ * will allocate the physical page itself. The physical address of the
+ * allocated page will be returned through \a phys_addr.
+ *
+ * It is an error to set \a size != \ref _MALI_OSK_CPU_PAGE_SIZE while
+ * \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC,
+ * since it is not always possible for OSs to support such a setting through this
+ * interface.
+ *
+ * @note \b IMPORTANT: This code must validate the input parameters. If the
+ * range defined by \a offset and \a size is outside the range allocated in
+ * \a descriptor, then this function \b MUST not attempt any mapping, and must
+ * instead return a suitable \ref _mali_osk_errcode_t \b failure code.
+ *
+ * @param[in,out] descriptor the mali_memory_allocation representing the
+ * user-process's virtual address range to map into.
+ *
+ * @param[in] offset the offset into the virtual address range. This is only added
+ * to the mapping member of the \a descriptor, and not the \a phys_addr parameter.
+ * It must be a multiple of \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in,out] phys_addr a pointer to the physical base address to begin the
+ * mapping from. If \a size == \ref _MALI_OSK_CPU_PAGE_SIZE and
+ * \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, then this
+ * function will allocate the physical page itself, and return the
+ * physical address of the page through \a phys_addr, which will be aligned to
+ * \ref _MALI_OSK_CPU_PAGE_SIZE. Otherwise, \a *phys_addr must be aligned to
+ * \ref _MALI_OSK_CPU_PAGE_SIZE, and is unmodified after the call.
+ * \a phys_addr is unaffected by the \a offset parameter.
+ *
+ * @param[in] size the number of bytes to map in. This must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return _MALI_OSK_ERR_OK on sucess, otherwise a _mali_osk_errcode_t value
+ * on failure
+ *
+ * @note could expand to use _mali_osk_mem_mapregion_flags_t instead of
+ * \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, but note that we must
+ * also modify the mali process address manager in the mmu/memory engine code.
+ */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_map( mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size );
+
+
+/** @brief Unmap physical pages from a user process's virtual address range
+ *
+ * This is used to unmap a number of physically contigous pages from a
+ * user-process's virtual address range, which were previously mapped by a
+ * call to _mali_osk_mem_mapregion_map(). If the range specified was allocated
+ * from OS memory, then that memory will be returned to the OS. Whilst pages
+ * will be mapped out, the Virtual address range remains reserved, and at the
+ * same base address.
+ *
+ * When this function is used to unmap pages from OS memory
+ * (_mali_osk_mem_mapregion_map() was called with *phys_addr ==
+ * \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC), then the \a flags must
+ * include \ref _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR. This is because
+ * it is not always easy for an OS implementation to discover whether the
+ * memory was OS allocated or not (and so, how it should release the memory).
+ *
+ * For this reason, only a range of pages of the same allocation type (all OS
+ * allocated, or none OS allocacted) may be unmapped in one call. Multiple
+ * calls must be made if allocations of these different types exist across the
+ * entire region described by the \a descriptor.
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * @param[in,out] descriptor the mali_memory_allocation representing the
+ * user-process's virtual address range to map into.
+ *
+ * @param[in] offset the offset into the virtual address range. This is only added
+ * to the mapping member of the \a descriptor. \a offset must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in] size the number of bytes to unmap. This must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in] flags specifies how the memory should be unmapped. For a range
+ * of pages that were originally OS allocated, this must have
+ * \ref _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR set.
+ */
+void _mali_osk_mem_mapregion_unmap( mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags );
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_MALI_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_osk_profiling.h b/drivers/gpu/arm/mali400/mali/common/mali_osk_profiling.h
new file mode 100644 (file)
index 0000000..e2013b3
--- /dev/null
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_OSK_PROFILING_H__
+#define __MALI_OSK_PROFILING_H__
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+
+#include "mali_linux_trace.h"
+#include "mali_profiling_events.h"
+
+#define MALI_PROFILING_MAX_BUFFER_ENTRIES 1048576
+
+/* Sentinel meaning "no hardware counter assigned".
+ * Fix: removed the stray '=' - the original macro expanded to
+ * "= ((u32)-1)", which is a syntax error at any use site. */
+#define MALI_PROFILING_NO_HW_COUNTER ((u32)-1)
+
+/** @defgroup _mali_osk_profiling External profiling connectivity
+ * @{ */
+
+/**
+ * Initialize the profiling module.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start);
+
+/*
+ * Terminate the profiling module.
+ */
+void _mali_osk_profiling_term(void);
+
+/**
+ * Start recording profiling data
+ *
+ * The specified limit will determine how large the capture buffer is.
+ * MALI_PROFILING_MAX_BUFFER_ENTRIES determines the maximum size allowed by the device driver.
+ *
+ * @param limit The desired maximum number of events to record on input, the actual maximum on output.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_start(u32 * limit);
+
+/**
+ * Add an profiling event
+ *
+ * @param event_id The event identificator.
+ * @param data0 First data parameter, depending on event_id specified.
+ * @param data1 Second data parameter, depending on event_id specified.
+ * @param data2 Third data parameter, depending on event_id specified.
+ * @param data3 Fourth data parameter, depending on event_id specified.
+ * @param data4 Fifth data parameter, depending on event_id specified.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4) trace_mali_timeline_event((event_id), (data0), (data1), (data2), (data3), (data4))
+
+/**
+ * Report a hardware counter event.
+ *
+ * @param counter_id The ID of the counter.
+ * @param value The value of the counter.
+ */
+
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_report_hw_counter(counter_id, value) trace_mali_hw_counter(counter_id, value)
+
+/**
+ * Report SW counters
+ *
+ * @param counters array of counter values
+ */
+void _mali_osk_profiling_report_sw_counters(u32 *counters);
+
+/**
+ * Stop recording profiling data
+ *
+ * @param count Returns the number of recorded events.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_stop(u32 * count);
+
+/**
+ * Retrieves the number of events that can be retrieved
+ *
+ * @return The number of recorded events that can be retrieved.
+ */
+u32 _mali_osk_profiling_get_count(void);
+
+/**
+ * Retrieve an event
+ *
+ * @param index Event index (start with 0 and continue until this function fails to retrieve all events)
+ * @param timestamp The timestamp for the retrieved event will be stored here.
+ * @param event_id The event ID for the retrieved event will be stored here.
+ * @param data The 5 data values for the retrieved event will be stored here.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5]);
+
+/**
+ * Clear the recorded buffer.
+ *
+ * This is needed in order to start another recording.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_clear(void);
+
+/**
+ * Checks if a recording of profiling data is in progress
+ *
+ * @return MALI_TRUE if recording of profiling data is in progress, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_is_recording(void);
+
+/**
+ * Checks if profiling data is available for retrival
+ *
+ * @return MALI_TRUE if profiling data is avaiable, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_have_recording(void);
+
+/** @} */ /* end group _mali_osk_profiling */
+
+#else /* defined(CONFIG_MALI400_PROFILING)  && defined(CONFIG_TRACEPOINTS) */
+
+ /* Dummy add_event, for when profiling is disabled. */
+
+#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4)
+
+#endif /* defined(CONFIG_MALI400_PROFILING)  && defined(CONFIG_TRACEPOINTS) */
+
+#endif /* __MALI_OSK_PROFILING_H__ */
+
+
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_pm.c b/drivers/gpu/arm/mali400/mali/common/mali_pm.c
new file mode 100644 (file)
index 0000000..5d2b33f
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_pm.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_utilization.h"
+#include "mali_group.h"
+
+static mali_bool mali_power_on = MALI_FALSE;
+
+_mali_osk_errcode_t mali_pm_initialize(void)
+{
+       /* Enable OS runtime power management for the Mali device.
+        * Always succeeds. */
+       _mali_osk_pm_dev_enable();
+       return _MALI_OSK_ERR_OK;
+}
+
+void mali_pm_terminate(void)
+{
+       /* Counterpart of mali_pm_initialize(): disable OS runtime PM. */
+       _mali_osk_pm_dev_disable();
+}
+
+/* Track core activity for power management: every GP/PP start takes a
+ * runtime-PM reference (keeping the GPU powered) and every stop releases
+ * it; utilization accounting is updated alongside when enabled. */
+void mali_pm_core_event(enum mali_core_event core_event)
+{
+       MALI_DEBUG_ASSERT(MALI_CORE_EVENT_GP_START == core_event ||
+                         MALI_CORE_EVENT_PP_START == core_event ||
+                         MALI_CORE_EVENT_GP_STOP  == core_event ||
+                         MALI_CORE_EVENT_PP_STOP  == core_event);
+
+       switch (core_event)
+       {
+       case MALI_CORE_EVENT_GP_START:
+       case MALI_CORE_EVENT_PP_START:
+               /* A core begins working: pin the device powered. */
+               _mali_osk_pm_dev_ref_add();
+               if (mali_utilization_enabled())
+               {
+                       mali_utilization_core_start(_mali_osk_time_get_ns());
+               }
+               break;
+
+       default:
+               /* GP/PP stop: drop the reference taken at start. */
+               _mali_osk_pm_dev_ref_dec();
+               if (mali_utilization_enabled())
+               {
+                       mali_utilization_core_end(_mali_osk_time_get_ns());
+               }
+               break;
+       }
+}
+
+/* Reset GPU after power up */
+static void mali_pm_reset_gpu(void)
+{
+       /* Bring HW back to a known state after power was cut: caches
+        * first, then the GP/PP groups. */
+
+       /* Reset all L2 caches */
+       mali_l2_cache_reset_all();
+
+       /* Reset all groups */
+       mali_scheduler_reset_all_groups();
+}
+
+void mali_pm_os_suspend(void)
+{
+       MALI_DEBUG_PRINT(3, ("Mali PM: OS suspend\n"));
+       /* Quiesce the schedulers first so no new work reaches the HW,
+        * then cut power and record the powered-off state. */
+       mali_gp_scheduler_suspend();
+       mali_pp_scheduler_suspend();
+       mali_group_power_off();
+       mali_power_on = MALI_FALSE;
+}
+
+void mali_pm_os_resume(void)
+{
+       MALI_DEBUG_PRINT(3, ("Mali PM: OS resume\n"));
+       /* Only reset and re-power the HW if it was actually powered off. */
+       if (MALI_TRUE != mali_power_on)
+       {
+               mali_pm_reset_gpu();
+               mali_group_power_on();
+       }
+       /* Restart the schedulers only after power/reset are complete. */
+       mali_gp_scheduler_resume();
+       mali_pp_scheduler_resume();
+       mali_power_on = MALI_TRUE;
+}
+
+void mali_pm_runtime_suspend(void)
+{
+       MALI_DEBUG_PRINT(3, ("Mali PM: Runtime suspend\n"));
+       /* Runtime PM: the cores are already idle (see mali_pm_core_event),
+        * so just cut power - no scheduler suspension needed. */
+       mali_group_power_off();
+       mali_power_on = MALI_FALSE;
+}
+
+void mali_pm_runtime_resume(void)
+{
+       MALI_DEBUG_PRINT(3, ("Mali PM: Runtime resume\n"));
+       /* Reset and re-power only if power was actually cut since the
+        * last resume (mali_power_on tracks this). */
+       if (MALI_TRUE != mali_power_on)
+       {
+               mali_pm_reset_gpu();
+               mali_group_power_on();
+       }
+       mali_power_on = MALI_TRUE;
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_pm.h b/drivers/gpu/arm/mali400/mali/common/mali_pm.h
new file mode 100644 (file)
index 0000000..393582e
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PM_H__
+#define __MALI_PM_H__
+
+#include "mali_osk.h"
+
+enum mali_core_event
+{
+       MALI_CORE_EVENT_GP_START,
+       MALI_CORE_EVENT_GP_STOP,
+       MALI_CORE_EVENT_PP_START,
+       MALI_CORE_EVENT_PP_STOP
+};
+
+_mali_osk_errcode_t mali_pm_initialize(void);
+void mali_pm_terminate(void);
+
+void mali_pm_core_event(enum mali_core_event core_event);
+
+/* Callback functions registered for the runtime PMM system */
+void mali_pm_os_suspend(void);
+void mali_pm_os_resume(void);
+void mali_pm_runtime_suspend(void);
+void mali_pm_runtime_resume(void);
+
+
+#endif /* __MALI_PM_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_pmu.c b/drivers/gpu/arm/mali400/mali/common/mali_pmu.c
new file mode 100644 (file)
index 0000000..dc1ba34
--- /dev/null
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.c
+ * Mali driver functions for Mali 400 PMU hardware
+ */
+#include "mali_hw_core.h"
+#include "mali_pmu.h"
+#include "mali_pp.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+static u32 mali_pmu_detect_mask(u32 number_of_pp_cores, u32 number_of_l2_caches);
+
+/** @brief MALI inbuilt PMU hardware info and PMU hardware has knowledge of cores power mask
+ */
+struct mali_pmu_core
+{
+       struct mali_hw_core hw_core;            /* mapped PMU register block */
+       /* Bit mask of power domains (GP, L2, PP cores) managed by this PMU,
+        * computed by mali_pmu_detect_mask(). */
+       u32 mali_registered_cores_power_mask;
+};
+
+static struct mali_pmu_core *mali_global_pmu_core = NULL;
+
+/** @brief Register layout for hardware PMU
+ */
+typedef enum {
+       PMU_REG_ADDR_MGMT_POWER_UP                  = 0x00,     /**< Power up register */
+       PMU_REG_ADDR_MGMT_POWER_DOWN                = 0x04,     /**< Power down register */
+       PMU_REG_ADDR_MGMT_STATUS                    = 0x08,     /**< Core sleep status register */
+       PMU_REG_ADDR_MGMT_INT_MASK                  = 0x0C,     /**< Interrupt mask register */
+       PMU_REGISTER_ADDRESS_SPACE_SIZE             = 0x10,     /**< Size of register space */
+} pmu_reg_addr_mgmt_addr;
+
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource, u32 number_of_pp_cores, u32 number_of_l2_caches)
+{
+       struct mali_pmu_core* pmu;
+
+       /* Only a single PMU instance is supported; it is published through
+        * mali_global_pmu_core on success. */
+       MALI_DEBUG_ASSERT(NULL == mali_global_pmu_core);
+       MALI_DEBUG_PRINT(2, ("Mali PMU: Creating Mali PMU core\n"));
+
+       pmu = (struct mali_pmu_core *)_mali_osk_malloc(sizeof(struct mali_pmu_core));
+       if (NULL != pmu)
+       {
+               pmu->mali_registered_cores_power_mask = mali_pmu_detect_mask(number_of_pp_cores, number_of_l2_caches);
+               if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core, resource, PMU_REGISTER_ADDRESS_SPACE_SIZE))
+               {
+                       if (_MALI_OSK_ERR_OK == mali_pmu_reset(pmu))
+                       {
+                               mali_global_pmu_core = pmu;
+                               return pmu;
+                       }
+                       /* Reset failed: undo the HW core mapping before freeing. */
+                       mali_hw_core_delete(&pmu->hw_core);
+               }
+               /* HW core creation or reset failed: release the allocation. */
+               _mali_osk_free(pmu);
+       }
+
+       return NULL;
+}
+
+/* Destroy a PMU object created by mali_pmu_create().
+ * Fixes: (1) clear mali_global_pmu_core when deleting the published
+ * instance, so mali_pmu_get_global_pmu_core() cannot return a dangling
+ * pointer afterwards; (2) removed the dead "pmu = NULL" store, which only
+ * cleared the local parameter copy and had no effect for callers. */
+void mali_pmu_delete(struct mali_pmu_core *pmu)
+{
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+
+       if (pmu == mali_global_pmu_core)
+       {
+               mali_global_pmu_core = NULL;
+       }
+
+       mali_hw_core_delete(&pmu->hw_core);
+       _mali_osk_free(pmu);
+}
+
+/* Put the PMU into its initial state: interrupts masked, status polled.
+ * Fix: added the pointer assert for consistency with the other PMU entry
+ * points (powerdown/powerup/delete), since pmu is dereferenced below.
+ * @return _MALI_OSK_ERR_OK (register write cannot fail). */
+_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu)
+{
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+
+       /* Don't use interrupts - just poll status */
+       mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_pmu_powerdown_all(struct mali_pmu_core *pmu)
+{
+       u32 stat;
+       u32 timeout;
+
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+       MALI_DEBUG_ASSERT( pmu->mali_registered_cores_power_mask != 0 );
+       MALI_DEBUG_PRINT( 4, ("Mali PMU: power down (0x%08X)\n", pmu->mali_registered_cores_power_mask) );
+
+       /* Request power-down of every domain this PMU manages. */
+       mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_DOWN, pmu->mali_registered_cores_power_mask);
+
+       /* Busy-poll the sleep-status register until all requested cores
+        * report asleep. NOTE(review): there is no delay between polls, so
+        * this is a spin of MALI_REG_POLL_COUNT_SLOW register reads, not the
+        * "100 x 100us = 100ms" a literal reading suggests - confirm the
+        * poll count yields an adequate timeout on the target bus. */
+       timeout = MALI_REG_POLL_COUNT_SLOW ;
+       do
+       {
+               /* Get status of sleeping cores */
+               stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+               stat &= pmu->mali_registered_cores_power_mask;
+               if( stat == pmu->mali_registered_cores_power_mask ) break; /* All cores we wanted are now asleep */
+               timeout--;
+       } while( timeout > 0 );
+
+       if( timeout == 0 )
+       {
+               return _MALI_OSK_ERR_TIMEOUT;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_pmu_powerup_all(struct mali_pmu_core *pmu)
+{
+       u32 stat;
+       u32 timeout;
+       
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+       MALI_DEBUG_ASSERT( pmu->mali_registered_cores_power_mask != 0 ); /* Shouldn't be zero */
+       MALI_DEBUG_PRINT( 4, ("Mali PMU: power up (0x%08X)\n", pmu->mali_registered_cores_power_mask) );
+
+       /* Request power-up of every domain this PMU manages. */
+       mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_UP, pmu->mali_registered_cores_power_mask);
+
+       /* Busy-poll until no managed core reports asleep (status bit clear
+        * means awake). NOTE(review): no delay between polls - this is a
+        * spin of MALI_REG_POLL_COUNT_SLOW register reads, not a timed
+        * "100 x 100us = 100ms" wait; confirm the timeout is adequate. */
+       timeout = MALI_REG_POLL_COUNT_SLOW;
+       do
+       {
+               /* Get status of sleeping cores */
+               stat = mali_hw_core_register_read(&pmu->hw_core,PMU_REG_ADDR_MGMT_STATUS);
+               stat &= pmu->mali_registered_cores_power_mask;
+               if ( stat == 0 ) break; /* All cores we wanted are now awake */
+               timeout--;
+       } while ( timeout > 0 );
+
+       if ( timeout == 0 )
+       {
+               return _MALI_OSK_ERR_TIMEOUT;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+struct mali_pmu_core *mali_pmu_get_global_pmu_core(void)
+{
+       /* The single instance registered by mali_pmu_create(), or NULL if
+        * no PMU has been created. */
+       return mali_global_pmu_core;
+}
+
+/* Derive the PMU power-domain bit mask from the detected core topology.
+ * One L2 cache implies Mali-300/400 (one domain per core); more than one
+ * implies Mali-450 (domains group cores with their L2). Returns 0 if no
+ * L2 caches were found. */
+static u32 mali_pmu_detect_mask(u32 number_of_pp_cores, u32 number_of_l2_caches)
+{
+       u32 mask = 0;
+
+       if (number_of_l2_caches == 1)
+       {
+               /* Mali-300 or Mali-400 */
+               u32 i;
+
+               /* GP */
+               mask = 0x01;
+
+               /* L2 cache */
+               mask |= 0x01<<1;
+
+               /* Set bit for each PP core */
+               for (i = 0; i < number_of_pp_cores; i++)
+               {
+                       mask |= 0x01<<(i+2);
+               }
+       }
+       else if (number_of_l2_caches > 1)
+       {
+               /* Mali-450 */
+
+               /* GP (including its L2 cache) */
+               mask = 0x01;
+
+               /* There is always at least one PP (including its L2 cache) */
+               mask |= 0x01<<1;
+
+               /* Additional PP cores in same L2 cache */
+               /* NOTE(review): bit assignments below encode the Mali-450
+                * domain grouping (2nd domain at >=2 PP cores, 3rd at >=5);
+                * verify against the Mali-450 PMU integration documentation. */
+               if (number_of_pp_cores >= 2)
+               {
+                       mask |= 0x01<<2;
+               }
+
+               /* Additional PP cores in a third L2 cache */
+               if (number_of_pp_cores >= 5)
+               {
+                       mask |= 0x01<<3;
+               }
+       }
+
+       MALI_DEBUG_PRINT(4, ("Mali PMU: Power mask is 0x%08X (%u + %u)\n", mask, number_of_pp_cores, number_of_l2_caches));
+
+       return mask;
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_pmu.h b/drivers/gpu/arm/mali400/mali/common/mali_pmu.h
new file mode 100644 (file)
index 0000000..399755d
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.h
+ * Interface to the Mali GPU built-in Power Management Unit (PMU)
+ */
+
+#include "mali_osk.h"
+
+struct mali_pmu_core;
+
+/** @brief Initialisation of MALI PMU
+ * 
+ * This is called from the entry point of the driver in order to create and initialize the PMU resource
+ *
+ * @param resource it will be a pointer to a PMU resource
+ * @param number_of_pp_cores Number of found PP resources in configuration
+ * @param number_of_l2_caches Number of found L2 cache resources in configuration
+ * @return The created PMU object, or NULL in case of failure.
+ */
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource, u32 number_of_pp_cores, u32 number_of_l2_caches);
+
+/** @brief It deallocates the PMU resource
+ * 
+ * This is called on the exit of the driver to terminate the PMU resource
+ *
+ * @param pmu Pointer to PMU core object to delete
+ */
+void mali_pmu_delete(struct mali_pmu_core *pmu);
+
+/** @brief Reset PMU core
+ *
+ * @param pmu Pointer to PMU core object to reset
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu);
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ *
+ * called to power down all cores
+ *
+ * @param pmu Pointer to PMU core object to power down
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_powerdown_all(struct mali_pmu_core *pmu);
+
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ * 
+ * called to power up all cores
+ *
+ * @param pmu Pointer to PMU core object to power up
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_powerup_all(struct mali_pmu_core *pmu);
+
+
+/** @brief Retrieves the Mali PMU core object (if any)
+ * 
+ * @return The Mali PMU object, or NULL if no PMU exists.
+ */
+struct mali_pmu_core *mali_pmu_get_global_pmu_core(void);
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_pp.c b/drivers/gpu/arm/mali400/mali/common/mali_pp.c
new file mode 100644 (file)
index 0000000..be0bb4b
--- /dev/null
@@ -0,0 +1,515 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_pp_job.h"
+#include "mali_pp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+/* Number of frame registers on Mali-200 */
+#define MALI_PP_MALI200_NUM_FRAME_REGISTERS ((0x04C/4)+1)
+/* Number of frame registers on Mali-300 and later */
+#define MALI_PP_MALI400_NUM_FRAME_REGISTERS ((0x058/4)+1)
+
+static struct mali_pp_core* mali_global_pp_cores[MALI_MAX_NUMBER_OF_PP_CORES];
+static u32 mali_global_num_pp_cores = 0;
+
+/* Interrupt handlers */
+static void mali_pp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data);
+
+/** @brief Create and register a PP core object.
+ *
+ * Allocates the core object, maps its HW register bank, resets the core
+ * (skipped for virtual cores), attaches it to @a group and installs the IRQ
+ * handlers.  On success the core is entered into the global PP core list.
+ * On any failure every step already taken is undone before returning.
+ *
+ * @param resource   Resource describing the register base, IRQ and name.
+ * @param group      Group to add the new core to.
+ * @param is_virtual MALI_TRUE when creating a virtual core (no HW reset is
+ *                   performed; the resource IRQ must be valid in that case).
+ * @return The new core object, or NULL on failure.
+ */
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual)
+{
+       struct mali_pp_core* core = NULL;
+
+       MALI_DEBUG_PRINT(2, ("Mali PP: Creating Mali PP core: %s\n", resource->description));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Base address of PP core: 0x%x\n", resource->base));
+
+       if (mali_global_num_pp_cores >= MALI_MAX_NUMBER_OF_PP_CORES)
+       {
+               MALI_PRINT_ERROR(("Mali PP: Too many PP core objects created\n"));
+               return NULL;
+       }
+
+       core = _mali_osk_malloc(sizeof(struct mali_pp_core));
+       if (NULL != core)
+       {
+               core->core_id = mali_global_num_pp_cores;
+               /* No performance counters selected until a job programs them */
+               core->counter_src0_used = MALI_HW_CORE_NO_COUNTER;
+               core->counter_src1_used = MALI_HW_CORE_NO_COUNTER;
+
+               if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI200_REG_SIZEOF_REGISTER_BANK))
+               {
+                       _mali_osk_errcode_t ret;
+
+                       /* Virtual cores have no real HW to reset */
+                       if (!is_virtual)
+                       {
+                               ret = mali_pp_reset(core);
+                       }
+                       else
+                       {
+                               ret = _MALI_OSK_ERR_OK;
+                       }
+
+                       if (_MALI_OSK_ERR_OK == ret)
+                       {
+                               ret = mali_group_add_pp_core(group, core);
+                               if (_MALI_OSK_ERR_OK == ret)
+                               {
+                                       /* Setup IRQ handlers (which will do IRQ probing if needed) */
+                                       MALI_DEBUG_ASSERT(!is_virtual || -1 != resource->irq);
+
+                                       core->irq = _mali_osk_irq_init(resource->irq,
+                                                                      mali_group_upper_half_pp,
+                                                                      group,
+                                                                      mali_pp_irq_probe_trigger,
+                                                                      mali_pp_irq_probe_ack,
+                                                                      core,
+                                                                      "mali_pp_irq_handlers");
+                                       if (NULL != core->irq)
+                                       {
+                                               /* Fully initialized: publish in the global list */
+                                               mali_global_pp_cores[mali_global_num_pp_cores] = core;
+                                               mali_global_num_pp_cores++;
+
+                                               return core;
+                                       }
+                                       else
+                                       {
+                                               MALI_PRINT_ERROR(("Mali PP: Failed to setup interrupt handlers for PP core %s\n", core->hw_core.description));
+                                       }
+                                       /* Unwind the group attach done above */
+                                       mali_group_remove_pp_core(group);
+                               }
+                               else
+                               {
+                                       MALI_PRINT_ERROR(("Mali PP: Failed to add core %s to group\n", core->hw_core.description));
+                               }
+                       }
+                       /* Unwind the HW core mapping */
+                       mali_hw_core_delete(&core->hw_core);
+               }
+
+               _mali_osk_free(core);
+       }
+       else
+       {
+               MALI_PRINT_ERROR(("Mali PP: Failed to allocate memory for PP core\n"));
+       }
+
+       return NULL;
+}
+
+/** @brief Destroy a PP core object.
+ *
+ * Terminates the IRQ handler, releases the HW core, unlinks the core from
+ * the global PP core list and frees the object itself.
+ */
+void mali_pp_delete(struct mali_pp_core *core)
+{
+       u32 idx;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       _mali_osk_irq_term(core->irq);
+       mali_hw_core_delete(&core->hw_core);
+
+       /* Unlink this core from the global list */
+       for (idx = 0; idx < MALI_MAX_NUMBER_OF_PP_CORES; idx++)
+       {
+               if (core != mali_global_pp_cores[idx])
+               {
+                       continue;
+               }
+
+               mali_global_pp_cores[idx] = NULL;
+               mali_global_num_pp_cores--;
+               break;
+       }
+
+       _mali_osk_free(core);
+}
+
+/** @brief Issue the stop-bus command to the core.
+ *
+ * Fire-and-forget: does not wait for the bus to actually stop.  Use
+ * mali_pp_stop_bus_wait() when completion must be confirmed.
+ */
+void mali_pp_stop_bus(struct mali_pp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+       /* Will only send the stop bus command, and not wait for it to complete */
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+}
+
+/** @brief Stop the core's bus and poll until the stop takes effect.
+ *
+ * @param core The PP core whose bus should be stopped.
+ * @return _MALI_OSK_ERR_OK when the BUS_STOPPED status bit is observed,
+ *         _MALI_OSK_ERR_FAULT if it is not seen within
+ *         MALI_REG_POLL_COUNT_FAST register reads.
+ */
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core)
+{
+       int i;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       /* Send the stop bus command. */
+       mali_pp_stop_bus(core);
+
+       /* Wait for bus to be stopped */
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++)
+       {
+               if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED)
+                       break;
+       }
+
+       if (MALI_REG_POLL_COUNT_FAST == i)
+       {
+               MALI_PRINT_ERROR(("Mali PP: Failed to stop bus on %s. Status: 0x%08x\n", core->hw_core.description, mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+               return _MALI_OSK_ERR_FAULT;
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Frame register reset values.
+ * Taken from the Mali400 TRM, 3.6. Pixel processor control register summary */
+static const u32 mali_frame_registers_reset_values[_MALI_PP_MAX_FRAME_REGISTERS] =
+{
+       0x0, /* Renderer List Address Register */
+       0x0, /* Renderer State Word Base Address Register */
+       0x0, /* Renderer Vertex Base Register */
+       0x2, /* Feature Enable Register */
+       0x0, /* Z Clear Value Register */
+       0x0, /* Stencil Clear Value Register */
+       0x0, /* ABGR Clear Value 0 Register */
+       0x0, /* ABGR Clear Value 1 Register */
+       0x0, /* ABGR Clear Value 2 Register */
+       0x0, /* ABGR Clear Value 3 Register */
+       0x0, /* Bounding Box Left Right Register */
+       0x0, /* Bounding Box Bottom Register */
+       0x0, /* FS Stack Address Register */
+       0x0, /* FS Stack Size and Initial Value Register */
+       0x0, /* Reserved */
+       0x0, /* Reserved */
+       0x0, /* Origin Offset X Register */
+       0x0, /* Origin Offset Y Register */
+       0x75, /* Subpixel Specifier Register */
+       0x0, /* Tiebreak mode Register */
+       0x0, /* Polygon List Format Register */
+       0x0, /* Scaling Register */
+       0x0 /* Tilebuffer configuration Register */
+};
+
+/* WBx register reset values */
+static const u32 mali_wb_registers_reset_values[_MALI_PP_MAX_WB_REGISTERS] =
+{
+       0x0, /* WBx Source Select Register */
+       0x0, /* WBx Target Address Register */
+       0x0, /* WBx Target Pixel Format Register */
+       0x0, /* WBx Target AA Format Register */
+       0x0, /* WBx Target Layout */
+       0x0, /* WBx Target Scanline Length */
+       0x0, /* WBx Target Flags Register */
+       0x0, /* WBx MRT Enable Register */
+       0x0, /* WBx MRT Offset Register */
+       0x0, /* WBx Global Test Enable Register */
+       0x0, /* WBx Global Test Reference Value Register */
+       0x0  /* WBx Global Test Compare Function Register */
+};
+
+/* Performance Counter 0 Enable Register reset value */
+static const u32 mali_perf_cnt_enable_reset_value = 0;
+
+/** @brief Force a hard reset of the PP core.
+ *
+ * Caller contract: the bus must already be stopped.  Completion is detected
+ * by repeatedly writing a check value to WRITE_BOUNDARY_LOW and polling until
+ * it reads back.  Interrupts are cleared and re-enabled afterwards.
+ *
+ * @param core The PP core to reset.
+ * @return Always _MALI_OSK_ERR_OK; a reset that never completes is only
+ *         reported via MALI_PRINT_ERROR.
+ */
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core)
+{
+       /* Bus must be stopped before calling this function */
+       const u32 reset_invalid_value = 0xC0FFE000;
+       const u32 reset_check_value = 0xC01A0000;
+       int i;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+       MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description));
+
+       /* Set register to a bogus value. The register will be used to detect when reset is complete */
+       mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_invalid_value);
+       mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+
+       /* Force core to reset */
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET);
+
+       /* Wait for reset to be complete */
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++)
+       {
+               mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_check_value);
+               if (reset_check_value == mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW))
+               {
+                       break;
+               }
+       }
+
+       if (MALI_REG_POLL_COUNT_FAST == i)
+       {
+               MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n"));
+       }
+
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, 0x00000000); /* set it back to the default */
+       /* Re-enable interrupts */
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/** @brief Start an asynchronous soft reset of the core.
+ *
+ * Masks the IRQs, writes all bits to INT_RAWSTAT and issues the SOFT_RESET
+ * command.  Completion must be awaited with mali_pp_reset_wait().
+ */
+void mali_pp_reset_async(struct mali_pp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       MALI_DEBUG_PRINT(4, ("Mali PP: Reset of core %s\n", core->hw_core.description));
+
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_MASK_ALL);
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET);
+}
+
+/** @brief Wait for a previously started (asynchronous) reset to complete.
+ *
+ * Polls INT_RAWSTAT for the RESET_COMPLETED bit, then re-enables the
+ * interrupts that the reset sequence masked off.
+ *
+ * @param core The PP core being reset.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if the reset did
+ *         not complete within MALI_REG_POLL_COUNT_FAST polls.
+ */
+_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core)
+{
+       int i;
+       u32 rawstat = 0;
+
+       /* Consistency fix: every sibling function asserts the core pointer
+        * before dereferencing it; this one did not. */
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       /* TODO: For virtual Mali-450 core, check that PP active in STATUS is 0 (this must be initiated from group) */
+
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++)
+       {
+               rawstat = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT);
+               if (rawstat & MALI400PP_REG_VAL_IRQ_RESET_COMPLETED)
+               {
+                       break;
+               }
+       }
+
+       if (i == MALI_REG_POLL_COUNT_FAST)
+       {
+               MALI_PRINT_ERROR(("Mali PP: Failed to reset core %s, rawstat: 0x%08x\n",
+                                core->hw_core.description, rawstat));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Re-enable interrupts */
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/** @brief Synchronous reset: start a soft reset and wait for it to finish. */
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core)
+{
+       mali_pp_reset_async(core);
+       return mali_pp_reset_wait(core);
+}
+
+/** @brief Program all job registers on the core and start rendering.
+ *
+ * Writes the frame registers, write-back unit registers and (if selected)
+ * the performance-counter setup from @a job, then issues START_RENDERING.
+ *
+ * Fix: the original wrote core->counter_src0_used/src1_used *before*
+ * MALI_DEBUG_ASSERT_POINTER(core), making the assert useless; the pointer is
+ * now checked before the first dereference.
+ *
+ * @param core            The PP core to start the job on.
+ * @param job             The PP job supplying the register values.
+ * @param sub_job         Index of the sub job to start.
+ * @param restart_virtual MALI_TRUE forces writing the stack address even for
+ *                        a virtual job.
+ */
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual)
+{
+       u32 num_frame_registers;
+       u32 relative_address;
+       u32 start_index;
+       u32 nr_of_regs;
+       u32 *frame_registers = mali_pp_job_get_frame_registers(job);
+       u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
+       u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
+       u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       core->counter_src0_used = mali_pp_job_get_perf_counter_src0(job);
+       core->counter_src1_used = mali_pp_job_get_perf_counter_src1(job);
+
+       /* Write frame registers */
+       num_frame_registers = (_MALI_PRODUCT_ID_MALI200 == mali_kernel_core_get_product_id()) ? MALI_PP_MALI200_NUM_FRAME_REGISTERS : MALI_PP_MALI400_NUM_FRAME_REGISTERS;
+
+       /*
+        * There are two frame registers which are different for each sub job:
+        * 1. The Renderer List Address Register (MALI200_REG_ADDR_FRAME)
+        * 2. The FS Stack Address Register (MALI200_REG_ADDR_STACK)
+        */
+       mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_FRAME, mali_pp_job_get_addr_frame(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_FRAME / sizeof(u32)]);
+
+       /* For virtual jobs, the stack address shouldn't be broadcast but written individually */
+       if (!mali_pp_job_is_virtual(job) || restart_virtual)
+       {
+               mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_STACK, mali_pp_job_get_addr_stack(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_STACK / sizeof(u32)]);
+       }
+
+       /* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */
+       relative_address = MALI200_REG_ADDR_RSW;
+       start_index = MALI200_REG_ADDR_RSW / sizeof(u32);
+       nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32);
+
+       mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
+               relative_address, &frame_registers[start_index],
+               nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+       /* MALI200_REG_ADDR_STACK_SIZE */
+       relative_address = MALI200_REG_ADDR_STACK_SIZE;
+       start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32);
+
+       mali_hw_core_register_write_relaxed_conditional(&core->hw_core,
+               relative_address, frame_registers[start_index],
+               mali_frame_registers_reset_values[start_index]);
+
+       /* Skip 2 reserved registers */
+
+       /* Write remaining registers */
+       relative_address = MALI200_REG_ADDR_ORIGIN_OFFSET_X;
+       start_index = MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+       nr_of_regs = num_frame_registers - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+
+       mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
+               relative_address, &frame_registers[start_index],
+               nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+       /* Write WBx registers */
+       if (wb0_registers[0]) /* M200_WB0_REG_SOURCE_SELECT register */
+       {
+               mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+       }
+
+       if (wb1_registers[0]) /* M200_WB1_REG_SOURCE_SELECT register */
+       {
+               mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+       }
+
+       if (wb2_registers[0]) /* M200_WB2_REG_SOURCE_SELECT register */
+       {
+               mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+       }
+
+       if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
+       {
+               mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
+               mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+       }
+       if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+       {
+               mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
+               mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+       }
+
+       MALI_DEBUG_PRINT(3, ("Mali PP: Starting job 0x%08X part %u/%u on PP core %s\n", job, sub_job + 1, mali_pp_job_get_sub_job_count(job), core->hw_core.description));
+
+       /* Adding barrier to make sure all register writes are finished */
+       _mali_osk_write_mem_barrier();
+
+       /* This is the command that starts the core. */
+       mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
+
+       /* Adding barrier to make sure the previous register write is finished */
+       _mali_osk_write_mem_barrier();
+}
+
+/** @brief Read the core's VERSION register. */
+u32 mali_pp_core_get_version(struct mali_pp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+       return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION);
+}
+
+/** @brief Look up a PP core in the global list by index.
+ *
+ * @return The core at @a index, or NULL when the index is out of range.
+ */
+struct mali_pp_core* mali_pp_get_global_pp_core(u32 index)
+{
+       if (index >= MALI_MAX_NUMBER_OF_PP_CORES)
+       {
+               return NULL;
+       }
+
+       return mali_global_pp_cores[index];
+}
+
+/** @brief Number of PP cores currently registered in the global list. */
+u32 mali_pp_get_glob_num_pp_cores(void)
+{
+       return mali_global_num_pp_cores;
+}
+
+/* ------------- interrupt handling below ------------------ */
+/* IRQ probe helper: unmask the used interrupts and raise the FORCE_HANG bit
+ * in INT_RAWSTAT so the OSK IRQ probing can observe which line fires. */
+static void mali_pp_irq_probe_trigger(void *data)
+{
+       struct mali_pp_core *core = (struct mali_pp_core *)data;
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_FORCE_HANG);
+       _mali_osk_mem_barrier();
+}
+
+/* IRQ probe helper: acknowledge the forced HANG interrupt if pending.
+ * Returns _MALI_OSK_ERR_OK when the expected interrupt was seen (and
+ * cleared), _MALI_OSK_ERR_FAULT otherwise. */
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data)
+{
+       struct mali_pp_core *core = (struct mali_pp_core *)data;
+       u32 irq_readout;
+
+       irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+       if (MALI200_REG_VAL_IRQ_FORCE_HANG & irq_readout)
+       {
+               mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_FORCE_HANG);
+               _mali_osk_mem_barrier();
+               return _MALI_OSK_ERR_OK;
+       }
+
+       return _MALI_OSK_ERR_FAULT;
+}
+
+
+#if 0
+static void mali_pp_print_registers(struct mali_pp_core *core)
+{
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_VERSION = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_MASK = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE)));
+}
+#endif
+
+#if 0
+void mali_pp_print_state(struct mali_pp_core *core)
+{
+       MALI_DEBUG_PRINT(2, ("Mali PP: State: 0x%08x\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) ));
+}
+#endif
+
+/** @brief Read back the HW performance counters into the job object.
+ *
+ * For each counter source selected when the job was started, reads the
+ * counter VALUE register, stores it in @a job for sub job @a subjob, and
+ * (when CONFIG_MALI400_PROFILING is set) reports it to the profiling layer
+ * using two consecutive counter slots per core.
+ */
+void mali_pp_update_performance_counters(struct mali_pp_core *core, struct mali_pp_job *job, u32 subjob)
+{
+       u32 val0 = 0;
+       u32 val1 = 0;
+#if defined(CONFIG_MALI400_PROFILING)
+       int counter_index = COUNTER_FP0_C0 + (2 * core->core_id);
+#endif
+
+       if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
+       {
+               val0 = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+
+               mali_pp_job_set_perf_counter_value0(job, subjob, val0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+               /*todo: check if the group is virtual - in such case, does it make sense to send a HW counter ?*/
+               _mali_osk_profiling_report_hw_counter(counter_index, val0);
+#endif
+       }
+
+       if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+       {
+               val1 = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+               mali_pp_job_set_perf_counter_value1(job, subjob, val1);
+
+#if defined(CONFIG_MALI400_PROFILING)
+               /*todo: check if the group is virtual - in such case, does it make sense to send a HW counter ?*/
+               _mali_osk_profiling_report_hw_counter(counter_index + 1, val1);
+#endif
+       }
+}
+
+#if MALI_STATE_TRACKING
+/** @brief Append a one-line description of this PP core to a state dump.
+ *
+ * @return Number of characters written into @a buf.
+ */
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size)
+{
+       int written;
+
+       written = _mali_osk_snprintf(buf, size, "\tPP #%d: %s\n", core->core_id, core->hw_core.description);
+
+       return written;
+}
+#endif
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_pp.h b/drivers/gpu/arm/mali400/mali/common/mali_pp.h
new file mode 100644 (file)
index 0000000..d654df2
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_H__
+#define __MALI_PP_H__
+
+#include "mali_osk.h"
+#include "mali_pp_job.h"
+#include "mali_hw_core.h"
+
+struct mali_group;
+
+#define MALI_MAX_NUMBER_OF_PP_CORES        9
+
+/**
+ * Definition of the PP core struct
+ * Used to track a PP core in the system.
+ */
+struct mali_pp_core
+{
+       struct mali_hw_core  hw_core;           /**< Common for all HW cores */
+       _mali_osk_irq_t     *irq;               /**< IRQ handler */
+       u32                  core_id;           /**< Unique core ID */
+       u32                  counter_src0_used; /**< The selected performance counter 0 when a job is running */
+       u32                  counter_src1_used; /**< The selected performance counter 1 when a job is running */
+};
+
+_mali_osk_errcode_t mali_pp_initialize(void);
+void mali_pp_terminate(void);
+
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t * resource, struct mali_group *group, mali_bool is_virtual);
+void mali_pp_delete(struct mali_pp_core *core);
+
+void mali_pp_stop_bus(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core);
+void mali_pp_reset_async(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core);
+
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual);
+
+u32 mali_pp_core_get_version(struct mali_pp_core *core);
+
+/** @brief Return the unique ID assigned to this core at creation time. */
+MALI_STATIC_INLINE u32 mali_pp_core_get_id(struct mali_pp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+       return core->core_id;
+}
+
+struct mali_pp_core* mali_pp_get_global_pp_core(u32 index);
+u32 mali_pp_get_glob_num_pp_cores(void);
+
+/* Debug */
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size);
+
+void mali_pp_update_performance_counters(struct mali_pp_core *core, struct mali_pp_job *job, u32 subjob);
+
+/** @brief Human-readable description of the underlying HW core. */
+MALI_STATIC_INLINE const char *mali_pp_get_hw_core_desc(struct mali_pp_core *core)
+{
+       return core->hw_core.description;
+}
+
+/*** Register reading/writing functions ***/
+
+/** @brief Read the (masked) interrupt STATUS register. */
+MALI_STATIC_INLINE u32 mali_pp_get_int_stat(struct mali_pp_core *core)
+{
+       return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+}
+
+/** @brief Read INT_RAWSTAT, filtered down to the interrupt bits in use. */
+MALI_STATIC_INLINE u32 mali_pp_read_rawstat(struct mali_pp_core *core)
+{
+       return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;
+}
+
+/** @brief Mask (disable) all interrupts from this core. */
+MALI_STATIC_INLINE void mali_pp_mask_all_interrupts(struct mali_pp_core *core)
+{
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+}
+
+/** @brief Acknowledge (clear) a pending HANG interrupt. */
+MALI_STATIC_INLINE void mali_pp_clear_hang_interrupt(struct mali_pp_core *core)
+{
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_HANG);
+}
+
+/** @brief Unmask (enable) the interrupts the driver makes use of. */
+MALI_STATIC_INLINE void mali_pp_enable_interrupts(struct mali_pp_core *core)
+{
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+}
+
+/** @brief Write this core's per-core stack address from @a job to the
+ *  STACK register (used for virtual jobs, where broadcast is not wanted). */
+MALI_STATIC_INLINE void mali_pp_write_addr_stack(struct mali_pp_core *core, struct mali_pp_job *job)
+{
+       u32 addr = mali_pp_job_get_addr_stack(job, core->core_id);
+       mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_STACK, addr);
+}
+
+#endif /* __MALI_PP_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_pp_job.c b/drivers/gpu/arm/mali400/mali/common/mali_pp_job.c
new file mode 100644 (file)
index 0000000..f863e9d
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_pp_job.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+
+static u32 pp_counter_src0 = MALI_HW_CORE_NO_COUNTER;      /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 pp_counter_src1 = MALI_HW_CORE_NO_COUNTER;      /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+
+/**
+ * Allocate and initialize a PP job from the user-space start-job arguments.
+ *
+ * Copies @uargs from user space, validates the requested core count,
+ * optionally allocates the "finished" notification, and seeds the job's
+ * bookkeeping fields. Returns NULL on copy failure, validation failure or
+ * allocation failure; on success the caller owns the job and must release
+ * it with mali_pp_job_delete().
+ */
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id)
+{
+       struct mali_pp_job *job;
+       u32 perf_counter_flag;
+
+       job = _mali_osk_malloc(sizeof(struct mali_pp_job));
+       if (NULL != job)
+       {
+               u32 i;
+
+               /* Pull the whole argument block into kernel space before trusting any field. */
+               if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s)))
+               {
+                       _mali_osk_free(job);
+                       return NULL;
+               }
+
+               /* Reject jobs requesting more sub jobs than the per-job arrays can hold. */
+               if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS)
+               {
+                       MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
+                       _mali_osk_free(job);
+                       return NULL;
+               }
+
+               /* Userspace may opt out of the completion notification via a job flag. */
+               if (!mali_pp_job_use_no_notification(job))
+               {
+                       job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
+                       if (NULL == job->finished_notification)
+                       {
+                               _mali_osk_free(job);
+                               return NULL;
+                       }
+               }
+               else
+               {
+                       job->finished_notification = NULL;
+               }
+
+               perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);
+
+               /* case when no counters came from user space
+                * so pass the debugfs / DS-5 provided global ones to the job object */
+               if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
+                               (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)))
+               {
+                       mali_pp_job_set_perf_counter_src0(job, mali_pp_job_get_pp_counter_src0());
+                       mali_pp_job_set_perf_counter_src1(job, mali_pp_job_get_pp_counter_src1());
+               }
+
+               _mali_osk_list_init(&job->list);
+               job->session = session;
+               _mali_osk_list_init(&job->session_list);
+               job->id = id;
+
+               /* NOTE(review): this loop zeroes num_cores entries, but for a virtual
+                * job (num_cores == 0) sub_jobs_num below becomes 1, so
+                * perf_counter_value0[0]/value1[0] stay uninitialized if
+                * _mali_osk_malloc does not zero memory -- TODO confirm. */
+               for (i = 0; i < job->uargs.num_cores; i++)
+               {
+                       job->perf_counter_value0[i] = 0;
+                       job->perf_counter_value1[i] = 0;
+               }
+               /* num_cores == 0 denotes a virtual job; it still counts as one sub job. */
+               job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
+               job->sub_jobs_started = 0;
+               job->sub_jobs_completed = 0;
+               job->sub_job_errors = 0;
+               job->pid = _mali_osk_get_pid();
+               job->tid = _mali_osk_get_tid();
+#if defined(CONFIG_SYNC)
+/* MALI_SEC */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+               job->sync_point = NULL;
+               job->pre_fence = NULL;
+               job->sync_work = NULL;
+#endif
+#endif
+
+               return job;
+       }
+
+       return NULL;
+}
+
+/**
+ * Release a PP job created by mali_pp_job_create().
+ *
+ * Drops sync fence references, deletes the pending "finished" notification
+ * (if one was allocated) and frees the job itself.
+ */
+void mali_pp_job_delete(struct mali_pp_job *job)
+{
+#ifdef CONFIG_SYNC
+/* MALI_SEC */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+       /* NOTE(review): only the fence behind sync_point is released here, not the
+        * sync point object itself -- presumably the point is owned by its
+        * timeline/fence; TODO confirm against the sync framework usage. */
+       if (NULL != job->pre_fence) sync_fence_put(job->pre_fence);
+       if (NULL != job->sync_point) sync_fence_put(job->sync_point->fence);
+#endif
+#endif
+       if (NULL != job->finished_notification)
+       {
+               _mali_osk_notification_delete(job->finished_notification);
+       }
+       _mali_osk_free(job);
+}
+
+/* Accessors for the global (debugfs / DS-5 supplied) PP performance counter
+ * sources (pp_counter_src0/1 above); MALI_HW_CORE_NO_COUNTER means disabled.
+ * The setters always succeed and return MALI_TRUE. */
+u32 mali_pp_job_get_pp_counter_src0(void)
+{
+       return pp_counter_src0;
+}
+
+mali_bool mali_pp_job_set_pp_counter_src0(u32 counter)
+{
+       pp_counter_src0 = counter;
+
+       return MALI_TRUE;
+}
+
+u32 mali_pp_job_get_pp_counter_src1(void)
+{
+       return pp_counter_src1;
+}
+
+mali_bool mali_pp_job_set_pp_counter_src1(u32 counter)
+{
+       pp_counter_src1 = counter;
+
+       return MALI_TRUE;
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_pp_job.h b/drivers/gpu/arm/mali400/mali/common/mali_pp_job.h
new file mode 100644 (file)
index 0000000..f8a630e
--- /dev/null
@@ -0,0 +1,313 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_JOB_H__
+#define __MALI_PP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_core.h"
+#ifdef CONFIG_SYNC
+/* MALI_SEC */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+#include <linux/sync.h>
+#endif
+#endif
+#include "mali_dlbu.h"
+
+/**
+ * The structure represents a PP job, including all sub-jobs
+ * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
+ * mechanism works)
+ */
+struct mali_pp_job
+{
+       _mali_osk_list_t list;                             /**< Used to link jobs together in the scheduler queue */
+       struct mali_session_data *session;                 /**< Session which submitted this job */
+       _mali_osk_list_t session_list;                     /**< Used to link jobs together in the session job list */
+       _mali_uk_pp_start_job_s uargs;                     /**< Arguments from user space */
+       u32 id;                                            /**< Identifier for this job in kernel space (sequential numbering) */
+       u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS];    /**< Value of performance counter 0 (to be returned to user space), one for each sub job */
+       u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS];    /**< Value of performance counter 1 (to be returned to user space), one for each sub job */
+       u32 sub_jobs_num;                                  /**< Number of subjobs; set to 1 for Mali-450 if DLBU is used, otherwise equals number of PP cores */
+       u32 sub_jobs_started;                              /**< Total number of sub-jobs started (always started in ascending order) */
+       u32 sub_jobs_completed;                            /**< Number of completed sub-jobs in this superjob */
+       u32 sub_job_errors;                                /**< Bitfield with errors (errors for each single sub-job is or'ed together) */
+       u32 pid;                                           /**< Process ID of submitting process */
+       u32 tid;                                           /**< Thread ID of submitting thread */
+       _mali_osk_notification_t *finished_notification;   /**< Notification sent back to userspace on job complete */
+#ifdef CONFIG_SYNC
+/* MALI_SEC */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+       /* Sync fields below are NULLed in mali_pp_job_create() and their fence
+        * references are dropped in mali_pp_job_delete(). */
+       mali_sync_pt *sync_point;                          /**< Sync point to signal on completion */
+       struct sync_fence_waiter sync_waiter;              /**< Sync waiter for async wait */
+       _mali_osk_wq_work_t *sync_work;                    /**< Work to schedule in callback */
+       struct sync_fence *pre_fence;                      /**< Sync fence this job must wait for */
+#endif
+#endif
+};
+
+/* Job lifetime: mali_pp_job_create() allocates (copying the argument block from
+ * user space); mali_pp_job_delete() releases it. */
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id);
+void mali_pp_job_delete(struct mali_pp_job *job);
+
+/* Global default performance counter sources, applied to jobs that do not
+ * carry their own (see mali_pp_job_create()). */
+u32 mali_pp_job_get_pp_counter_src0(void);
+mali_bool mali_pp_job_set_pp_counter_src0(u32 counter);
+u32 mali_pp_job_get_pp_counter_src1(void);
+mali_bool mali_pp_job_set_pp_counter_src1(u32 counter);
+
+/* Kernel-side job id; tolerates a NULL job (returns 0), unlike the getters below. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_id(struct mali_pp_job *job)
+{
+       return (NULL == job) ? 0 : job->id;
+}
+
+/* Thin accessors over fields copied verbatim from user space (job->uargs).
+ * None of these NULL-check their argument. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_user_id(struct mali_pp_job *job)
+{
+       return job->uargs.user_job_ptr;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_frame_builder_id(struct mali_pp_job *job)
+{
+       return job->uargs.frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_flush_id(struct mali_pp_job *job)
+{
+       return job->uargs.flush_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_pid(struct mali_pp_job *job)
+{
+       return job->pid;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_tid(struct mali_pp_job *job)
+{
+       return job->tid;
+}
+
+MALI_STATIC_INLINE u32* mali_pp_job_get_frame_registers(struct mali_pp_job *job)
+{
+       return job->uargs.frame_registers;
+}
+
+MALI_STATIC_INLINE u32* mali_pp_job_get_dlbu_registers(struct mali_pp_job *job)
+{
+       return job->uargs.dlbu_registers;
+}
+
+/* A job requesting zero cores is a "virtual" job (Mali-450 DLBU path). */
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_virtual(struct mali_pp_job *job)
+{
+       return 0 == job->uargs.num_cores;
+}
+
+/* Frame address for @sub_job: virtual jobs always target the DLBU virtual
+ * address; sub job 0 uses the base frame register block; later sub jobs use
+ * the per-sub-job address array (offset by one). Returns 0 for an
+ * out-of-range sub job index. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_frame(struct mali_pp_job *job, u32 sub_job)
+{
+       if (mali_pp_job_is_virtual(job))
+       {
+               return MALI_DLBU_VIRT_ADDR;
+       }
+       else if (0 == sub_job)
+       {
+               return job->uargs.frame_registers[MALI200_REG_ADDR_FRAME / sizeof(u32)];
+       }
+       else if (sub_job < _MALI_PP_MAX_SUB_JOBS)
+       {
+               return job->uargs.frame_registers_addr_frame[sub_job - 1];
+       }
+
+       return 0;
+}
+
+/* Stack address for @sub_job; same indexing scheme as
+ * mali_pp_job_get_addr_frame(), minus the virtual-job special case. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_stack(struct mali_pp_job *job, u32 sub_job)
+{
+       if (0 == sub_job)
+       {
+               return job->uargs.frame_registers[MALI200_REG_ADDR_STACK / sizeof(u32)];
+       }
+       else if (sub_job < _MALI_PP_MAX_SUB_JOBS)
+       {
+               return job->uargs.frame_registers_addr_stack[sub_job - 1];
+       }
+
+       return 0;
+}
+
+/* Write-back unit 0/1/2 register blocks, as supplied from user space. */
+MALI_STATIC_INLINE u32* mali_pp_job_get_wb0_registers(struct mali_pp_job *job)
+{
+       return job->uargs.wb0_registers;
+}
+
+MALI_STATIC_INLINE u32* mali_pp_job_get_wb1_registers(struct mali_pp_job *job)
+{
+       return job->uargs.wb1_registers;
+}
+
+MALI_STATIC_INLINE u32* mali_pp_job_get_wb2_registers(struct mali_pp_job *job)
+{
+       return job->uargs.wb2_registers;
+}
+
+/* Disabling a WB unit clears its source-select register.
+ * NOTE(review): MALI200_REG_ADDR_WB_SOURCE_SELECT is used directly as a u32
+ * array index here, whereas the frame register accessors divide the register
+ * offset by sizeof(u32) -- presumably this offset is zero or already a word
+ * index; TODO confirm against mali_200_regs.h. */
+MALI_STATIC_INLINE void mali_pp_job_disable_wb0(struct mali_pp_job *job)
+{
+       job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb1(struct mali_pp_job *job)
+{
+       job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb2(struct mali_pp_job *job)
+{
+       job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_pp_job_get_session(struct mali_pp_job *job)
+{
+       return job->session;
+}
+
+/* TRUE while at least one sub job has not yet been handed to a core. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_unstarted_sub_jobs(struct mali_pp_job *job)
+{
+       return (job->sub_jobs_started < job->sub_jobs_num) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Marks every not-yet-started sub job as started, completed and failed, so the
+ * job immediately reads as complete and no new sub jobs can be started. Used
+ * when terminating a session that still has queued work.
+ * NOTE(review): sub_job_errors is documented as a bitfield in the struct but
+ * is incremented by a count here; mali_pp_job_was_success() only tests it
+ * against zero, so this appears intentional -- TODO confirm no caller decodes
+ * individual bits. */
+MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_failed(struct mali_pp_job *job)
+{
+       u32 jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
+       job->sub_jobs_started   += jobs_remaining;
+       job->sub_jobs_completed += jobs_remaining;
+       job->sub_job_errors     += jobs_remaining;
+}
+
+/* TRUE once every sub job has completed (successfully or not). */
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_complete(struct mali_pp_job *job)
+{
+       return (job->sub_jobs_num == job->sub_jobs_completed) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Sub jobs are always started in ascending order, so the count of started
+ * sub jobs doubles as the index of the next one to start. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_first_unstarted_sub_job(struct mali_pp_job *job)
+{
+       return job->sub_jobs_started;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_sub_job_count(struct mali_pp_job *job)
+{
+       return job->sub_jobs_num;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_started(struct mali_pp_job *job, u32 sub_job)
+{
+       /* Assert that we are marking the "first unstarted sub job" as started */
+       MALI_DEBUG_ASSERT(job->sub_jobs_started == sub_job);
+       job->sub_jobs_started++;
+}
+
+/* Undoes a mark_sub_job_started (Mali-200 only, per the assert below).
+ * NOTE(review): "not_stated" is a typo for "not_started"; kept as-is because
+ * the name is part of the public interface. */
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_not_stated(struct mali_pp_job *job, u32 sub_job)
+{
+       /* This is only safe on Mali-200. */
+       MALI_DEBUG_ASSERT(_MALI_PRODUCT_ID_MALI200 == mali_kernel_core_get_product_id());
+
+       job->sub_jobs_started--;
+}
+
+/* Records completion of one sub job; failed sub jobs bump the error count. */
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_completed(struct mali_pp_job *job, mali_bool success)
+{
+       job->sub_jobs_completed++;
+       if ( MALI_FALSE == success )
+       {
+               job->sub_job_errors++;
+       }
+}
+
+/* TRUE iff no sub job recorded an error. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_was_success(struct mali_pp_job *job)
+{
+       if ( 0 == job->sub_job_errors )
+       {
+               return MALI_TRUE;
+       }
+       return MALI_FALSE;
+}
+
+/* TRUE while the job still carries a user-requested scheduling barrier. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_active_barrier(struct mali_pp_job *job)
+{
+       return job->uargs.flags & _MALI_PP_JOB_FLAG_BARRIER ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Clears the barrier flag once the scheduler has honoured it. */
+MALI_STATIC_INLINE void mali_pp_job_barrier_enforced(struct mali_pp_job *job)
+{
+       job->uargs.flags &= ~_MALI_PP_JOB_FLAG_BARRIER;
+}
+
+/* TRUE when userspace asked not to receive a completion notification. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_use_no_notification(struct mali_pp_job *job)
+{
+       return job->uargs.flags & _MALI_PP_JOB_FLAG_NO_NOTIFICATION ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Per-job performance counter plumbing: the src0/src1 sources come from
+ * user space (or the global defaults, see mali_pp_job_create()); the value0/
+ * value1 arrays hold the per-sub-job readback destined for user space.
+ * None of these range-check @sub_job. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_flag(struct mali_pp_job *job)
+{
+       return job->uargs.perf_counter_flag;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job)
+{
+       return job->uargs.perf_counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job)
+{
+       return job->uargs.perf_counter_src1;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value0(struct mali_pp_job *job, u32 sub_job)
+{
+       return job->perf_counter_value0[sub_job];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value1(struct mali_pp_job *job, u32 sub_job)
+{
+       return job->perf_counter_value1[sub_job];
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_src0(struct mali_pp_job *job, u32 src)
+{
+       job->uargs.perf_counter_src0 = src;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_src1(struct mali_pp_job *job, u32 src)
+{
+       job->uargs.perf_counter_src1 = src;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value0(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+       job->perf_counter_value0[sub_job] = value;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value1(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+       job->perf_counter_value1[sub_job] = value;
+}
+
+/* Sanity check: a virtual job must consist of exactly one sub job
+ * (mali_pp_job_create() sets sub_jobs_num to 1 when num_cores is 0).
+ * Returns _MALI_OSK_ERR_FAULT on violation, _MALI_OSK_ERR_OK otherwise. */
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_pp_job_check(struct mali_pp_job *job)
+{
+       if (mali_pp_job_is_virtual(job) && job->sub_jobs_num != 1)
+       {
+               return _MALI_OSK_ERR_FAULT;
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+#endif /* __MALI_PP_JOB_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_pp_scheduler.c b/drivers/gpu/arm/mali400/mali/common/mali_pp_scheduler.c
new file mode 100644 (file)
index 0000000..3ba1012
--- /dev/null
@@ -0,0 +1,1335 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_pp_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_scheduler.h"
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+
+#if defined(CONFIG_SYNC)
+#define MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE 1
+#endif
+
+/* Maximum of 8 PP cores (a group can only have maximum of 1 PP core) */
+#define MALI_MAX_NUMBER_OF_PP_GROUPS 9
+
+static mali_bool mali_pp_scheduler_is_suspended(void);
+static void mali_pp_scheduler_do_schedule(void *arg);
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+static void mali_pp_scheduler_do_job_delete(void *arg);
+#endif
+
+static u32 pp_version = 0;
+
+/* Physical job queue */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(job_queue);              /* List of physical jobs with some unscheduled work */
+static u32 job_queue_depth = 0;
+
+/* Physical groups */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_working);     /* List of physical groups with working jobs on the pp core */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_idle);        /* List of physical groups with idle jobs on the pp core */
+
+/* Virtual job queue (Mali-450 only) */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(virtual_job_queue);      /* List of unstarted jobs for the virtual group */
+static u32 virtual_job_queue_depth = 0;
+
+/* Virtual group (Mali-450 only) */
+static struct mali_group *virtual_group = NULL;                 /* Virtual group (if any) */
+static mali_bool virtual_group_working = MALI_FALSE;            /* Flag which indicates whether the virtual group is working or idle */
+
+/* Number of physical cores */
+static u32 num_cores = 0;
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *pp_scheduler_working_wait_queue = NULL;
+static u32 pause_count = 0;
+
+static _mali_osk_lock_t *pp_scheduler_lock = NULL;
+/* Contains tid of thread that locked the scheduler or 0, if not locked */
+MALI_DEBUG_CODE(static u32 pp_scheduler_lock_owner = 0);
+
+static _mali_osk_wq_work_t *pp_scheduler_wq_schedule = NULL;
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+static _mali_osk_wq_work_t *pp_scheduler_wq_job_delete = NULL;
+static _mali_osk_lock_t *pp_scheduler_job_delete_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(pp_scheduler_job_deletion_queue);
+#endif
+
+/**
+ * Set up the PP scheduler: lock, wait queue, work items, and the group lists.
+ *
+ * Each error path unwinds everything allocated before it, in reverse order.
+ * Physical PP groups are either joined into the virtual group (when one
+ * exists, i.e. Mali-450) or placed on the idle list.
+ */
+_mali_osk_errcode_t mali_pp_scheduler_initialize(void)
+{
+       struct mali_group *group;
+       struct mali_pp_core *pp_core;
+       _mali_osk_lock_flags_t lock_flags;
+       u32 num_groups;
+       u32 i;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       /* Upper-half scheduling presumably takes the scheduler lock from IRQ
+        * context, hence the IRQ-safe spinlock variant -- TODO confirm. */
+       lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
+#else
+       lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
+#endif
+
+       /* Re-initialize the statically-initialized list heads (harmless, keeps
+        * repeated init/terminate cycles safe). */
+       _MALI_OSK_INIT_LIST_HEAD(&job_queue);
+       _MALI_OSK_INIT_LIST_HEAD(&group_list_working);
+       _MALI_OSK_INIT_LIST_HEAD(&group_list_idle);
+
+       _MALI_OSK_INIT_LIST_HEAD(&virtual_job_queue);
+
+       pp_scheduler_lock = _mali_osk_lock_init(lock_flags, 0, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+       if (NULL == pp_scheduler_lock)
+       {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       pp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
+       if (NULL == pp_scheduler_working_wait_queue)
+       {
+               _mali_osk_lock_term(pp_scheduler_lock);
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       pp_scheduler_wq_schedule = _mali_osk_wq_create_work(mali_pp_scheduler_do_schedule, NULL);
+       if (NULL == pp_scheduler_wq_schedule)
+       {
+               _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
+               _mali_osk_lock_term(pp_scheduler_lock);
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+       pp_scheduler_wq_job_delete = _mali_osk_wq_create_work(mali_pp_scheduler_do_job_delete, NULL);
+       if (NULL == pp_scheduler_wq_job_delete)
+       {
+               _mali_osk_wq_delete_work(pp_scheduler_wq_schedule);
+               _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
+               _mali_osk_lock_term(pp_scheduler_lock);
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       pp_scheduler_job_delete_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ |_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
+       if (NULL == pp_scheduler_job_delete_lock)
+       {
+               _mali_osk_wq_delete_work(pp_scheduler_wq_job_delete);
+               _mali_osk_wq_delete_work(pp_scheduler_wq_schedule);
+               _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
+               _mali_osk_lock_term(pp_scheduler_lock);
+               return _MALI_OSK_ERR_NOMEM;
+       }
+#endif
+
+       num_groups = mali_group_get_glob_num_groups();
+
+       /* Do we have a virtual group? */
+       for (i = 0; i < num_groups; i++)
+       {
+               group = mali_group_get_glob_group(i);
+
+               if (mali_group_is_virtual(group))
+               {
+                       MALI_DEBUG_PRINT(3, ("Found virtual group %p\n", group));
+
+                       virtual_group = group;
+                       break;
+               }
+       }
+
+       /* Find all the available PP cores */
+       for (i = 0; i < num_groups; i++)
+       {
+               group = mali_group_get_glob_group(i);
+               pp_core = mali_group_get_pp_core(group);
+
+               if (NULL != pp_core && !mali_group_is_virtual(group))
+               {
+                       if (0 == pp_version)
+                       {
+                               /* Retrieve PP version from the first available PP core */
+                               pp_version = mali_pp_core_get_version(pp_core);
+                       }
+
+                       if (NULL != virtual_group)
+                       {
+                               /* Add all physical PP cores to the virtual group */
+                               mali_group_lock(virtual_group);
+                               group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
+                               mali_group_add_group(virtual_group, group);
+                               mali_group_unlock(virtual_group);
+                       }
+                       else
+                       {
+                               _mali_osk_list_add(&group->pp_scheduler_list, &group_list_idle);
+                       }
+
+                       num_cores++;
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Tear down the PP scheduler; mirror of mali_pp_scheduler_initialize().
+ *
+ * All groups must be idle: the working list is asserted empty, then every
+ * idle group (and the virtual group, if any) is deleted before the work
+ * items, wait queue and locks are released in reverse allocation order.
+ */
+void mali_pp_scheduler_terminate(void)
+{
+       struct mali_group *group, *temp;
+
+       /* Delete all groups owned by scheduler */
+       if (NULL != virtual_group)
+       {
+               mali_group_delete(virtual_group);
+       }
+
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+       /* Safe iteration (FOREACHENTRY with @temp) since deletion unlinks entries. */
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list)
+       {
+               mali_group_delete(group);
+       }
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+       _mali_osk_lock_term(pp_scheduler_job_delete_lock);
+       _mali_osk_wq_delete_work(pp_scheduler_wq_job_delete);
+#endif
+
+       _mali_osk_wq_delete_work(pp_scheduler_wq_schedule);
+       _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
+       _mali_osk_lock_term(pp_scheduler_lock);
+}
+
+/* Takes the scheduler lock; in debug builds also records the owning thread id
+ * so mali_pp_scheduler_assert_locked() can verify ownership. */
+MALI_STATIC_INLINE void mali_pp_scheduler_lock(void)
+{
+       if(_MALI_OSK_ERR_OK != _mali_osk_lock_wait(pp_scheduler_lock, _MALI_OSK_LOCKMODE_RW))
+       {
+               /* Non-interruptable lock failed: this should never happen. */
+               MALI_DEBUG_ASSERT(0);
+       }
+       MALI_DEBUG_PRINT(5, ("Mali PP scheduler: PP scheduler lock taken\n"));
+       MALI_DEBUG_ASSERT(0 == pp_scheduler_lock_owner);
+       MALI_DEBUG_CODE(pp_scheduler_lock_owner = _mali_osk_get_tid());
+}
+
+/* Releases the scheduler lock; debug builds assert the caller is the owner
+ * and clear the recorded owner tid before signalling. */
+MALI_STATIC_INLINE void mali_pp_scheduler_unlock(void)
+{
+       MALI_DEBUG_PRINT(5, ("Mali PP scheduler: Releasing PP scheduler lock\n"));
+       MALI_DEBUG_ASSERT(_mali_osk_get_tid() == pp_scheduler_lock_owner);
+       MALI_DEBUG_CODE(pp_scheduler_lock_owner = 0);
+       _mali_osk_lock_signal(pp_scheduler_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+#ifdef DEBUG
+/* Debug-only: asserts the current thread holds the scheduler lock (compares
+ * against the owner tid recorded in mali_pp_scheduler_lock()). Compiles to
+ * nothing in release builds via the macro below. */
+MALI_STATIC_INLINE void mali_pp_scheduler_assert_locked(void)
+{
+       MALI_DEBUG_ASSERT(_mali_osk_get_tid() == pp_scheduler_lock_owner);
+}
+#define MALI_ASSERT_PP_SCHEDULER_LOCKED() mali_pp_scheduler_assert_locked()
+#else
+#define MALI_ASSERT_PP_SCHEDULER_LOCKED()
+#endif
+
+/**
+ * Returns a physical job if a physical job is ready to run (no barrier present)
+ *
+ * Peeks (does not dequeue) the head of the physical job queue. Only the head
+ * is inspected: a job with an active barrier at the head stalls all physical
+ * scheduling until the barrier is released. Caller must hold the scheduler lock.
+ */
+MALI_STATIC_INLINE struct mali_pp_job *mali_pp_scheduler_get_physical_job(void)
+{
+       MALI_ASSERT_PP_SCHEDULER_LOCKED();
+
+       if (!_mali_osk_list_empty(&job_queue))
+       {
+               struct mali_pp_job *job;
+
+               MALI_DEBUG_ASSERT(job_queue_depth > 0);
+               job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_pp_job, list);
+
+               if (!mali_pp_job_has_active_barrier(job))
+               {
+                       return job;
+               }
+       }
+
+       return NULL;
+}
+
+/* Consumes one unit of queued physical work. The job itself is only unlinked
+ * from the queue once its last sub job has been started; until then it stays
+ * at the head so remaining sub jobs can be handed out. Caller must hold the
+ * scheduler lock. */
+MALI_STATIC_INLINE void mali_pp_scheduler_dequeue_physical_job(struct mali_pp_job *job)
+{
+       MALI_ASSERT_PP_SCHEDULER_LOCKED();
+       MALI_DEBUG_ASSERT(job_queue_depth > 0);
+
+       /* Remove job from queue */
+       if (!mali_pp_job_has_unstarted_sub_jobs(job))
+       {
+               /* All sub jobs have been started: remove job from queue */
+               _mali_osk_list_delinit(&job->list);
+       }
+
+       --job_queue_depth;
+}
+
+/**
+ * Returns a virtual job if a virtual job is ready to run (no barrier present)
+ *
+ * Peeks (does not dequeue) the head of the virtual job queue; a barrier job
+ * at the head blocks virtual scheduling, mirroring the physical-job variant.
+ * Caller must hold the scheduler lock; only valid when a virtual group exists.
+ */
+MALI_STATIC_INLINE struct mali_pp_job *mali_pp_scheduler_get_virtual_job(void)
+{
+       MALI_ASSERT_PP_SCHEDULER_LOCKED();
+       MALI_DEBUG_ASSERT_POINTER(virtual_group);
+
+       if (!_mali_osk_list_empty(&virtual_job_queue))
+       {
+               struct mali_pp_job *job;
+
+               MALI_DEBUG_ASSERT(virtual_job_queue_depth > 0);
+               job = _MALI_OSK_LIST_ENTRY(virtual_job_queue.next, struct mali_pp_job, list);
+
+               if (!mali_pp_job_has_active_barrier(job))
+               {
+                       return job;
+               }
+       }
+
+       return NULL;
+}
+
+/* Unlinks a virtual job from its queue unconditionally (a virtual job has
+ * exactly one sub job, so there is no partial-dequeue case as with physical
+ * jobs). Caller must hold the scheduler lock. */
+MALI_STATIC_INLINE void mali_pp_scheduler_dequeue_virtual_job(struct mali_pp_job *job)
+{
+       MALI_ASSERT_PP_SCHEDULER_LOCKED();
+       MALI_DEBUG_ASSERT(virtual_job_queue_depth > 0);
+
+       /* Remove job from queue */
+       _mali_osk_list_delinit(&job->list);
+       --virtual_job_queue_depth;
+}
+
+/**
+ * Checks if the criteria is met for removing a physical core from virtual group
+ *
+ * Caller must hold both the scheduler lock and the virtual group lock.
+ */
+MALI_STATIC_INLINE mali_bool mali_pp_scheduler_can_move_virtual_to_physical(void)
+{
+       MALI_ASSERT_PP_SCHEDULER_LOCKED();
+       MALI_DEBUG_ASSERT(NULL != virtual_group);
+       MALI_ASSERT_GROUP_LOCKED(virtual_group);
+       /*
+        * The criteria for taking out a physical group from a virtual group are the following:
+        * - The virtual group is idle
+        * - There are currently no physical groups (neither idle nor working)
+        * - There are physical jobs to be scheduled (without a barrier)
+        */
+       return (!virtual_group_working) &&
+              _mali_osk_list_empty(&group_list_idle) &&
+              _mali_osk_list_empty(&group_list_working) &&
+              (NULL != mali_pp_scheduler_get_physical_job());
+}
+
+/* Acquires a physical group to run a job on: prefers the idle list, otherwise
+ * (when permitted) pulls one out of the virtual group. A group taken from the
+ * virtual group is presumably still in the LEAVING_VIRTUAL state and must be
+ * transitioned by the caller before use -- see mali_pp_scheduler_schedule().
+ * Returns NULL when no group is available. Caller must hold the scheduler
+ * lock (and the virtual group lock when one exists). */
+MALI_STATIC_INLINE struct mali_group *mali_pp_scheduler_acquire_physical_group(void)
+{
+       MALI_ASSERT_PP_SCHEDULER_LOCKED();
+
+       if (!_mali_osk_list_empty(&group_list_idle))
+       {
+               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquiring physical group from idle list\n"));
+               return _MALI_OSK_LIST_ENTRY(group_list_idle.next, struct mali_group, pp_scheduler_list);
+       }
+       else if (NULL != virtual_group)
+       {
+               MALI_ASSERT_GROUP_LOCKED(virtual_group);
+               if (mali_pp_scheduler_can_move_virtual_to_physical())
+               {
+                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquiring physical group from virtual group\n"));
+                       return mali_group_acquire_group(virtual_group);
+               }
+       }
+
+       return NULL;
+}
+
+static void mali_pp_scheduler_schedule(void)
+{
+       struct mali_group* physical_groups_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS-1];
+       struct mali_pp_job* physical_jobs_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS-1];
+       u32 physical_subjobs_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS-1];
+       int num_physical_jobs_to_start = 0;
+       int i;
+
+       if (NULL != virtual_group)
+       {
+               /* Need to lock the virtual group because we might need to grab a physical group from it */
+               mali_group_lock(virtual_group);
+       }
+
+       mali_pp_scheduler_lock();
+       if (pause_count > 0)
+       {
+               /* Scheduler is suspended, don't schedule any jobs */
+               mali_pp_scheduler_unlock();
+               if (NULL != virtual_group)
+               {
+                       mali_group_unlock(virtual_group);
+               }
+               return;
+       }
+
+       /* Find physical job(s) to schedule first */
+       while (1)
+       {
+               struct mali_group *group;
+               struct mali_pp_job *job;
+               u32 subjob;
+
+               job = mali_pp_scheduler_get_physical_job();
+               if (NULL == job)
+               {
+                       break; /* No job, early out */
+               }
+
+               MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual(job));
+               MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
+               MALI_DEBUG_ASSERT(1 <= mali_pp_job_get_sub_job_count(job));
+
+               /* Acquire a physical group, either from the idle list or from the virtual group.
+                * In case the group was acquired from the virtual group, it's state will be
+                * LEAVING_VIRTUAL and must be set to IDLE before it can be used. */
+               group = mali_pp_scheduler_acquire_physical_group();
+               if (NULL == group)
+               {
+                       /* Could not get a group to run the job on, early out */
+                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: No more physical groups available.\n"));
+                       break;
+               }
+
+               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquired physical group %p\n", group));
+
+               /* Mark subjob as started */
+               subjob = mali_pp_job_get_first_unstarted_sub_job(job);
+               mali_pp_job_mark_sub_job_started(job, subjob);
+
+               /* Remove job from queue (if we now got the last subjob) */
+               mali_pp_scheduler_dequeue_physical_job(job);
+
+               /* Move group to working list */
+               _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_working);
+
+               /* Keep track of this group, so that we actually can start the job once we are done with the scheduler lock we are now holding */
+               physical_groups_to_start[num_physical_jobs_to_start] = group;
+               physical_jobs_to_start[num_physical_jobs_to_start] = job;
+               physical_subjobs_to_start[num_physical_jobs_to_start] = subjob;
+               ++num_physical_jobs_to_start;
+
+               MALI_DEBUG_ASSERT(num_physical_jobs_to_start < MALI_MAX_NUMBER_OF_PP_GROUPS);
+       }
+
+       /* See if we have a virtual job to schedule */
+       if (NULL != virtual_group)
+       {
+               if (!virtual_group_working)
+               {
+                       struct mali_pp_job *job = mali_pp_scheduler_get_virtual_job();
+                       if (NULL != job)
+                       {
+                               MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
+                               MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
+                               MALI_DEBUG_ASSERT(1 == mali_pp_job_get_sub_job_count(job));
+
+                               /* Mark the one and only subjob as started */
+                               mali_pp_job_mark_sub_job_started(job, 0);
+
+                               /* Remove job from queue */
+                               mali_pp_scheduler_dequeue_virtual_job(job);
+
+                               /* Virtual group is now working */
+                               virtual_group_working = MALI_TRUE;
+
+                               /*
+                                * We no longer need the scheduler lock,
+                                * but we still need the virtual lock in order to start the virtual job
+                                */
+                               mali_pp_scheduler_unlock();
+
+                               /* Start job */
+                               if (_MALI_OSK_ERR_OK == mali_group_start_pp_job(virtual_group, job, 0))
+                               {
+                                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Virtual job %u (0x%08X) part %u/%u started (from schedule)\n",
+                                                            mali_pp_job_get_id(job), job, 1,
+                                                            mali_pp_job_get_sub_job_count(job)));
+                               }
+                               else
+                               {
+                                       MALI_DEBUG_ASSERT(0);
+                               }
+
+                               /* And now we are all done with the virtual_group lock as well */
+                               mali_group_unlock(virtual_group);
+                       }
+                       else
+                       {
+                               /* No virtual job, release the two locks we are holding */
+                               mali_pp_scheduler_unlock();
+                               mali_group_unlock(virtual_group);
+                       }
+               }
+               else
+               {
+                       /* Virtual core busy, release the two locks we are holding */
+                       mali_pp_scheduler_unlock();
+                       mali_group_unlock(virtual_group);
+               }
+
+       }
+       else
+       {
+               /* There is no virtual group, release the only lock we are holding */
+               mali_pp_scheduler_unlock();
+       }
+
+       /*
+        * Now we have released the scheduler lock, and we are ready to kick of the actual starting of the
+        * physical jobs.
+        * The reason we want to wait until we have released the scheduler lock is that job start actually
+        * may take quite a bit of time (quite many registers needs to be written). This will allow new jobs
+        * from user space to come in, and post processing of other PP jobs to happen at the same time as we
+        * start jobs.
+        */
+       for (i = 0; i < num_physical_jobs_to_start; i++)
+       {
+               struct mali_group *group = physical_groups_to_start[i];
+               struct mali_pp_job *job  = physical_jobs_to_start[i];
+               u32 sub_job              = physical_subjobs_to_start[i];
+
+               MALI_DEBUG_ASSERT_POINTER(group);
+               MALI_DEBUG_ASSERT_POINTER(job);
+
+               mali_group_lock(group);
+
+               /* In case this group was acquired from a virtual core, update it's state to IDLE */
+               group->state = MALI_GROUP_STATE_IDLE;
+
+               if (_MALI_OSK_ERR_OK == mali_group_start_pp_job(group, job, sub_job))
+               {
+                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from schedule)\n",
+                                            mali_pp_job_get_id(job), job, sub_job + 1,
+                                            mali_pp_job_get_sub_job_count(job)));
+               }
+               else
+               {
+                       MALI_DEBUG_ASSERT(0);
+               }
+
+               mali_group_unlock(group);
+
+               /* @@@@ todo: remove the return value from mali_group_start_xx_job, since we can't fail on Mali-300++ */
+       }
+}
+
+/* Send the job result back to the submitting session (unless the job was
+ * flagged "no notification") and then dispose of the job object.
+ *
+ * @param job      Completed PP job; ownership is taken here — the job is
+ *                 deleted, either directly or via the deferred-delete queue.
+ * @param deferred MALI_TRUE to push the actual mali_pp_job_delete() onto the
+ *                 pp_scheduler_wq_job_delete work queue. Only supported when
+ *                 MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE is defined;
+ *                 otherwise must be MALI_FALSE (asserted below).
+ */
+static void mali_pp_scheduler_return_job_to_user(struct mali_pp_job *job, mali_bool deferred)
+{
+       if (MALI_FALSE == mali_pp_job_use_no_notification(job))
+       {
+               u32 i;
+               u32 sub_jobs = mali_pp_job_get_sub_job_count(job);
+               mali_bool success = mali_pp_job_was_success(job);
+
+               /* Fill in the pre-allocated notification buffer with the job outcome. */
+               _mali_uk_pp_job_finished_s *jobres = job->finished_notification->result_buffer;
+               _mali_osk_memset(jobres, 0, sizeof(_mali_uk_pp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
+               jobres->user_job_ptr = mali_pp_job_get_user_id(job);
+               if (MALI_TRUE == success)
+               {
+                       jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+               }
+               else
+               {
+                       jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+               }
+
+               /* Copy per-sub-job performance counter values to user space. */
+               for (i = 0; i < sub_jobs; i++)
+               {
+                       jobres->perf_counter0[i] = mali_pp_job_get_perf_counter_value0(job, i);
+                       jobres->perf_counter1[i] = mali_pp_job_get_perf_counter_value1(job, i);
+               }
+
+               /* The notification object is handed over to the session; clear our
+                * pointer so job deletion does not free it a second time. */
+               mali_session_send_notification(mali_pp_job_get_session(job), job->finished_notification);
+               job->finished_notification = NULL;
+       }
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+       if (MALI_TRUE == deferred)
+       {
+               /* The deletion of the job object (releasing sync refs etc) must be done in a different context */
+               _mali_osk_lock_wait(pp_scheduler_job_delete_lock, _MALI_OSK_LOCKMODE_RW);
+
+               MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list)); /* This job object should not be on any list */
+               _mali_osk_list_addtail(&job->list, &pp_scheduler_job_deletion_queue);
+
+               _mali_osk_lock_signal(pp_scheduler_job_delete_lock, _MALI_OSK_LOCKMODE_RW);
+
+               _mali_osk_wq_schedule_work(pp_scheduler_wq_job_delete);
+       }
+       else
+       {
+               mali_pp_job_delete(job);
+       }
+#else
+       MALI_DEBUG_ASSERT(MALI_FALSE == deferred); /* no use cases need this in this configuration */
+       mali_pp_job_delete(job);
+#endif
+}
+
+/* Work queue entry point: run the PP scheduler from process context. */
+static void mali_pp_scheduler_do_schedule(void *arg)
+{
+       MALI_IGNORE(arg); /* work item carries no payload */
+       mali_pp_scheduler_schedule();
+}
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+/* Work queue handler performing the deferred deletion of PP job objects
+ * queued by mali_pp_scheduler_return_job_to_user(). Runs in process context
+ * so that releasing job resources (sync refs etc) is safe. */
+static void mali_pp_scheduler_do_job_delete(void *arg)
+{
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+       struct mali_pp_job *job;
+       struct mali_pp_job *tmp;
+
+       MALI_IGNORE(arg);
+
+       _mali_osk_lock_wait(pp_scheduler_job_delete_lock, _MALI_OSK_LOCKMODE_RW);
+
+       /*
+        * Quickly "unhook" the jobs pending to be deleted, so we can release the lock before
+        * we start deleting the job objects (without any locks held).
+        */
+       _mali_osk_list_move_list(&pp_scheduler_job_deletion_queue, &list);
+
+       _mali_osk_lock_signal(pp_scheduler_job_delete_lock, _MALI_OSK_LOCKMODE_RW);
+
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list, struct mali_pp_job, list)
+       {
+               mali_pp_job_delete(job); /* delete the job object itself */
+       }
+}
+#endif
+
+/* Completion callback: a (sub) job has finished on @group.
+ *
+ * Called with the group lock held; the scheduler lock is taken and released
+ * internally. When the last sub job of @job completes this sends the result
+ * back to user space, signals any post-fence, resolves session barriers, and
+ * then immediately tries to start the next job (virtual or physical) on the
+ * now-idle group. The group lock is still held when this function returns —
+ * including on the "rejoin virtual group" path, where it is temporarily
+ * dropped and re-taken (see below).
+ *
+ * @param group   Group that finished the (sub) job; its lock is held.
+ * @param job     The job the sub job belongs to.
+ * @param sub_job Index of the completed sub job.
+ * @param success MALI_TRUE if the sub job completed without error.
+ */
+void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success)
+{
+       mali_bool job_is_done;
+       mali_bool barrier_enforced = MALI_FALSE;
+
+       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) part %u/%u completed (%s)\n",
+                            mali_pp_job_is_virtual(job) ? "Virtual" : "Physical",
+                            mali_pp_job_get_id(job),
+                            job, sub_job + 1,
+                            mali_pp_job_get_sub_job_count(job),
+                            success ? "success" : "failure"));
+       MALI_ASSERT_GROUP_LOCKED(group);
+       mali_pp_scheduler_lock();
+
+       mali_pp_job_mark_sub_job_completed(job, success);
+
+       MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job) == mali_group_is_virtual(group));
+
+       job_is_done = mali_pp_job_is_complete(job);
+
+       if (job_is_done)
+       {
+               /* All sub jobs have now completed: report back and clean up. */
+               struct mali_session_data *session = mali_pp_job_get_session(job);
+               struct mali_pp_job *job_head;
+
+               /* Remove job from session list */
+               _mali_osk_list_del(&job->session_list);
+
+               /* Send notification back to user space */
+               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: All parts completed for %s job %u (0x%08X)\n",
+                                    mali_pp_job_is_virtual(job) ? "virtual" : "physical",
+                                    mali_pp_job_get_id(job), job));
+#if defined(CONFIG_SYNC)
+/* MALI_SEC */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+               /* Signal the job's post-fence so waiting consumers are released. */
+               if (job->sync_point)
+               {
+                       int error;
+                       if (success) error = 0;
+                       else error = -EFAULT;
+                       MALI_DEBUG_PRINT(4, ("Sync: Signal %spoint for job %d\n",
+                                            success ? "" : "failed ",
+                                            mali_pp_job_get_id(job)));
+                       mali_sync_signal_pt(job->sync_point, error);
+               }
+#endif
+#endif
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+               mali_pp_scheduler_return_job_to_user(job, MALI_TRUE);
+#else
+               mali_pp_scheduler_return_job_to_user(job, MALI_FALSE);
+#endif
+
+               mali_pm_core_event(MALI_CORE_EVENT_PP_STOP);
+
+               /* Resolve any barriers */
+               if (!_mali_osk_list_empty(&session->job_list))
+               {
+                       job_head = _MALI_OSK_LIST_ENTRY(session->job_list.next, struct mali_pp_job, session_list);
+                       if (mali_pp_job_has_active_barrier(job_head))
+                       {
+                               barrier_enforced = MALI_TRUE;
+                               mali_pp_job_barrier_enforced(job_head);
+                       }
+               }
+       }
+
+       /* If paused, then this was the last job, so wake up sleeping workers */
+       if (pause_count > 0)
+       {
+               /* Wake up sleeping workers. Their wake-up condition is that
+                * num_slots == num_slots_idle, so unless we are done working, no
+                * threads will actually be woken up.
+                */
+               _mali_osk_wait_queue_wake_up(pp_scheduler_working_wait_queue);
+               mali_pp_scheduler_unlock();
+               return;
+       }
+
+       if (barrier_enforced)
+       {
+               /* A barrier was resolved, so schedule previously blocked jobs */
+               _mali_osk_wq_schedule_work(pp_scheduler_wq_schedule);
+
+               /* TODO: Subjob optimisation */
+       }
+
+       /* Recycle variables: from here on, job/sub_job refer to the NEXT job to
+        * start, not the one that just completed (which may already be freed). */
+       job = NULL;
+       sub_job = 0;
+
+       if (mali_group_is_virtual(group))
+       {
+               /* Virtual group */
+
+               /* Now that the virtual group is idle, check if we should reconfigure */
+               struct mali_pp_job *physical_job = NULL;
+               struct mali_group *physical_group = NULL;
+
+               /* Obey the policy */
+               virtual_group_working = MALI_FALSE;
+
+               if (mali_pp_scheduler_can_move_virtual_to_physical())
+               {
+                       /* There is a runnable physical job and we can acquire a physical group */
+                       physical_job = mali_pp_scheduler_get_physical_job();
+                       MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(physical_job));
+
+                       /* Mark subjob as started */
+                       sub_job = mali_pp_job_get_first_unstarted_sub_job(physical_job);
+                       mali_pp_job_mark_sub_job_started(physical_job, sub_job);
+
+                       /* Remove job from queue (if we now got the last subjob) */
+                       mali_pp_scheduler_dequeue_physical_job(physical_job);
+
+                       /* Acquire a physical group from the virtual group
+                        * It's state will be LEAVING_VIRTUAL and must be set to IDLE before it can be used */
+                       physical_group = mali_group_acquire_group(virtual_group);
+
+                       /* Move physical group to the working list, as we will soon start a job on it */
+                       _mali_osk_list_move(&(physical_group->pp_scheduler_list), &group_list_working);
+               }
+
+               /* Start the next virtual job */
+               job = mali_pp_scheduler_get_virtual_job();
+               if (NULL != job)
+               {
+                       /* There is a runnable virtual job */
+                       MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
+                       MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
+                       MALI_DEBUG_ASSERT(1 == mali_pp_job_get_sub_job_count(job));
+
+                       mali_pp_job_mark_sub_job_started(job, 0);
+
+                       /* Remove job from queue */
+                       mali_pp_scheduler_dequeue_virtual_job(job);
+
+                       /* Virtual group is now working */
+                       virtual_group_working = MALI_TRUE;
+
+                       /* Scheduler state is consistent; drop the scheduler lock before the
+                        * (potentially slow) register writes of the job start. */
+                       mali_pp_scheduler_unlock();
+
+                       /* Start job */
+                       if (_MALI_OSK_ERR_OK == mali_group_start_pp_job(group, job, 0))
+                       {
+                               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Virtual job %u (0x%08X) part %u/%u started (from job_done)\n",
+                                                    mali_pp_job_get_id(job), job, 1,
+                                                    mali_pp_job_get_sub_job_count(job)));
+                       }
+                       else
+                       {
+                               MALI_DEBUG_ASSERT(0);
+                       }
+               }
+               else
+               {
+                       mali_pp_scheduler_unlock();
+               }
+
+               /* Start a physical job (if we acquired a physical group earlier) */
+               if (NULL != physical_job && NULL != physical_group)
+               {
+                       mali_group_lock(physical_group);
+
+                       /* Set the group state from LEAVING_VIRTUAL to IDLE to complete the transition */
+                       physical_group->state = MALI_GROUP_STATE_IDLE;
+
+                       /* Start job on core */
+                       if (_MALI_OSK_ERR_OK == mali_group_start_pp_job(physical_group, physical_job, sub_job))
+                       {
+                               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from job_done)\n",
+                                                    mali_pp_job_get_id(physical_job), physical_job, sub_job + 1,
+                                                    mali_pp_job_get_sub_job_count(physical_job)));
+                       }
+                       else
+                       {
+                               /* @@@@ todo: this cant fail on Mali-300+, no need to implement put back of job */
+                               MALI_DEBUG_ASSERT(0);
+                       }
+
+                       mali_group_unlock(physical_group);
+               }
+       }
+       else
+       {
+               /* Physical group */
+               job = mali_pp_scheduler_get_physical_job();
+               if (NULL != job)
+               {
+                       /* There is a runnable physical job */
+                       MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
+
+                       /* Mark subjob as started */
+                       sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
+                       mali_pp_job_mark_sub_job_started(job, sub_job);
+
+                       /* Remove job from queue (if we now got the last subjob) */
+                       mali_pp_scheduler_dequeue_physical_job(job);
+
+                       mali_pp_scheduler_unlock();
+
+                       /* Group is already on the working list, so start the job */
+                       if (_MALI_OSK_ERR_OK == mali_group_start_pp_job(group, job, sub_job))
+                       {
+                               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from job_done)\n",
+                                                    mali_pp_job_get_id(job), job, sub_job + 1,
+                                                    mali_pp_job_get_sub_job_count(job)));
+                       }
+                       else
+                       {
+                               MALI_DEBUG_ASSERT(0);
+                       }
+               }
+               else if (NULL != virtual_group)
+               {
+                       /* Rejoin virtual group */
+                       /* TODO: In the future, a policy check might be useful here */
+
+                       /* We're no longer needed on the scheduler list */
+                       _mali_osk_list_delinit(&(group->pp_scheduler_list));
+
+                       /* Make sure no interrupts are handled for this group during
+                        * the transition from physical to virtual */
+                       group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
+
+                       /* Lock ordering requires group locks to be taken without the
+                        * scheduler lock held, and virtual before physical; so drop
+                        * both before taking the virtual group lock. */
+                       mali_pp_scheduler_unlock();
+                       mali_group_unlock(group);
+
+                       mali_group_lock(virtual_group);
+                       /* TODO: Take group lock also? */
+                       mali_group_add_group(virtual_group, group);
+                       mali_group_unlock(virtual_group);
+
+                       /* We need to return from this function with the group lock held */
+                       /* TODO: optimise! */
+                       mali_group_lock(group);
+               }
+               else
+               {
+                       _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
+                       mali_pp_scheduler_unlock();
+               }
+       }
+}
+
+/* Pause the PP scheduler and block until all groups are idle.
+ * Increments pause_count so no further jobs are scheduled, then sleeps on the
+ * working wait queue until mali_pp_scheduler_is_suspended() holds.
+ * Must be balanced by a later call to mali_pp_scheduler_resume(). */
+void mali_pp_scheduler_suspend(void)
+{
+       mali_pp_scheduler_lock();
+       pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
+       mali_pp_scheduler_unlock();
+
+       /* Go to sleep. When woken up again (in mali_pp_scheduler_job_done), the
+        * mali_pp_scheduler_suspended() function will be called. This will return true
+        * iff state is idle and pause_count > 0, so if the core is active this
+        * will not do anything.
+        */
+       _mali_osk_wait_queue_wait_event(pp_scheduler_working_wait_queue, mali_pp_scheduler_is_suspended);
+}
+
+/* Undo one mali_pp_scheduler_suspend() call. When the pause count drops to
+ * zero, kick the scheduler so jobs queued while paused get started. */
+void mali_pp_scheduler_resume(void)
+{
+       u32 pause_count_now;
+
+       mali_pp_scheduler_lock();
+       pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+       /* Sample the counter while still holding the scheduler lock. Reading
+        * pause_count after mali_pp_scheduler_unlock() would race with
+        * concurrent suspend()/resume() callers and could either miss the
+        * reschedule or trigger it while still paused. */
+       pause_count_now = pause_count;
+       mali_pp_scheduler_unlock();
+       if (0 == pause_count_now)
+       {
+               mali_pp_scheduler_schedule();
+       }
+}
+
+/* Put @job on the appropriate scheduler queue (virtual or physical) and on
+ * the session's job list, updating queue-depth accounting and barrier state.
+ * Does not start the job; callers trigger scheduling separately.
+ *
+ * @param job     Job to queue (ownership stays with the scheduler queues).
+ * @param session Submitting session; the job is appended to its job_list.
+ */
+MALI_STATIC_INLINE void mali_pp_scheduler_queue_job(struct mali_pp_job *job, struct mali_session_data *session)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       /* Account the job for power management before it becomes runnable. */
+       mali_pm_core_event(MALI_CORE_EVENT_PP_START);
+
+       mali_pp_scheduler_lock();
+
+       if (mali_pp_job_is_virtual(job))
+       {
+               /* Virtual job */
+               virtual_job_queue_depth += 1;
+               _mali_osk_list_addtail(&job->list, &virtual_job_queue);
+       }
+       else
+       {
+               /* Physical jobs count one queue slot per sub job. */
+               job_queue_depth += mali_pp_job_get_sub_job_count(job);
+               _mali_osk_list_addtail(&job->list, &job_queue);
+       }
+
+       if (mali_pp_job_has_active_barrier(job) && _mali_osk_list_empty(&session->job_list))
+       {
+               /* No running jobs on this session, so barrier condition already met */
+               mali_pp_job_barrier_enforced(job);
+       }
+
+       /* Add job to session list */
+       _mali_osk_list_addtail(&job->session_list, &session->job_list);
+
+       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) with %u parts queued\n",
+                            mali_pp_job_is_virtual(job) ? "Virtual" : "Physical",
+                            mali_pp_job_get_id(job), job, mali_pp_job_get_sub_job_count(job)));
+
+       mali_pp_scheduler_unlock();
+}
+
+#if defined(CONFIG_SYNC)
+/* MALI_SEC */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+/* Fence waiter callback — may run in atomic context, so only defer the
+ * actual job handling to process context via the job's work item. */
+static void sync_callback(struct sync_fence *fence, struct sync_fence_waiter *waiter)
+{
+       struct mali_pp_job *job = _MALI_OSK_CONTAINER_OF(waiter, struct mali_pp_job, sync_waiter);
+
+       /* Schedule sync_callback_work */
+       _mali_osk_wq_schedule_work(job->sync_work);
+}
+
+/* Process-context continuation of sync_callback(): the job's pre-fence has
+ * signalled (or errored). Removes the job from the session's pending list,
+ * then either queues + schedules it, or fails it back to user space.
+ *
+ * @param arg The struct mali_pp_job * whose pre-fence fired.
+ */
+static void sync_callback_work(void *arg)
+{
+       struct mali_pp_job *job = (struct mali_pp_job *)arg;
+       struct mali_session_data *session;
+       int err;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       session = job->session;
+
+       /* Remove job from session pending job list */
+       _mali_osk_lock_wait(session->pending_jobs_lock, _MALI_OSK_LOCKMODE_RW);
+       _mali_osk_list_delinit(&job->list);
+       _mali_osk_lock_signal(session->pending_jobs_lock, _MALI_OSK_LOCKMODE_RW);
+
+       /* The fence has already signalled, so this wait only fetches its status. */
+       err = sync_fence_wait(job->pre_fence, 0);
+       if (likely(0 == err))
+       {
+               MALI_DEBUG_PRINT(3, ("Mali sync: Job %d ready to run\n", mali_pp_job_get_id(job)));
+
+               mali_pp_scheduler_queue_job(job, session);
+
+               mali_pp_scheduler_schedule();
+       }
+       else
+       {
+               /* Fence signaled error */
+               MALI_DEBUG_PRINT(3, ("Mali sync: Job %d abort due to sync error\n", mali_pp_job_get_id(job)));
+
+               /* Propagate the error to the job's own post-fence, if any. */
+               if (job->sync_point) mali_sync_signal_pt(job->sync_point, err);
+
+               mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+               mali_pp_scheduler_return_job_to_user(job, MALI_FALSE); /* This will also delete the job object */
+       }
+}
+#endif
+#endif
+
+/* UK interface: create and queue a PP (fragment) job on behalf of @ctx.
+ *
+ * On success the job is either queued immediately or parked on the session's
+ * pending list until its pre-fence signals (CONFIG_SYNC, kernel >= 3.4).
+ * Invalid jobs and internal failures are reported back to user space through
+ * the job-finished notification, so this call still returns _MALI_OSK_ERR_OK
+ * in those cases; only job-object allocation failure yields an error code.
+ *
+ * @param ctx   Session pointer (struct mali_session_data *).
+ * @param uargs Job description from user space.
+ * @param fence Out: fd of the post-fence created for the job, if requested.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, _mali_uk_pp_start_job_s *uargs, int *fence)
+{
+       struct mali_session_data *session;
+       struct mali_pp_job *job;
+#if defined(CONFIG_SYNC)
+/* MALI_SEC */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+       int post_fence = -1;
+#endif
+#endif
+
+       MALI_DEBUG_ASSERT_POINTER(uargs);
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+
+       session = (struct mali_session_data*)ctx;
+
+       job = mali_pp_job_create(session, uargs, mali_scheduler_get_new_id());
+       if (NULL == job)
+       {
+               MALI_PRINT_ERROR(("Failed to create job!\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       if (_MALI_OSK_ERR_OK != mali_pp_job_check(job))
+       {
+               /* Not a valid job, return to user immediately */
+               mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+               mali_pp_scheduler_return_job_to_user(job, MALI_FALSE); /* This will also delete the job object */
+               return _MALI_OSK_ERR_OK; /* User is notified via a notification, so this call is ok */
+       }
+
+#if PROFILING_SKIP_PP_JOBS || PROFILING_SKIP_PP_AND_GP_JOBS
+#warning PP jobs will not be executed
+       mali_pp_scheduler_return_job_to_user(job, MALI_FALSE);
+       return _MALI_OSK_ERR_OK;
+#endif
+
+#if defined(CONFIG_SYNC)
+/* MALI_SEC */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+       /* Create a post-fence for this job if user space asked for one. */
+       if (_MALI_PP_JOB_FLAG_FENCE & job->uargs.flags)
+       {
+               job->sync_point = mali_stream_create_point(job->uargs.stream);
+
+               if (unlikely(NULL == job->sync_point))
+               {
+                       /* Fence creation failed. */
+                       MALI_DEBUG_PRINT(2, ("Failed to create sync point for job %d\n", mali_pp_job_get_id(job)));
+                       mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+                       mali_pp_scheduler_return_job_to_user(job, MALI_FALSE); /* This will also delete the job object */
+                       return _MALI_OSK_ERR_OK; /* User is notified via a notification, so this call is ok */
+               }
+
+               post_fence = mali_stream_create_fence(job->sync_point);
+
+               if (unlikely(0 > post_fence))
+               {
+                       /* Fence creation failed. */
+                       /* mali_stream_create_fence already freed the sync_point */
+                       MALI_DEBUG_PRINT(2, ("Failed to create fence for job %d\n", mali_pp_job_get_id(job)));
+                       mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+                       mali_pp_scheduler_return_job_to_user(job, MALI_FALSE); /* This will also delete the job object */
+                       return _MALI_OSK_ERR_OK; /* User is notified via a notification, so this call is ok */
+               }
+
+               /* Grab a reference to the fence. It must be around when the
+                * job is completed, so the point can be signalled. */
+               /* NOTE(review): the return value of sync_fence_fdget() is not
+                * checked here — confirm a failure at this point is impossible
+                * or harmless. */
+               sync_fence_fdget(post_fence);
+
+               *fence = post_fence;
+
+               MALI_DEBUG_PRINT(3, ("Sync: Created fence %d for job %d\n", post_fence, mali_pp_job_get_id(job)));
+       }
+
+       /* If a pre-fence fd was supplied, defer the job until it signals. */
+       if (0 < job->uargs.fence)
+       {
+               int pre_fence_fd = job->uargs.fence;
+               int err;
+
+               MALI_DEBUG_PRINT(2, ("Sync: Job %d waiting for fence %d\n", mali_pp_job_get_id(job), pre_fence_fd));
+
+               job->pre_fence = sync_fence_fdget(pre_fence_fd); /* Reference will be released when job is deleted. */
+               if (NULL == job->pre_fence)
+               {
+                       MALI_DEBUG_PRINT(2, ("Failed to import fence %d\n", pre_fence_fd));
+                       if (job->sync_point) mali_sync_signal_pt(job->sync_point, -EINVAL);
+                       mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+                       mali_pp_scheduler_return_job_to_user(job, MALI_FALSE); /* This will also delete the job object */
+                       return _MALI_OSK_ERR_OK; /* User is notified via a notification, so this call is ok */
+               }
+
+               job->sync_work = _mali_osk_wq_create_work(sync_callback_work, (void*)job);
+               if (NULL == job->sync_work)
+               {
+                       if (job->sync_point) mali_sync_signal_pt(job->sync_point, -ENOMEM);
+                       mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+                       mali_pp_scheduler_return_job_to_user(job, MALI_FALSE); /* This will also delete the job object */
+                       return _MALI_OSK_ERR_OK; /* User is notified via a notification, so this call is ok */
+               }
+
+               /* Add pending job to session pending job list */
+               _mali_osk_lock_wait(session->pending_jobs_lock, _MALI_OSK_LOCKMODE_RW);
+               _mali_osk_list_addtail(&job->list, &session->pending_jobs);
+               _mali_osk_lock_signal(session->pending_jobs_lock, _MALI_OSK_LOCKMODE_RW);
+
+               sync_fence_waiter_init(&job->sync_waiter, sync_callback);
+               /* err: 0 = waiter queued (sync_callback will run later),
+                * 1 = fence already signalled, <0 = error. */
+               err = sync_fence_wait_async(job->pre_fence, &job->sync_waiter);
+
+               if (0 != err)
+               {
+                       /* No async wait started, remove job from session pending job list */
+                       _mali_osk_lock_wait(session->pending_jobs_lock, _MALI_OSK_LOCKMODE_RW);
+                       _mali_osk_list_delinit(&job->list);
+                       _mali_osk_lock_signal(session->pending_jobs_lock, _MALI_OSK_LOCKMODE_RW);
+               }
+
+               if (1 == err)
+               {
+                       /* Fence has already signalled */
+                       mali_pp_scheduler_queue_job(job, session);
+                       if (0 == _mali_osk_list_empty(&group_list_idle)) mali_pp_scheduler_schedule();
+                       return _MALI_OSK_ERR_OK;
+               }
+               else if (0 > err)
+               {
+                       /* Sync fail */
+                       if (job->sync_point) mali_sync_signal_pt(job->sync_point, err);
+                       mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+                       mali_pp_scheduler_return_job_to_user(job, MALI_FALSE); /* This will also delete the job object */
+                       return _MALI_OSK_ERR_OK; /* User is notified via a notification, so this call is ok */
+               }
+
+       }
+       else
+#endif
+#endif /* CONFIG_SYNC */
+       {
+               /* No pre-fence: queue immediately and schedule if hardware is free. */
+               mali_pp_scheduler_queue_job(job, session);
+
+               if (!_mali_osk_list_empty(&group_list_idle) || !virtual_group_working)
+               {
+                       mali_pp_scheduler_schedule();
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* UK interface: report the number of PP cores to user space. */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT_POINTER(args->ctx);
+
+       args->number_of_cores = num_cores;
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* UK interface: report the PP core hardware version to user space. */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT_POINTER(args->ctx);
+
+       args->version = pp_version;
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* UK interface: disable selected write-back units on a queued job.
+ * Scans the physical job queue for the first job belonging to this session
+ * with a matching frame builder id and flush id, and disables the write-back
+ * units requested in args->wbx. Queued-only: already-started jobs are not
+ * touched. */
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
+{
+       struct mali_session_data *session;
+       struct mali_pp_job *job;
+       struct mali_pp_job *tmp;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT_POINTER(args->ctx);
+
+       session = (struct mali_session_data*)args->ctx;
+
+       /* Check queue for jobs that match */
+       mali_pp_scheduler_lock();
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue, struct mali_pp_job, list)
+       {
+               /* Skip jobs that do not match session, frame builder or flush id. */
+               if (mali_pp_job_get_session(job) != session) continue;
+               if (mali_pp_job_get_frame_builder_id(job) != (u32)args->fb_id) continue;
+               if (mali_pp_job_get_flush_id(job) != (u32)args->flush_id) continue;
+
+               if (args->wbx & _MALI_UK_PP_JOB_WB0) mali_pp_job_disable_wb0(job);
+               if (args->wbx & _MALI_UK_PP_JOB_WB1) mali_pp_job_disable_wb1(job);
+               if (args->wbx & _MALI_UK_PP_JOB_WB2) mali_pp_job_disable_wb2(job);
+
+               break; /* only the first matching job is affected */
+       }
+       mali_pp_scheduler_unlock();
+}
+
+/* Abort all PP jobs belonging to @session.
+ *
+ * Queued jobs are unlinked from the job queue and their unstarted sub jobs
+ * marked failed; jobs that are then fully complete are deleted here.  Jobs
+ * with sub jobs still running on a group are aborted via
+ * mali_group_abort_session() after the scheduler lock has been released. */
+void mali_pp_scheduler_abort_session(struct mali_session_data *session)
+{
+       struct mali_pp_job *job, *tmp_job;
+       struct mali_group *group, *tmp_group;
+       struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
+       s32 i = 0;
+
+       mali_pp_scheduler_lock();
+       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Aborting all jobs from session 0x%08x\n", session));
+
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp_job, &session->job_list, struct mali_pp_job, session_list)
+       {
+               /* Remove job from queue (if it's not queued, list_del has no effect) */
+               _mali_osk_list_delinit(&job->list);
+
+               /* Keep the queue depth counters consistent with the removal:
+                * a virtual job counts as one queue entry (only if not yet
+                * started), a physical job by its remaining sub jobs. */
+               if (mali_pp_job_is_virtual(job))
+               {
+                       MALI_DEBUG_ASSERT(1 == mali_pp_job_get_sub_job_count(job));
+                       if (0 == mali_pp_job_get_first_unstarted_sub_job(job))
+                       {
+                               --virtual_job_queue_depth;
+                       }
+               }
+               else
+               {
+                       job_queue_depth -= mali_pp_job_get_sub_job_count(job) - mali_pp_job_get_first_unstarted_sub_job(job);
+               }
+
+               /* Mark all unstarted jobs as failed */
+               mali_pp_job_mark_unstarted_failed(job);
+
+               if (mali_pp_job_is_complete(job))
+               {
+                       _mali_osk_list_del(&job->session_list);
+
+                       /* It is safe to delete the job, since it won't land in job_done() */
+                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Aborted PP job 0x%08x\n", job));
+                       mali_pp_job_delete(job);
+
+                       mali_pm_core_event(MALI_CORE_EVENT_PP_STOP);
+               }
+               else
+               {
+                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Keeping partially started PP job 0x%08x in session\n", job));
+               }
+       }
+
+       /* Snapshot all known physical groups so they can be aborted below
+        * without holding the scheduler lock. */
+       _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working, struct mali_group, pp_scheduler_list)
+       {
+               groups[i++] = group;
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_idle, struct mali_group, pp_scheduler_list)
+       {
+               groups[i++] = group;
+       }
+
+       mali_pp_scheduler_unlock();
+
+       /* Abort running jobs from this session */
+       while (i > 0)
+       {
+               mali_group_abort_session(groups[--i], session);
+       }
+
+       if (NULL != virtual_group)
+       {
+               mali_group_abort_session(virtual_group, session);
+       }
+}
+
+/* Returns MALI_TRUE when a pause has been requested (pause_count > 0) and
+ * no physical group is working and the virtual group is idle. */
+static mali_bool mali_pp_scheduler_is_suspended(void)
+{
+       mali_bool ret;
+
+       mali_pp_scheduler_lock();
+       ret = pause_count > 0 && _mali_osk_list_empty(&group_list_working) && !virtual_group_working;
+       mali_pp_scheduler_unlock();
+
+       return ret;
+}
+
+/* Current depth of the physical PP job queue (read without the scheduler
+ * lock; callers get a snapshot only). */
+int mali_pp_scheduler_get_queue_depth(void)
+{
+       return job_queue_depth;
+}
+
+#if MALI_STATE_TRACKING
+/* Dump human-readable PP scheduler state into @buf (at most @size bytes):
+ * job queue status plus the state of every working, idle and virtual group.
+ * Returns the number of characters written.  NOTE(review): iterates the
+ * group lists without taking the scheduler lock -- presumably acceptable for
+ * a debug dump; confirm against the other *_dump_state users. */
+u32 mali_pp_scheduler_dump_state(char *buf, u32 size)
+{
+       int n = 0;
+       struct mali_group *group;
+       struct mali_group *temp;
+
+       n += _mali_osk_snprintf(buf + n, size - n, "PP:\n");
+       n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue) ? "empty" : "not empty");
+       n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, pp_scheduler_list)
+       {
+               n += mali_group_dump_state(group, buf + n, size - n);
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list)
+       {
+               n += mali_group_dump_state(group, buf + n, size - n);
+       }
+
+       if (NULL != virtual_group)
+       {
+               n += mali_group_dump_state(virtual_group, buf + n, size -n);
+       }
+
+       n += _mali_osk_snprintf(buf + n, size - n, "\n");
+       return n;
+}
+#endif
+
+/* This function is intended for power on reset of all cores.
+ * No locking is done for the list iteration, which can only be safe if the
+ * scheduler is paused and all cores idle. That is always the case on init and
+ * power on. */
+void mali_pp_scheduler_reset_all_groups(void)
+{
+       struct mali_group *group, *temp;
+
+       /* Reset the virtual group first, if this core configuration has one */
+       if (NULL != virtual_group)
+       {
+               mali_group_reset(virtual_group);
+       }
+
+       /* All groups must be idle; a non-empty working list violates the
+        * precondition stated above */
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list)
+       {
+               mali_group_reset(group);
+       }
+}
+
+/* Zap the session on every group that is actively running work.
+ *
+ * The virtual group (if present) is zapped directly; the physical groups in
+ * the working list are snapshotted under the scheduler lock and zapped after
+ * the lock is released.  Idle groups are not touched. */
+void mali_pp_scheduler_zap_all_active(struct mali_session_data *session)
+{
+       struct mali_group *group, *temp;
+       struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
+       s32 i = 0;
+
+       if (NULL != virtual_group)
+       {
+               mali_group_zap_session(virtual_group, session);
+       }
+
+       mali_pp_scheduler_lock();
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, pp_scheduler_list)
+       {
+               groups[i++] = group;
+       }
+       mali_pp_scheduler_unlock();
+
+       /* Zap outside the lock, from the snapshot taken above */
+       while (i > 0)
+       {
+               mali_group_zap_session(groups[--i], session);
+       }
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_pp_scheduler.h b/drivers/gpu/arm/mali400/mali/common/mali_pp_scheduler.h
new file mode 100644 (file)
index 0000000..4b2fec4
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_SCHEDULER_H__
+#define __MALI_PP_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+
+_mali_osk_errcode_t mali_pp_scheduler_initialize(void);
+void mali_pp_scheduler_terminate(void);
+
+void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success);
+
+void mali_pp_scheduler_suspend(void);
+void mali_pp_scheduler_resume(void);
+
+/** @brief Abort all PP jobs from session running or queued
+ *
+ * This functions aborts all PP jobs from the specified session. Queued jobs are removed from the queue and jobs
+ * currently running on a core will be aborted.
+ *
+ * @param session Pointer to session whose jobs should be aborted
+ */
+void mali_pp_scheduler_abort_session(struct mali_session_data *session);
+
+/**
+ * @brief Reset all groups
+ *
+ * This function resets all groups known by the PP scheduler. This must be
+ * called after the Mali HW has been powered on in order to reset the HW.
+ *
+ * This function is intended for power on reset of all cores.
+ * No locking is done, which can only be safe if the scheduler is paused and
+ * all cores idle. That is always the case on init and power on.
+ */
+void mali_pp_scheduler_reset_all_groups(void);
+
+/**
+ * @brief Zap TLB on all groups with \a session active
+ *
+ * The scheduler will zap the session on all groups it owns.
+ */
+void mali_pp_scheduler_zap_all_active(struct mali_session_data *session);
+
+int mali_pp_scheduler_get_queue_depth(void);
+u32 mali_pp_scheduler_dump_state(char *buf, u32 size);
+
+#endif /* __MALI_PP_SCHEDULER_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_scheduler.c b/drivers/gpu/arm/mali400/mali/common/mali_scheduler.c
new file mode 100644 (file)
index 0000000..e5d8186
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+static _mali_osk_atomic_t mali_job_autonumber;
+
+/* Set up the global atomic job id counter, starting at 0.
+ * Returns _MALI_OSK_ERR_FAULT if the atomic cannot be initialized. */
+_mali_osk_errcode_t mali_scheduler_initialize(void)
+{
+       if ( _MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_autonumber, 0))
+       {
+               MALI_DEBUG_PRINT(1,  ("Initialization of atomic job id counter failed.\n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Tear down the global job id counter created by mali_scheduler_initialize(). */
+void mali_scheduler_terminate(void)
+{
+       _mali_osk_atomic_term(&mali_job_autonumber);
+}
+
+/* Return a fresh job id from the atomic counter.  Ids are unique until the
+ * u32 counter eventually wraps around. */
+u32 mali_scheduler_get_new_id(void)
+{
+       u32 job_id = _mali_osk_atomic_inc_return(&mali_job_autonumber);
+       return job_id;
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_scheduler.h b/drivers/gpu/arm/mali400/mali/common/mali_scheduler.h
new file mode 100644 (file)
index 0000000..4fff577
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_SCHEDULER_H__
+#define __MALI_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+
+_mali_osk_errcode_t mali_scheduler_initialize(void);
+void mali_scheduler_terminate(void);
+
+u32 mali_scheduler_get_new_id(void);
+
+/**
+ * @brief Reset all groups
+ *
+ * This function resets all groups known by both the PP and GP scheduler.
+ * This must be called after the Mali HW has been powered on in order to reset
+ * the HW.
+ */
+MALI_STATIC_INLINE void mali_scheduler_reset_all_groups(void)
+{
+       mali_gp_scheduler_reset_all_groups();
+       mali_pp_scheduler_reset_all_groups();
+}
+
+/**
+ * @brief Zap TLB on all active groups running \a session
+ *
+ * Zaps the session on the GP scheduler's groups first, then the PP
+ * scheduler's groups.
+ *
+ * @param session Pointer to the session to zap
+ */
+MALI_STATIC_INLINE void mali_scheduler_zap_all_active(struct mali_session_data *session)
+{
+       mali_gp_scheduler_zap_all_active(session);
+       mali_pp_scheduler_zap_all_active(session);
+}
+
+#endif /* __MALI_SCHEDULER_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_session.c b/drivers/gpu/arm/mali400/mali/common/mali_session.c
new file mode 100644 (file)
index 0000000..14729de
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_session.h"
+
+_MALI_OSK_LIST_HEAD(mali_sessions);
+
+_mali_osk_lock_t *mali_sessions_lock;
+
+/* Initialize the global session list and allocate its reader/writer lock.
+ * Returns _MALI_OSK_ERR_NOMEM if the lock cannot be allocated. */
+_mali_osk_errcode_t mali_session_initialize(void)
+{
+       _MALI_OSK_INIT_LIST_HEAD(&mali_sessions);
+
+       mali_sessions_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_ORDERED, 0, _MALI_OSK_LOCK_ORDER_SESSIONS);
+
+       if (NULL == mali_sessions_lock) return _MALI_OSK_ERR_NOMEM;
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Release the session list lock allocated by mali_session_initialize(). */
+void mali_session_terminate(void)
+{
+       _mali_osk_lock_term(mali_sessions_lock);
+}
+
+/* Link @session into the global session list (takes the session lock). */
+void mali_session_add(struct mali_session_data *session)
+{
+       mali_session_lock();
+       _mali_osk_list_add(&session->link, &mali_sessions);
+       mali_session_unlock();
+}
+
+/* Unlink @session from the global session list (takes the session lock);
+ * delinit leaves the link usable/empty afterwards. */
+void mali_session_remove(struct mali_session_data *session)
+{
+       mali_session_lock();
+       _mali_osk_list_delinit(&session->link);
+       mali_session_unlock();
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_session.h b/drivers/gpu/arm/mali400/mali/common/mali_session.h
new file mode 100644 (file)
index 0000000..0d1b672
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_SESSION_H__
+#define __MALI_SESSION_H__
+
+#include "mali_mmu_page_directory.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
+struct mali_session_data
+{
+       /* Queue of notifications delivered to this session's user space
+        * (see mali_session_send_notification()). */
+       _mali_osk_notification_queue_t * ioctl_queue;
+
+#ifdef CONFIG_SYNC
+/* MALI_SEC */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+       /* NOTE(review): presumably jobs waiting on a sync fence before being
+        * queued, plus the lock protecting that list -- confirm against the
+        * CONFIG_SYNC job submission path. */
+       _mali_osk_list_t pending_jobs;
+       _mali_osk_lock_t *pending_jobs_lock;
+#endif
+#endif
+
+       _mali_osk_lock_t *memory_lock; /**< Lock protecting the vm manipulation */
+       mali_descriptor_mapping * descriptor_mapping; /**< Mapping between userspace descriptors and our pointers */
+       _mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */
+
+       struct mali_page_directory *page_directory; /**< MMU page directory for this session */
+
+       _MALI_OSK_LIST_HEAD(link); /**< Link for list of all sessions */
+
+       _MALI_OSK_LIST_HEAD(job_list); /**< List of all jobs on this session */
+};
+
+_mali_osk_errcode_t mali_session_initialize(void);
+void mali_session_terminate(void);
+
+/* List of all sessions. Actual list head defined in mali_session.c */
+extern _mali_osk_list_t mali_sessions;
+/* Lock to protect modification and access to the mali_sessions list */
+extern _mali_osk_lock_t *mali_sessions_lock;
+
+/* Acquire the global session list lock in read/write mode. */
+MALI_STATIC_INLINE void mali_session_lock(void)
+{
+       _mali_osk_lock_wait(mali_sessions_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/* Release the global session list lock taken by mali_session_lock(). */
+MALI_STATIC_INLINE void mali_session_unlock(void)
+{
+       _mali_osk_lock_signal(mali_sessions_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+void mali_session_add(struct mali_session_data *session);
+void mali_session_remove(struct mali_session_data *session);
+#define MALI_SESSION_FOREACH(session, tmp, link) \
+       _MALI_OSK_LIST_FOREACHENTRY(session, tmp, &mali_sessions, struct mali_session_data, link)
+
+/* Return the MMU page directory associated with @session. */
+MALI_STATIC_INLINE struct mali_page_directory *mali_session_get_page_directory(struct mali_session_data *session)
+{
+       return session->page_directory;
+}
+
+/* Post @object on the session's ioctl notification queue for user space. */
+MALI_STATIC_INLINE void mali_session_send_notification(struct mali_session_data *session, _mali_osk_notification_t *object)
+{
+       _mali_osk_notification_queue_send(session->ioctl_queue, object);
+}
+
+#endif /* __MALI_SESSION_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_ukk.h b/drivers/gpu/arm/mali400/mali/common/mali_ukk.h
new file mode 100644 (file)
index 0000000..a3a7e7b
--- /dev/null
@@ -0,0 +1,620 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __MALI_UKK_H__
+#define __MALI_UKK_H__
+
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * - The _mali_uk functions are an abstraction of the interface to the device
+ * driver. On certain OSs, this would be implemented via the IOCTL interface.
+ * On other OSs, it could be via extension of some Device Driver Class, or
+ * direct function call for Bare metal/RTOSs.
+ * - It is important to note that:
+ *   -  The Device Driver has implemented the _mali_ukk set of functions
+ *   -  The Base Driver calls the corresponding set of _mali_uku functions.
+ * - What requires porting is solely the calling mechanism from User-side to
+ * Kernel-side, and propagating back the results.
+ * - Each U/K function is associated with a (group, number) pair from
+ * \ref _mali_uk_functions to make it possible for a common function in the
+ * Base Driver and Device Driver to route User/Kernel calls from/to the
+ * correct _mali_uk function. For example, in an IOCTL system, the IOCTL number
+ * would be formed based on the group and number assigned to the _mali_uk
+ * function, as listed in \ref _mali_uk_functions. On the user-side, each
+ * _mali_uku function would just make an IOCTL with the IOCTL-code being an
+ * encoded form of the (group, number) pair. On the kernel-side, the Device
+ * Driver's IOCTL handler decodes the IOCTL-code back into a (group, number)
+ * pair, and uses this to determine which corresponding _mali_ukk should be
+ * called.
+ *   - Refer to \ref _mali_uk_functions for more information about this
+ * (group, number) pairing.
+ * - In a system where there is no distinction between user and kernel-side,
+ * the U/K interface may be implemented as:@code
+ * MALI_STATIC_INLINE _mali_osk_errcode_t _mali_uku_examplefunction( _mali_uk_examplefunction_s *args )
+ * {
+ *     return mali_ukk_examplefunction( args );
+ * }
+ * @endcode
+ * - Therefore, all U/K calls behave \em as \em though they were direct
+ * function calls (but the \b implementation \em need \em not be a direct
+ * function calls)
+ *
+ * @note Naming the _mali_uk functions the same on both User and Kernel sides
+ * on non-RTOS systems causes debugging issues when setting breakpoints. In
+ * this case, it is not clear which function the breakpoint is put on.
+ * Therefore the _mali_uk functions in user space are prefixed with \c _mali_uku
+ * and in kernel space with \c _mali_ukk. The naming for the argument
+ * structures is unaffected.
+ *
+ * - The _mali_uk functions are synchronous.
+ * - Arguments to the _mali_uk functions are passed in a structure. The only
+ * parameter passed to the _mali_uk functions is a pointer to this structure.
+ * This first member of this structure, ctx, is a pointer to a context returned
+ * by _mali_uku_open(). For example:@code
+ * typedef struct
+ * {
+ *     void *ctx;
+ *     u32 number_of_cores;
+ * } _mali_uk_get_gp_number_of_cores_s;
+ * @endcode
+ *
+ * - Each _mali_uk function has its own argument structure named after the
+ *  function. The argument is distinguished by the _s suffix.
+ * - The argument types are defined by the base driver and user-kernel
+ *  interface.
+ * - All _mali_uk functions return a standard \ref _mali_osk_errcode_t.
+ * - Only arguments of type input or input/output need be initialized before
+ * calling a _mali_uk function.
+ * - Arguments of type output and input/output are only valid when the
+ * _mali_uk function returns \ref _MALI_OSK_ERR_OK.
+ * - The \c ctx member is always invalid after it has been used by a
+ * _mali_uk function, except for the context management functions
+ *
+ *
+ * \b Interface \b restrictions
+ *
+ * The requirements of the interface mean that an implementation of the
+ * User-kernel interface may do no 'real' work. For example, the following are
+ * illegal in the User-kernel implementation:
+ * - Calling functions necessary for operation on all systems,  which would
+ * not otherwise get called on RTOS systems.
+ *     - For example, a  U/K interface that calls multiple _mali_ukk functions
+ * during one particular U/K call. This could not be achieved by the same code
+ * which uses direct function calls for the U/K interface.
+ * -  Writing in values to the args members, when otherwise these members would
+ * not hold a useful value for a direct function call U/K interface.
+ *     - For example, U/K interface implementation that take NULL members in
+ * their arguments structure from the user side, but those members are
+ * replaced with non-NULL values in the kernel-side of the U/K interface
+ * implementation. A scratch area for writing data is one such example. In this
+ * case, a direct function call U/K interface would segfault, because no code
+ * would be present to replace the NULL pointer with a meaningful pointer.
+ *     - Note that we discourage the case where the U/K implementation changes
+ * a NULL argument member to non-NULL, and then the Device Driver code (outside
+ * of the U/K layer) re-checks this member for NULL, and corrects it when
+ * necessary. Whilst such code works even on direct function call U/K
+ * interfaces, it reduces the testing coverage of the Device Driver code. This
+ * is because we have no way of testing the NULL == value path on an OS
+ * implementation.
+ *
+ * A number of allowable examples exist where U/K interfaces do 'real' work:
+ * - The 'pointer switching' technique for \ref _mali_ukk_get_system_info
+ *     - In this case, without the pointer switching on direct function call
+ * U/K interface, the Device Driver code still sees the same thing: a pointer
+ * to which it can write memory. This is because such a system has no
+ * distinction between a user and kernel pointer.
+ * - Writing an OS-specific value into the ukk_private member for
+ * _mali_ukk_mem_mmap().
+ *     - In this case, this value is passed around by Device Driver code, but
+ * its actual value is never checked. Device Driver code simply passes it from
+ * the U/K layer to the OSK layer, where it can be acted upon. In this case,
+ * \em some OS implementations of the U/K (_mali_ukk_mem_mmap()) and OSK
+ * (_mali_osk_mem_mapregion_init()) functions will collaborate on the
+ *  meaning of ukk_private member. On other OSs, it may be unused by both
+ * U/K and OSK layers
+ *     - Therefore, on error inside the U/K interface implementation itself,
+ * it will be as though the _mali_ukk function itself had failed, and cleaned
+ * up after itself.
+ *     - Compare this to a direct function call U/K implementation, where all
+ * error cleanup is handled by the _mali_ukk function itself. The direct
+ * function call U/K interface implementation is automatically atomic.
+ *
+ * The last example highlights a consequence of all U/K interface
+ * implementations: they must be atomic with respect to the Device Driver code.
+ * And therefore, should Device Driver code succeed but the U/K implementation
+ * fail afterwards (but before return to user-space), then the U/K
+ * implementation must cause appropriate cleanup actions to preserve the
+ * atomicity of the interface.
+ *
+ * @{
+ */
+
+
+/** @defgroup _mali_uk_context U/K Context management
+ *
+ * These functions allow for initialisation of the user-kernel interface once per process.
+ *
+ * Generally the context will store the OS specific object to communicate with the kernel device driver and further
+ * state information required by the specific implementation. The context is shareable among all threads in the caller process.
+ *
+ * On IOCTL systems, this is likely to be a file descriptor as a result of opening the kernel device driver.
+ *
+ * On a bare-metal/RTOS system with no distinction between kernel and
+ * user-space, the U/K interface simply calls the _mali_ukk variant of the
+ * function by direct function call. In this case, the context returned is the
+ * mali_session_data from _mali_ukk_open().
+ *
+ * The kernel side implementations of the U/K interface expect the first member of the argument structure to
+ * be the context created by _mali_uku_open(). On some OS implementations, the meaning of this context
+ * will be different between user-side and kernel-side. In which case, the kernel-side will need to replace this context
+ * with the kernel-side equivalent, because user-side will not have access to kernel-side data. The context parameter
+ * in the argument structure therefore has to be of type input/output.
+ *
+ * It should be noted that the caller cannot reuse the \c ctx member of U/K
+ * argument structure after a U/K call, because it may be overwritten. Instead,
+ * the context handle must always be stored  elsewhere, and copied into
+ * the appropriate U/K argument structure for each user-side call to
+ * the U/K interface. This is not usually a problem, since U/K argument
+ * structures are usually placed on the stack.
+ *
+ * @{ */
+
+/** @brief Begin a new Mali Device Driver session
+ *
+ * This is used to obtain a per-process context handle for all future U/K calls.
+ *
+ * @param context pointer to storage to return a (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_open( void **context );
+
+/** @brief End a Mali Device Driver session
+ *
+ * This should be called when the process no longer requires use of the Mali Device Driver.
+ *
+ * The context handle must not be used after it has been closed.
+ *
+ * @param context pointer to a stored (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_close( void **context );
+
+/** @} */ /* end group _mali_uk_context */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ *
+ * The core functions provide the following functionality:
+ * - verify that the user and kernel API are compatible
+ * - retrieve information about the cores and memory banks in the system
+ * - wait for the result of jobs started on a core
+ *
+ * @{ */
+
+/** @brief Waits for a job notification.
+ *
+ * Sleeps until notified or a timeout occurs. Returns information about the notification.
+ *
+ * @param args see _mali_uk_wait_for_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args );
+
+/** @brief Post a notification to the notification queue of this application.
+ *
+ * @param args see _mali_uk_post_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args );
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * @param args see _mali_uk_get_api_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args );
+
+/** @brief Get the user space settings applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_settings_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args);
+
+/** @brief Get a user space setting applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_setting_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args);
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ *
+ * The memory functions provide functionality with and without a Mali-MMU present.
+ *
+ * For Mali-MMU based systems, the following functionality is provided:
+ * - Initialize and terminate MALI virtual address space
+ * - Allocate/deallocate physical memory to a MALI virtual address range and map into/unmap from the
+ * current process address space
+ * - Map/unmap external physical memory into the MALI virtual address range
+ *
+ * For Mali-nonMMU based systems:
+ * - Allocate/deallocate MALI memory
+ *
+ * @{ */
+
+/**
+ * @brief Initialize the Mali-MMU Memory system
+ *
+ * For Mali-MMU builds of the drivers, this function must be called before any
+ * other functions in the \ref _mali_uk_memory group are called.
+ *
+ * @note This function is for Mali-MMU builds \b only. It should not be called
+ * when the drivers are built without Mali-MMU support.
+ *
+ * @param args see \ref _mali_uk_init_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args );
+
+/**
+ * @brief Terminate the MMU Memory system
+ *
+ * For Mali-MMU builds of the drivers, this function must be called when
+ * functions in the \ref _mali_uk_memory group will no longer be called. This
+ * function must be called before the application terminates.
+ *
+ * @note This function is for Mali-MMU builds \b only. It should not be called
+ * when the drivers are built without Mali-MMU support.
+ *
+ * @param args see \ref _mali_uk_term_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args );
+
+/** @brief Map Mali Memory into the current user process
+ *
+ * Maps Mali memory into the current user process in a generic way.
+ *
+ * This function is to be used for Mali-MMU mode. The function is available in both Mali-MMU and Mali-nonMMU modes,
+ * but should not be called by a user process in Mali-nonMMU mode.
+ *
+ * The implementation and operation of _mali_ukk_mem_mmap() is dependent on whether the driver is built for Mali-MMU
+ * or Mali-nonMMU:
+ * - In the nonMMU case, _mali_ukk_mem_mmap() requires a physical address to be specified. For this reason, an OS U/K
+ * implementation should not allow this to be called from user-space. In any case, nonMMU implementations are
+ * inherently insecure, and so the overall impact is minimal. Mali-MMU mode should be used if security is desired.
+ * - In the MMU case, _mali_ukk_mem_mmap() the _mali_uk_mem_mmap_s::phys_addr
+ * member is used for the \em Mali-virtual address desired for the mapping. The
+ * implementation of _mali_ukk_mem_mmap() will allocate both the CPU-virtual
+ * and CPU-physical addresses, and can cope with mapping a contiguous virtual
+ * address range to a sequence of non-contiguous physical pages. In this case,
+ * the CPU-physical addresses are not communicated back to the user-side, as
+ * they are unnecessary; the \em Mali-virtual address range must be used for
+ * programming Mali structures.
+ *
+ * In the second (MMU) case, _mali_ukk_mem_mmap() handles management of
+ * CPU-virtual and CPU-physical ranges, but the \em caller must manage the
+ * \em Mali-virtual address range from the user-side.
+ *
+ * @note Mali-virtual address ranges are entirely separate between processes.
+ * It is not possible for a process to accidentally corrupt another process'
+ * \em Mali-virtual address space.
+ *
+ * @param args see _mali_uk_mem_mmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args );
+
+/** @brief Unmap Mali Memory from the current user process
+ *
+ * Unmaps Mali memory from the current user process in a generic way. This only operates on Mali memory supplied
+ * from _mali_ukk_mem_mmap().
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args );
+
+/** @brief Determine the buffer size necessary for an MMU page table dump.
+ * @param args see _mali_uk_query_mmu_page_table_dump_size_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args );
+/** @brief Dump MMU Page tables.
+ * @param args see _mali_uk_dump_mmu_page_table_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args );
+
+/** @brief Map a physically contiguous range of memory into Mali
+ * @param args see _mali_uk_map_external_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args );
+
+/** @brief Unmap a physically contiguous range of memory from Mali
+ * @param args see _mali_uk_unmap_external_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args );
+
+#if defined(CONFIG_MALI400_UMP)
+/** @brief Map UMP memory into Mali
+ * @param args see _mali_uk_attach_ump_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args );
+/** @brief Unmap UMP memory from Mali
+ * @param args see _mali_uk_release_ump_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args );
+#endif /* CONFIG_MALI400_UMP */
+
+/** @brief Determine virtual-to-physical mapping of a contiguous memory range
+ * (optional)
+ *
+ * This allows the user-side to do a virtual-to-physical address translation.
+ * In conjunction with _mali_uku_map_external_mem, this can be used to do
+ * direct rendering.
+ *
+ * This function will only succeed on a virtual range that is mapped into the
+ * current process, and that is contiguous.
+ *
+ * If va is not page-aligned, then it is rounded down to the next page
+ * boundary. The remainder is added to size, such that ((u32)va)+size before
+ * rounding is equal to ((u32)va)+size after rounding. The rounded modified
+ * va and size will be written out into args on success.
+ *
+ * If the supplied size is zero, or not a multiple of the system's PAGE_SIZE,
+ * then size will be rounded up to the next multiple of PAGE_SIZE before
+ * translation occurs. The rounded up size will be written out into args on
+ * success.
+ *
+ * On most OSs, virtual-to-physical address translation is a privileged
+ * function. Therefore, the implementer must validate the range supplied, to
+ * ensure they are not providing arbitrary virtual-to-physical address
+ * translations. While it is unlikely such a mechanism could be used to
+ * compromise the security of a system on its own, it is possible it could be
+ * combined with another small security risk to cause a much larger security
+ * risk.
+ *
+ * @note This is an optional part of the interface, and is only used by certain
+ * implementations of libEGL. If the platform layer in your libEGL
+ * implementation does not require Virtual-to-Physical address translation,
+ * then this function need not be implemented. A stub implementation should not
+ * be required either, as it would only be removed by the compiler's dead code
+ * elimination.
+ *
+ * @note if implemented, this function is entirely platform-dependent, and does
+ * not exist in common code.
+ *
+ * @param args see _mali_uk_va_to_mali_pa_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_va_to_mali_pa( _mali_uk_va_to_mali_pa_s * args );
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ *
+ * The Fragment Processor (aka PP (Pixel Processor)) functions provide the following functionality:
+ * - retrieving version of the fragment processors
+ * - determine number of fragment processors
+ * - starting a job on a fragment processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Fragment Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started instead and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_pp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_start_job( void *ctx, _mali_uk_pp_start_job_s *uargs, int *fence );
+
+/** @brief Returns the number of Fragment Processors in the system
+ *
+ * @param args see _mali_uk_get_pp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores( _mali_uk_get_pp_number_of_cores_s *args );
+
+/** @brief Returns the version that all Fragment Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_pp_number_of_cores() indicated at least one Fragment
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version( _mali_uk_get_pp_core_version_s *args );
+
+/** @brief Disable Write-back unit(s) on specified job
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h"
+ */
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args);
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ *
+ * The Vertex Processor (aka GP (Geometry Processor)) functions provide the following functionality:
+ * - retrieving version of the Vertex Processors
+ * - determine number of Vertex Processors available
+ * - starting a job on a Vertex Processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Vertex Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_start_job( void *ctx, _mali_uk_gp_start_job_s *uargs );
+
+/** @brief Returns the number of Vertex Processors in the system.
+ *
+ * @param args see _mali_uk_get_gp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores( _mali_uk_get_gp_number_of_cores_s *args );
+
+/** @brief Returns the version that all Vertex Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_uk_get_gp_number_of_cores() indicated at least one Vertex
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_gp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version( _mali_uk_get_gp_core_version_s *args );
+
+/** @brief Resume or abort suspended Vertex Processor jobs.
+ *
+ * After receiving notification that a Vertex Processor job was suspended from
+ * _mali_ukk_wait_for_notification() you can use this function to resume or abort the job.
+ *
+ * @param args see _mali_uk_gp_suspend_response_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response( _mali_uk_gp_suspend_response_s *args );
+
+/** @} */ /* end group _mali_uk_gp */
+
+#if defined(CONFIG_MALI400_PROFILING)
+/** @addtogroup _mali_uk_profiling U/K Timeline profiling module
+ * @{ */
+
+/** @brief Start recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_start_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args);
+
+/** @brief Add event to profiling buffer.
+ *
+ * @param args see _mali_uk_profiling_add_event_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args);
+
+/** @brief Stop recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_stop_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args);
+
+/** @brief Retrieve a recorded profiling event.
+ *
+ * @param args see _mali_uk_profiling_get_event_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args);
+
+/** @brief Clear recorded profiling events.
+ *
+ * @param args see _mali_uk_profiling_clear_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args);
+
+/** @} */ /* end group _mali_uk_profiling */
+#endif
+
+/** @addtogroup _mali_uk_vsync U/K VSYNC reporting module
+ * @{ */
+
+/** @brief Report events related to vsync.
+ *
+ * @note Events should be reported when starting to wait for vsync and when the
+ * waiting is finished. This information can then be used in kernel space to
+ * complement the GPU utilization metric.
+ *
+ * @param args see _mali_uk_vsync_event_report_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args);
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @addtogroup _mali_sw_counters_report U/K Software counter reporting
+ * @{ */
+
+/** @brief Report software counters.
+ *
+ * @param args see _mali_uk_sw_counters_report_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args);
+
+/** @} */ /* end group _mali_sw_counters_report */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+u32 _mali_ukk_report_memory_usage(void);
+
+u32 _mali_ukk_utilization_gp_pp(void);
+
+u32 _mali_ukk_utilization_gp(void);
+
+u32 _mali_ukk_utilization_pp(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_user_settings_db.c b/drivers/gpu/arm/mali400/mali/common/mali_user_settings_db.c
new file mode 100644 (file)
index 0000000..389ab69
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_user_settings_db.h"
+#include "mali_session.h"
+
+static u32 mali_user_settings[_MALI_UK_USER_SETTING_MAX];
+const char *_mali_uk_user_setting_descriptions[] = _MALI_UK_USER_SETTING_DESCRIPTIONS;
+
+static void mali_user_settings_notify(_mali_uk_user_setting_t setting, u32 value)
+{
+       struct mali_session_data *session, *tmp;
+
+       mali_session_lock();
+       MALI_SESSION_FOREACH(session, tmp, link)
+       {
+               _mali_osk_notification_t *notobj = _mali_osk_notification_create(_MALI_NOTIFICATION_SETTINGS_CHANGED, sizeof(_mali_uk_settings_changed_s));
+               _mali_uk_settings_changed_s *data = notobj->result_buffer;
+               data->setting = setting;
+               data->value = value;
+
+               mali_session_send_notification(session, notobj);
+       }
+       mali_session_unlock();
+}
+
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value)
+{
+       mali_bool notify = MALI_FALSE;
+
+       MALI_DEBUG_ASSERT(setting < _MALI_UK_USER_SETTING_MAX && setting >= 0);
+
+       if (mali_user_settings[setting] != value)
+       {
+               notify = MALI_TRUE;
+       }
+
+       mali_user_settings[setting] = value;
+
+       if (notify)
+       {
+               mali_user_settings_notify(setting, value);
+       }
+}
+
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting)
+{
+       MALI_DEBUG_ASSERT(setting < _MALI_UK_USER_SETTING_MAX && setting >= 0);
+
+       return mali_user_settings[setting];
+}
+
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args)
+{
+       _mali_uk_user_setting_t setting;
+       MALI_DEBUG_ASSERT_POINTER(args);
+
+       setting = args->setting;
+
+       if (0 <= setting && _MALI_UK_USER_SETTING_MAX > setting)
+       {
+               args->value = mali_user_settings[setting];
+               return _MALI_OSK_ERR_OK;
+       }
+       else
+       {
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       }
+}
+
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+
+       _mali_osk_memcpy(args->settings, mali_user_settings, sizeof(mali_user_settings));
+
+       return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/arm/mali400/mali/common/mali_user_settings_db.h b/drivers/gpu/arm/mali400/mali/common/mali_user_settings_db.h
new file mode 100644 (file)
index 0000000..c93dfc7
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_USER_SETTINGS_DB_H__
+#define __MALI_USER_SETTINGS_DB_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include "mali_uk_types.h"
+
+/** @brief Set Mali user setting in DB
+ *
+ * Update the DB with a new value for \a setting. If the value is different from the previous set value, running sessions will be notified of the change.
+ *
+ * @param setting the setting to be changed
+ * @param value the new value to set
+ */
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value);
+
+/** @brief Get current Mali user setting value from DB
+ *
+ * @param setting the setting to extract
+ * @return the value of the selected setting
+ */
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  /* __MALI_USER_SETTINGS_DB_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/include/linux/mali/mali_utgard.h b/drivers/gpu/arm/mali400/mali/include/linux/mali/mali_utgard.h
new file mode 100644 (file)
index 0000000..71ec677
--- /dev/null
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_utgard.h
+ * Defines types and interface exposed by the Mali Utgard device driver
+ */
+
+#ifndef __MALI_UTGARD_H__
+#define __MALI_UTGARD_H__
+
+#define MALI_GPU_NAME_UTGARD "mali-utgard"
+
+/* Mali-200 */
+
+#define MALI_GPU_RESOURCES_MALI200(base_addr, gp_irq, pp_irq, mmu_irq) \
+       MALI_GPU_RESOURCE_PP(base_addr + 0x0000, pp_irq) \
+       MALI_GPU_RESOURCE_GP(base_addr + 0x2000, gp_irq) \
+       MALI_GPU_RESOURCE_MMU(base_addr + 0x3000, mmu_irq)
+
+/* Mali-300 */
+
+#define MALI_GPU_RESOURCES_MALI300(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq) \
+       MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI300_PMU(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq) \
+       MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq)
+
+/* Mali-400 */
+
+#define MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+       MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+
+#define MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+       MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+
+#define MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0xC000, pp2_irq, base_addr + 0x6000, pp2_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+       MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+
+#define MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0xC000, pp2_irq, base_addr + 0x6000, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0xE000, pp3_irq, base_addr + 0x7000, pp3_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+       MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+
+/* Mali-450 */
+#define MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x0E000, pp3_irq, base_addr + 0x07000, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x11000) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x28000, pp3_irq, base_addr + 0x1C000, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + 0x2A000, pp4_irq, base_addr + 0x1D000, pp4_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + 0x2C000, pp5_irq, base_addr + 0x1E000, pp5_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP6_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x0E000, pp3_irq, base_addr + 0x07000, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x11000) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + 0x28000, pp4_irq, base_addr + 0x1C000, pp4_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + 0x2A000, pp5_irq, base_addr + 0x1D000, pp5_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(6, base_addr + 0x2C000, pp6_irq, base_addr + 0x1E000, pp6_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(7, base_addr + 0x2E000, pp7_irq, base_addr + 0x1F000, pp7_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP8_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+
+#define MALI_GPU_RESOURCE_L2(addr) \
+       { \
+               .name = "Mali_L2", \
+               .flags = IORESOURCE_MEM, \
+               .start = addr, \
+               .end   = addr + 0x200, \
+       },
+
+#define MALI_GPU_RESOURCE_GP(gp_addr, gp_irq) \
+       { \
+               .name = "Mali_GP", \
+               .flags = IORESOURCE_MEM, \
+               .start = gp_addr, \
+               .end =   gp_addr + 0x100, \
+       }, \
+       { \
+               .name = "Mali_GP_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = gp_irq, \
+               .end   = gp_irq, \
+       }, \
+
+#define MALI_GPU_RESOURCE_GP_WITH_MMU(gp_addr, gp_irq, gp_mmu_addr, gp_mmu_irq) \
+       { \
+               .name = "Mali_GP", \
+               .flags = IORESOURCE_MEM, \
+               .start = gp_addr, \
+               .end =   gp_addr + 0x100, \
+       }, \
+       { \
+               .name = "Mali_GP_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = gp_irq, \
+               .end   = gp_irq, \
+       }, \
+       { \
+               .name = "Mali_GP_MMU", \
+               .flags = IORESOURCE_MEM, \
+               .start = gp_mmu_addr, \
+               .end =   gp_mmu_addr + 0x100, \
+       }, \
+       { \
+               .name = "Mali_GP_MMU_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = gp_mmu_irq, \
+               .end =   gp_mmu_irq, \
+       },
+
+#define MALI_GPU_RESOURCE_PP(pp_addr, pp_irq) \
+       { \
+               .name = "Mali_PP", \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_addr, \
+               .end =   pp_addr + 0x1100, \
+       }, \
+       { \
+               .name = "Mali_PP_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = pp_irq, \
+               .end =   pp_irq, \
+       }, \
+
+#define MALI_GPU_RESOURCE_PP_WITH_MMU(id, pp_addr, pp_irq, pp_mmu_addr, pp_mmu_irq) \
+       { \
+               .name = "Mali_PP" #id, \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_addr, \
+               .end =   pp_addr + 0x1100, \
+       }, \
+       { \
+               .name = "Mali_PP" #id "_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = pp_irq, \
+               .end =   pp_irq, \
+       }, \
+       { \
+               .name = "Mali_PP" #id "_MMU", \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_mmu_addr, \
+               .end =   pp_mmu_addr + 0x100, \
+       }, \
+       { \
+               .name = "Mali_PP" #id "_MMU_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = pp_mmu_irq, \
+               .end =   pp_mmu_irq, \
+       },
+
+#define MALI_GPU_RESOURCE_MMU(mmu_addr, mmu_irq) \
+       { \
+               .name = "Mali_MMU", \
+               .flags = IORESOURCE_MEM, \
+               .start = mmu_addr, \
+               .end =   mmu_addr + 0x100, \
+       }, \
+       { \
+               .name = "Mali_MMU_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = mmu_irq, \
+               .end =   mmu_irq, \
+       },
+
+#define MALI_GPU_RESOURCE_PMU(pmu_addr) \
+       { \
+               .name = "Mali_PMU", \
+               .flags = IORESOURCE_MEM, \
+               .start = pmu_addr, \
+               .end =   pmu_addr + 0x100, \
+       },
+
+#define MALI_GPU_RESOURCE_DLBU(dlbu_addr) \
+       { \
+               .name = "Mali_DLBU", \
+               .flags = IORESOURCE_MEM, \
+               .start = dlbu_addr, \
+               .end = dlbu_addr + 0x100, \
+       },
+
+#define MALI_GPU_RESOURCE_BCAST(bcast_addr) \
+       { \
+               .name = "Mali_Broadcast", \
+               .flags = IORESOURCE_MEM, \
+               .start = bcast_addr, \
+               .end = bcast_addr + 0x100, \
+       },
+
+#define MALI_GPU_RESOURCE_PP_BCAST(pp_addr, pp_irq) \
+       { \
+               .name = "Mali_PP_Broadcast", \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_addr, \
+               .end =   pp_addr + 0x1100, \
+       }, \
+       { \
+               .name = "Mali_PP_Broadcast_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = pp_irq, \
+               .end =   pp_irq, \
+       }, \
+
+#define MALI_GPU_RESOURCE_PP_MMU_BCAST(pp_mmu_bcast_addr) \
+       { \
+               .name = "Mali_PP_MMU_Broadcast", \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_mmu_bcast_addr, \
+               .end = pp_mmu_bcast_addr + 0x100, \
+       },
+
+struct mali_gpu_device_data
+{
+       /* Dedicated GPU memory range (physical). */
+       unsigned long dedicated_mem_start;
+       unsigned long dedicated_mem_size;
+
+       /* Shared GPU memory */
+       unsigned long shared_mem_size;
+
+       /* Frame buffer memory to be accessible by Mali GPU (physical) */
+       unsigned long fb_start;
+       unsigned long fb_size;
+
+       /* Report GPU utilization in this interval (specified in ms) */
+       unsigned long utilization_interval;
+
+       /* Function that will receive periodic GPU utilization numbers */
+       void (*utilization_handler)(unsigned int);
+};
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ * 
+ * called to power down all cores 
+ */
+int mali_pmu_powerdown(void);
+
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ * 
+ * called to power up all cores 
+ */
+int mali_pmu_powerup(void);
+
+
+#endif
diff --git a/drivers/gpu/arm/mali400/mali/include/linux/mali/mali_utgard_counters.h b/drivers/gpu/arm/mali400/mali/include/linux/mali/mali_utgard_counters.h
new file mode 100644 (file)
index 0000000..ca58bda
--- /dev/null
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef _MALI_UTGARD_COUNTERS_H_
+#define _MALI_UTGARD_COUNTERS_H_
+
+typedef struct
+{
+       void *unused; /* placeholder member only: C requires at least one member in a struct */
+} mali_cinstr_counter_info;
+
+typedef enum
+{
+       MALI_CINSTR_COUNTER_SOURCE_EGL      =     0, /* each source owns a 1000-wide block of counter IDs (see the *_FIRST/*_LAST macros below) */
+       MALI_CINSTR_COUNTER_SOURCE_OPENGLES =  1000,
+       MALI_CINSTR_COUNTER_SOURCE_OPENVG   =  2000,
+       MALI_CINSTR_COUNTER_SOURCE_GP       =  3000,
+       MALI_CINSTR_COUNTER_SOURCE_PP       =  4000,
+} cinstr_counter_source;
+
+#define MALI_CINSTR_EGL_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_EGL /* the FIRST/LAST pairs bound each source's counter-ID range, inclusive */
+#define MALI_CINSTR_EGL_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_EGL + 999)
+
+#define MALI_CINSTR_GLES_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_OPENGLES
+#define MALI_CINSTR_GLES_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 999)
+
+#define MALI_CINSTR_VG_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_OPENVG
+#define MALI_CINSTR_VG_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_OPENVG + 999)
+
+#define MALI_CINSTR_GP_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_GP
+#define MALI_CINSTR_GP_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_GP + 999)
+
+#define MALI_CINSTR_PP_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_PP
+#define MALI_CINSTR_PP_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_PP + 999)
+
+
+typedef enum
+{
+       /* EGL counters */
+
+       MALI_CINSTR_EGL_BLIT_TIME                                            = MALI_CINSTR_COUNTER_SOURCE_EGL + 0,
+
+       /* Last counter in the EGL set */
+       MALI_CINSTR_EGL_MAX_COUNTER                                           = MALI_CINSTR_COUNTER_SOURCE_EGL + 1,
+
+       /* GLES counters */
+
+       MALI_CINSTR_GLES_DRAW_ELEMENTS_CALLS                                 = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 0,
+       MALI_CINSTR_GLES_DRAW_ELEMENTS_NUM_INDICES                           = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 1,
+       MALI_CINSTR_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED                       = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 2,
+       MALI_CINSTR_GLES_DRAW_ARRAYS_CALLS                                   = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 3,
+       MALI_CINSTR_GLES_DRAW_ARRAYS_NUM_TRANSFORMED                         = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 4,
+       MALI_CINSTR_GLES_DRAW_POINTS                                         = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 5,
+       MALI_CINSTR_GLES_DRAW_LINES                                          = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 6,
+       MALI_CINSTR_GLES_DRAW_LINE_LOOP                                      = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 7,
+       MALI_CINSTR_GLES_DRAW_LINE_STRIP                                     = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 8,
+       MALI_CINSTR_GLES_DRAW_TRIANGLES                                      = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 9,
+       MALI_CINSTR_GLES_DRAW_TRIANGLE_STRIP                                 = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 10,
+       MALI_CINSTR_GLES_DRAW_TRIANGLE_FAN                                   = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 11,
+       MALI_CINSTR_GLES_NON_VBO_DATA_COPY_TIME                              = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 12,
+       MALI_CINSTR_GLES_UNIFORM_BYTES_COPIED_TO_MALI                        = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 13,
+       MALI_CINSTR_GLES_UPLOAD_TEXTURE_TIME                                 = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 14,
+       MALI_CINSTR_GLES_UPLOAD_VBO_TIME                                     = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 15,
+       MALI_CINSTR_GLES_NUM_FLUSHES                                         = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 16,
+       MALI_CINSTR_GLES_NUM_VSHADERS_GENERATED                              = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 17,
+       MALI_CINSTR_GLES_NUM_FSHADERS_GENERATED                              = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 18,
+       MALI_CINSTR_GLES_VSHADER_GEN_TIME                                    = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 19,
+       MALI_CINSTR_GLES_FSHADER_GEN_TIME                                    = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 20,
+       MALI_CINSTR_GLES_INPUT_TRIANGLES                                     = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 21,
+       MALI_CINSTR_GLES_VXCACHE_HIT                                         = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 22,
+       MALI_CINSTR_GLES_VXCACHE_MISS                                        = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 23,
+       MALI_CINSTR_GLES_VXCACHE_COLLISION                                   = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 24,
+       MALI_CINSTR_GLES_CULLED_TRIANGLES                                    = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 25,
+       MALI_CINSTR_GLES_CULLED_LINES                                        = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 26,
+       MALI_CINSTR_GLES_BACKFACE_TRIANGLES                                  = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 27,
+       MALI_CINSTR_GLES_GBCLIP_TRIANGLES                                    = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 28,
+       MALI_CINSTR_GLES_GBCLIP_LINES                                        = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 29,
+       MALI_CINSTR_GLES_TRIANGLES_DRAWN                                     = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 30,
+       MALI_CINSTR_GLES_DRAWCALL_TIME                                       = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 31,
+       MALI_CINSTR_GLES_TRIANGLES_COUNT                                     = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 32,
+       MALI_CINSTR_GLES_INDEPENDENT_TRIANGLES_COUNT                         = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 33,
+       MALI_CINSTR_GLES_STRIP_TRIANGLES_COUNT                               = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 34,
+       MALI_CINSTR_GLES_FAN_TRIANGLES_COUNT                                 = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 35,
+       MALI_CINSTR_GLES_LINES_COUNT                                         = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 36,
+       MALI_CINSTR_GLES_INDEPENDENT_LINES_COUNT                             = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 37,
+       MALI_CINSTR_GLES_STRIP_LINES_COUNT                                   = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 38,
+       MALI_CINSTR_GLES_LOOP_LINES_COUNT                                    = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 39,
+       MALI_CINSTR_GLES_POINTS_COUNT                                        = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 40,
+
+       /* Last counter in the GLES set */
+       MALI_CINSTR_GLES_MAX_COUNTER                                         = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 41,
+
+       /* OpenVG counters */
+
+       MALI_CINSTR_VG_MASK_COUNTER                                          = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 0,
+       MALI_CINSTR_VG_CLEAR_COUNTER                                         = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 1,
+       MALI_CINSTR_VG_APPEND_PATH_COUNTER                                   = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 2,
+       MALI_CINSTR_VG_APPEND_PATH_DATA_COUNTER                              = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 3,
+       MALI_CINSTR_VG_MODIFY_PATH_COORDS_COUNTER                            = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 4,
+       MALI_CINSTR_VG_TRANSFORM_PATH_COUNTER                                = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 5,
+       MALI_CINSTR_VG_INTERPOLATE_PATH_COUNTER                              = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 6,
+       MALI_CINSTR_VG_PATH_LENGTH_COUNTER                                   = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 7,
+       MALI_CINSTR_VG_POINT_ALONG_PATH_COUNTER                              = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 8,
+       MALI_CINSTR_VG_PATH_BOUNDS_COUNTER                                   = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 9,
+       MALI_CINSTR_VG_PATH_TRANSFORMED_BOUNDS_COUNTER                       = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 10,
+       MALI_CINSTR_VG_DRAW_PATH_COUNTER                                     = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 11,
+       MALI_CINSTR_VG_CLEAR_IMAGE_COUNTER                                   = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 12,
+       MALI_CINSTR_VG_IMAGE_SUB_DATA_COUNTER                                = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 13,
+       MALI_CINSTR_VG_GET_IMAGE_SUB_DATA_COUNTER                            = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 14,
+       MALI_CINSTR_VG_COPY_IMAGE_COUNTER                                    = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 15,
+       MALI_CINSTR_VG_DRAW_IMAGE_COUNTER                                    = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 16,
+       MALI_CINSTR_VG_SET_PIXELS_COUNTER                                    = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 17,
+       MALI_CINSTR_VG_WRITE_PIXELS_COUNTER                                  = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 18,
+       MALI_CINSTR_VG_GET_PIXELS_COUNTER                                    = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 19,
+       MALI_CINSTR_VG_READ_PIXELS_COUNTER                                   = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 20,
+       MALI_CINSTR_VG_COPY_PIXELS_COUNTER                                   = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 21,
+       MALI_CINSTR_VG_COLOR_MATRIX_COUNTER                                  = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 22,
+       MALI_CINSTR_VG_CONVOLVE_COUNTER                                      = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 23,
+       MALI_CINSTR_VG_SEPARABLE_CONVOLVE_COUNTER                            = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 24,
+       MALI_CINSTR_VG_GAUSSIAN_BLUR_COUNTER                                 = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 25,
+       MALI_CINSTR_VG_LOOKUP_COUNTER                                        = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 26,
+       MALI_CINSTR_VG_LOOKUP_SINGLE_COUNTER                                 = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 27,
+       MALI_CINSTR_VG_CONTEXT_CREATE_COUNTER                                = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 28,
+       MALI_CINSTR_VG_STROKED_CUBICS_COUNTER                                = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 29,
+       MALI_CINSTR_VG_STROKED_QUADS_COUNTER                                 = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 30,
+       MALI_CINSTR_VG_STROKED_ARCS_COUNTER                                  = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 31,
+       MALI_CINSTR_VG_STROKED_LINES_COUNTER                                 = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 32,
+       MALI_CINSTR_VG_FILLED_CUBICS_COUNTER                                 = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 33,
+       MALI_CINSTR_VG_FILLED_QUADS_COUNTER                                  = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 34,
+       MALI_CINSTR_VG_FILLED_ARCS_COUNTER                                   = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 35,
+       MALI_CINSTR_VG_FILLED_LINES_COUNTER                                  = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 36,
+       MALI_CINSTR_VG_DRAW_PATH_CALLS_COUNTER                               = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 37,
+       MALI_CINSTR_VG_TRIANGLES_COUNTER                                     = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 38,
+       MALI_CINSTR_VG_VERTICES_COUNTER                                      = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 39,
+       MALI_CINSTR_VG_INDICES_COUNTER                                       = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 40,
+       MALI_CINSTR_VG_FILLED_PATHS_COUNTER                                  = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 41,
+       MALI_CINSTR_VG_STROKED_PATHS_COUNTER                                 = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 42,
+       MALI_CINSTR_VG_FILL_EXTRACT_COUNTER                                  = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 43,
+       MALI_CINSTR_VG_DRAW_FILLED_PATH_COUNTER                              = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 44,
+       MALI_CINSTR_VG_STROKE_EXTRACT_COUNTER                                = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 45,
+       MALI_CINSTR_VG_DRAW_STROKED_PATH_COUNTER                             = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 46,
+       MALI_CINSTR_VG_DRAW_PAINT_COUNTER                                    = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 47,
+       MALI_CINSTR_VG_DATA_STRUCTURES_COUNTER                               = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 48,
+       MALI_CINSTR_VG_MEM_PATH_COUNTER                                      = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 49,
+       MALI_CINSTR_VG_RSW_COUNTER                                           = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 50,
+
+       /* Last counter in the VG set */
+       MALI_CINSTR_VG_MAX_COUNTER                                           = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 51,
+
+       /* Mali GP counters */
+
+       MALI_CINSTR_GP_DEPRECATED_0                                          = MALI_CINSTR_COUNTER_SOURCE_GP + 0,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_GP                                      = MALI_CINSTR_COUNTER_SOURCE_GP + 1,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_SHADER                           = MALI_CINSTR_COUNTER_SOURCE_GP + 2,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_STORER                           = MALI_CINSTR_COUNTER_SOURCE_GP + 3,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_LOADER                           = MALI_CINSTR_COUNTER_SOURCE_GP + 4,
+       MALI_CINSTR_GP_CYCLES_VERTEX_LOADER_WAITING_FOR_VERTEX_SHADER        = MALI_CINSTR_COUNTER_SOURCE_GP + 5,
+       MALI_CINSTR_GP_NUMBER_OF_WORDS_READ                                  = MALI_CINSTR_COUNTER_SOURCE_GP + 6,
+       MALI_CINSTR_GP_NUMBER_OF_WORDS_WRITTEN                               = MALI_CINSTR_COUNTER_SOURCE_GP + 7,
+       MALI_CINSTR_GP_NUMBER_OF_READ_BURSTS                                 = MALI_CINSTR_COUNTER_SOURCE_GP + 8,
+       MALI_CINSTR_GP_NUMBER_OF_WRITE_BURSTS                                = MALI_CINSTR_COUNTER_SOURCE_GP + 9,
+       MALI_CINSTR_GP_NUMBER_OF_VERTICES_PROCESSED                          = MALI_CINSTR_COUNTER_SOURCE_GP + 10,
+       MALI_CINSTR_GP_NUMBER_OF_VERTICES_FETCHED                            = MALI_CINSTR_COUNTER_SOURCE_GP + 11,
+       MALI_CINSTR_GP_NUMBER_OF_PRIMITIVES_FETCHED                          = MALI_CINSTR_COUNTER_SOURCE_GP + 12,
+       MALI_CINSTR_GP_RESERVED_13                                           = MALI_CINSTR_COUNTER_SOURCE_GP + 13,
+       MALI_CINSTR_GP_NUMBER_OF_BACKFACE_CULLINGS_DONE                      = MALI_CINSTR_COUNTER_SOURCE_GP + 14,
+       MALI_CINSTR_GP_NUMBER_OF_COMMANDS_WRITTEN_TO_TILES                   = MALI_CINSTR_COUNTER_SOURCE_GP + 15,
+       MALI_CINSTR_GP_NUMBER_OF_MEMORY_BLOCKS_ALLOCATED                     = MALI_CINSTR_COUNTER_SOURCE_GP + 16,
+       MALI_CINSTR_GP_RESERVED_17                                           = MALI_CINSTR_COUNTER_SOURCE_GP + 17,
+       MALI_CINSTR_GP_RESERVED_18                                           = MALI_CINSTR_COUNTER_SOURCE_GP + 18,
+       MALI_CINSTR_GP_NUMBER_OF_VERTEX_LOADER_CACHE_MISSES                  = MALI_CINSTR_COUNTER_SOURCE_GP + 19,
+       MALI_CINSTR_GP_RESERVED_20                                           = MALI_CINSTR_COUNTER_SOURCE_GP + 20,
+       MALI_CINSTR_GP_RESERVED_21                                           = MALI_CINSTR_COUNTER_SOURCE_GP + 21,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_SHADER_COMMAND_PROCESSOR         = MALI_CINSTR_COUNTER_SOURCE_GP + 22,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_COMMAND_PROCESSOR                  = MALI_CINSTR_COUNTER_SOURCE_GP + 23,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_LIST_WRITER                        = MALI_CINSTR_COUNTER_SOURCE_GP + 24,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_THROUGH_THE_PREPARE_LIST_COMMANDS       = MALI_CINSTR_COUNTER_SOURCE_GP + 25,
+       MALI_CINSTR_GP_RESERVED_26                                           = MALI_CINSTR_COUNTER_SOURCE_GP + 26,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_PRIMITIVE_ASSEMBLY                      = MALI_CINSTR_COUNTER_SOURCE_GP + 27,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_VERTEX_FETCHER                     = MALI_CINSTR_COUNTER_SOURCE_GP + 28,
+       MALI_CINSTR_GP_RESERVED_29                                           = MALI_CINSTR_COUNTER_SOURCE_GP + 29,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_BOUNDINGBOX_AND_COMMAND_GENERATOR       = MALI_CINSTR_COUNTER_SOURCE_GP + 30,
+       MALI_CINSTR_GP_RESERVED_31                                           = MALI_CINSTR_COUNTER_SOURCE_GP + 31,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_SCISSOR_TILE_ITERATOR                   = MALI_CINSTR_COUNTER_SOURCE_GP + 32,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_TILE_ITERATOR                      = MALI_CINSTR_COUNTER_SOURCE_GP + 33,
+       MALI_CINSTR_GP_JOB_COUNT                                             = MALI_CINSTR_COUNTER_SOURCE_GP + 900, /* NOTE(review): offset +900 keeps this clear of the HW counter IDs above; presumably a software-maintained job count — confirm */
+
+       /* Mali PP counters */
+
+       MALI_CINSTR_PP_ACTIVE_CLOCK_CYCLES_COUNT                             = MALI_CINSTR_COUNTER_SOURCE_PP + 0,
+       MALI_CINSTR_PP_TOTAL_CLOCK_CYCLES_COUNT_REMOVED                      = MALI_CINSTR_COUNTER_SOURCE_PP + 1,
+       MALI_CINSTR_PP_TOTAL_BUS_READS                                       = MALI_CINSTR_COUNTER_SOURCE_PP + 2,
+       MALI_CINSTR_PP_TOTAL_BUS_WRITES                                      = MALI_CINSTR_COUNTER_SOURCE_PP + 3,
+       MALI_CINSTR_PP_BUS_READ_REQUEST_CYCLES_COUNT                         = MALI_CINSTR_COUNTER_SOURCE_PP + 4,
+       MALI_CINSTR_PP_BUS_WRITE_REQUEST_CYCLES_COUNT                        = MALI_CINSTR_COUNTER_SOURCE_PP + 5,
+       MALI_CINSTR_PP_BUS_READ_TRANSACTIONS_COUNT                           = MALI_CINSTR_COUNTER_SOURCE_PP + 6,
+       MALI_CINSTR_PP_BUS_WRITE_TRANSACTIONS_COUNT                          = MALI_CINSTR_COUNTER_SOURCE_PP + 7,
+       MALI_CINSTR_PP_RESERVED_08                                           = MALI_CINSTR_COUNTER_SOURCE_PP + 8,
+       MALI_CINSTR_PP_TILE_WRITEBACK_WRITES                                 = MALI_CINSTR_COUNTER_SOURCE_PP + 9,
+       MALI_CINSTR_PP_STORE_UNIT_WRITES                                     = MALI_CINSTR_COUNTER_SOURCE_PP + 10,
+       MALI_CINSTR_PP_RESERVED_11                                           = MALI_CINSTR_COUNTER_SOURCE_PP + 11,
+       MALI_CINSTR_PP_PALETTE_CACHE_READS                                   = MALI_CINSTR_COUNTER_SOURCE_PP + 12,
+       MALI_CINSTR_PP_TEXTURE_CACHE_UNCOMPRESSED_READS                      = MALI_CINSTR_COUNTER_SOURCE_PP + 13,
+       MALI_CINSTR_PP_POLYGON_LIST_READS                                    = MALI_CINSTR_COUNTER_SOURCE_PP + 14,
+       MALI_CINSTR_PP_RSW_READS                                             = MALI_CINSTR_COUNTER_SOURCE_PP + 15,
+       MALI_CINSTR_PP_VERTEX_CACHE_READS                                    = MALI_CINSTR_COUNTER_SOURCE_PP + 16,
+       MALI_CINSTR_PP_UNIFORM_REMAPPING_READS                               = MALI_CINSTR_COUNTER_SOURCE_PP + 17,
+       MALI_CINSTR_PP_PROGRAM_CACHE_READS                                   = MALI_CINSTR_COUNTER_SOURCE_PP + 18,
+       MALI_CINSTR_PP_VARYING_READS                                         = MALI_CINSTR_COUNTER_SOURCE_PP + 19,
+       MALI_CINSTR_PP_TEXTURE_DESCRIPTORS_READS                             = MALI_CINSTR_COUNTER_SOURCE_PP + 20,
+       MALI_CINSTR_PP_TEXTURE_DESCRIPTORS_REMAPPING_READS                   = MALI_CINSTR_COUNTER_SOURCE_PP + 21,
+       MALI_CINSTR_PP_TEXTURE_CACHE_COMPRESSED_READS                        = MALI_CINSTR_COUNTER_SOURCE_PP + 22,
+       MALI_CINSTR_PP_LOAD_UNIT_READS                                       = MALI_CINSTR_COUNTER_SOURCE_PP + 23,
+       MALI_CINSTR_PP_POLYGON_COUNT                                         = MALI_CINSTR_COUNTER_SOURCE_PP + 24,
+       MALI_CINSTR_PP_PIXEL_RECTANGLE_COUNT                                 = MALI_CINSTR_COUNTER_SOURCE_PP + 25,
+       MALI_CINSTR_PP_LINES_COUNT                                           = MALI_CINSTR_COUNTER_SOURCE_PP + 26,
+       MALI_CINSTR_PP_POINTS_COUNT                                          = MALI_CINSTR_COUNTER_SOURCE_PP + 27,
+       MALI_CINSTR_PP_STALL_CYCLES_POLYGON_LIST_READER                      = MALI_CINSTR_COUNTER_SOURCE_PP + 28,
+       MALI_CINSTR_PP_STALL_CYCLES_TRIANGLE_SETUP                           = MALI_CINSTR_COUNTER_SOURCE_PP + 29,
+       MALI_CINSTR_PP_QUAD_RASTERIZED_COUNT                                 = MALI_CINSTR_COUNTER_SOURCE_PP + 30,
+       MALI_CINSTR_PP_FRAGMENT_RASTERIZED_COUNT                             = MALI_CINSTR_COUNTER_SOURCE_PP + 31,
+       MALI_CINSTR_PP_FRAGMENT_REJECTED_FRAGMENT_KILL_COUNT                 = MALI_CINSTR_COUNTER_SOURCE_PP + 32,
+       MALI_CINSTR_PP_FRAGMENT_REJECTED_FWD_FRAGMENT_KILL_COUNT             = MALI_CINSTR_COUNTER_SOURCE_PP + 33,
+       MALI_CINSTR_PP_FRAGMENT_PASSED_ZSTENCIL_COUNT                        = MALI_CINSTR_COUNTER_SOURCE_PP + 34,
+       MALI_CINSTR_PP_PATCHES_REJECTED_EARLY_Z_STENCIL_COUNT                = MALI_CINSTR_COUNTER_SOURCE_PP + 35,
+       MALI_CINSTR_PP_PATCHES_EVALUATED                                     = MALI_CINSTR_COUNTER_SOURCE_PP + 36,
+       MALI_CINSTR_PP_INSTRUCTION_COMPLETED_COUNT                           = MALI_CINSTR_COUNTER_SOURCE_PP + 37,
+       MALI_CINSTR_PP_INSTRUCTION_FAILED_RENDEZVOUS_COUNT                   = MALI_CINSTR_COUNTER_SOURCE_PP + 38,
+       MALI_CINSTR_PP_INSTRUCTION_FAILED_VARYING_MISS_COUNT                 = MALI_CINSTR_COUNTER_SOURCE_PP + 39,
+       MALI_CINSTR_PP_INSTRUCTION_FAILED_TEXTURE_MISS_COUNT                 = MALI_CINSTR_COUNTER_SOURCE_PP + 40,
+       MALI_CINSTR_PP_INSTRUCTION_FAILED_LOAD_MISS_COUNT                    = MALI_CINSTR_COUNTER_SOURCE_PP + 41,
+       MALI_CINSTR_PP_INSTRUCTION_FAILED_TILE_READ_MISS_COUNT               = MALI_CINSTR_COUNTER_SOURCE_PP + 42,
+       MALI_CINSTR_PP_INSTRUCTION_FAILED_STORE_MISS_COUNT                   = MALI_CINSTR_COUNTER_SOURCE_PP + 43,
+       MALI_CINSTR_PP_RENDEZVOUS_BREAKAGE_COUNT                             = MALI_CINSTR_COUNTER_SOURCE_PP + 44,
+       MALI_CINSTR_PP_PIPELINE_BUBBLES_CYCLE_COUNT                          = MALI_CINSTR_COUNTER_SOURCE_PP + 45,
+       MALI_CINSTR_PP_TEXTURE_MAPPER_MULTIPASS_COUNT                        = MALI_CINSTR_COUNTER_SOURCE_PP + 46,
+       MALI_CINSTR_PP_TEXTURE_MAPPER_CYCLE_COUNT                            = MALI_CINSTR_COUNTER_SOURCE_PP + 47,
+       MALI_CINSTR_PP_VERTEX_CACHE_HIT_COUNT                                = MALI_CINSTR_COUNTER_SOURCE_PP + 48,
+       MALI_CINSTR_PP_VERTEX_CACHE_MISS_COUNT                               = MALI_CINSTR_COUNTER_SOURCE_PP + 49,
+       MALI_CINSTR_PP_VARYING_CACHE_HIT_COUNT                               = MALI_CINSTR_COUNTER_SOURCE_PP + 50,
+       MALI_CINSTR_PP_VARYING_CACHE_MISS_COUNT                              = MALI_CINSTR_COUNTER_SOURCE_PP + 51,
+       MALI_CINSTR_PP_VARYING_CACHE_CONFLICT_MISS_COUNT                     = MALI_CINSTR_COUNTER_SOURCE_PP + 52,
+       MALI_CINSTR_PP_TEXTURE_CACHE_HIT_COUNT                               = MALI_CINSTR_COUNTER_SOURCE_PP + 53,
+       MALI_CINSTR_PP_TEXTURE_CACHE_MISS_COUNT                              = MALI_CINSTR_COUNTER_SOURCE_PP + 54,
+       MALI_CINSTR_PP_TEXTURE_CACHE_CONFLICT_MISS_COUNT                     = MALI_CINSTR_COUNTER_SOURCE_PP + 55,
+       MALI_CINSTR_PP_PALETTE_CACHE_HIT_COUNT                               = MALI_CINSTR_COUNTER_SOURCE_PP + 56, /* Mali 200 only */
+       MALI_CINSTR_PP_PALETTE_CACHE_MISS_COUNT                              = MALI_CINSTR_COUNTER_SOURCE_PP + 57, /* Mali 200 only */
+       MALI_CINSTR_PP_COMPRESSED_TEXTURE_CACHE_HIT_COUNT                    = MALI_CINSTR_COUNTER_SOURCE_PP + 56, /* Mali 400 class only: intentionally shares +56 with PALETTE_CACHE_HIT_COUNT above */
+       MALI_CINSTR_PP_COMPRESSED_TEXTURE_CACHE_MISS_COUNT                   = MALI_CINSTR_COUNTER_SOURCE_PP + 57, /* Mali 400 class only: intentionally shares +57 with PALETTE_CACHE_MISS_COUNT above */
+       MALI_CINSTR_PP_LOAD_STORE_CACHE_HIT_COUNT                            = MALI_CINSTR_COUNTER_SOURCE_PP + 58,
+       MALI_CINSTR_PP_LOAD_STORE_CACHE_MISS_COUNT                           = MALI_CINSTR_COUNTER_SOURCE_PP + 59,
+       MALI_CINSTR_PP_PROGRAM_CACHE_HIT_COUNT                               = MALI_CINSTR_COUNTER_SOURCE_PP + 60,
+       MALI_CINSTR_PP_PROGRAM_CACHE_MISS_COUNT                              = MALI_CINSTR_COUNTER_SOURCE_PP + 61,
+       MALI_CINSTR_PP_JOB_COUNT                                             = MALI_CINSTR_COUNTER_SOURCE_PP + 900, /* NOTE(review): like GP_JOB_COUNT, +900 keeps this clear of HW counter IDs */
+} cinstr_counters_m200_t;
+
+#endif /*_MALI_UTGARD_COUNTERS_H_*/
diff --git a/drivers/gpu/arm/mali400/mali/include/linux/mali/mali_utgard_ioctl.h b/drivers/gpu/arm/mali400/mali/include/linux/mali/mali_utgard_ioctl.h
new file mode 100644 (file)
index 0000000..f9ff110
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_UTGARD_IOCTL_H__
+#define __MALI_UTGARD_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>       /* file system operations */
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @file mali_kernel_ioctl.h
+ * Interface to the Linux device driver.
+ * This file describes the interface needed to use the Linux device driver.
+ * Its interface is designed to be used by the HAL implementation through a thin arch layer.
+ */
+
+/**
+ * ioctl commands
+ */
+
+#define MALI_IOC_BASE           0x82 /* ioctl magic number; per-subsystem bases are offsets from it */
+#define MALI_IOC_CORE_BASE      (_MALI_UK_CORE_SUBSYSTEM      + MALI_IOC_BASE)
+#define MALI_IOC_MEMORY_BASE    (_MALI_UK_MEMORY_SUBSYSTEM    + MALI_IOC_BASE)
+#define MALI_IOC_PP_BASE        (_MALI_UK_PP_SUBSYSTEM        + MALI_IOC_BASE)
+#define MALI_IOC_GP_BASE        (_MALI_UK_GP_SUBSYSTEM        + MALI_IOC_BASE)
+#define MALI_IOC_PROFILING_BASE (_MALI_UK_PROFILING_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_VSYNC_BASE     (_MALI_UK_VSYNC_SUBSYSTEM + MALI_IOC_BASE)
+
+#define MALI_IOC_WAIT_FOR_NOTIFICATION      _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s *) /* NOTE(review): throughout this list the _IOWR/_IOR/_IOW size argument is a POINTER type, so the encoded size is sizeof(void *), not the struct size — matches the Mali UK convention, but confirm userspace uses the same encoding */
+#define MALI_IOC_GET_API_VERSION            _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_s *)
+#define MALI_IOC_POST_NOTIFICATION          _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s *)
+#define MALI_IOC_GET_USER_SETTING           _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTING, _mali_uk_get_user_setting_s *)
+#define MALI_IOC_GET_USER_SETTINGS          _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTINGS, _mali_uk_get_user_settings_s *)
+#define MALI_IOC_STREAM_CREATE              _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_STREAM_CREATE, _mali_uk_stream_create_s *)
+#define MALI_IOC_FENCE_VALIDATE             _IOR(MALI_IOC_CORE_BASE, _MALI_UK_FENCE_VALIDATE, _mali_uk_fence_validate_s *)
+
+#define MALI_IOC_MEM_GET_BIG_BLOCK          _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_GET_BIG_BLOCK, void *)
+#define MALI_IOC_MEM_FREE_BIG_BLOCK         _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_FREE_BIG_BLOCK, void *)
+#define MALI_IOC_MEM_INIT                   _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_INIT_MEM, _mali_uk_init_mem_s *)
+#define MALI_IOC_MEM_TERM                   _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_TERM_MEM, _mali_uk_term_mem_s *)
+#define MALI_IOC_MEM_MAP_EXT                _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MAP_EXT_MEM, _mali_uk_map_external_mem_s *)
+#define MALI_IOC_MEM_UNMAP_EXT              _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_UNMAP_EXT_MEM, _mali_uk_unmap_external_mem_s *)
+#define MALI_IOC_MEM_ATTACH_DMA_BUF         _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_DMA_BUF, _mali_uk_attach_dma_buf_s *)
+#define MALI_IOC_MEM_RELEASE_DMA_BUF        _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_DMA_BUF, _mali_uk_release_dma_buf_s *)
+#define MALI_IOC_MEM_DMA_BUF_GET_SIZE       _IOR(MALI_IOC_MEMORY_BASE, _MALI_UK_DMA_BUF_GET_SIZE, _mali_uk_dma_buf_get_size_s *)
+#define MALI_IOC_MEM_ATTACH_UMP             _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_UMP_MEM, _mali_uk_attach_ump_mem_s *)
+#define MALI_IOC_MEM_RELEASE_UMP            _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_UMP_MEM, _mali_uk_release_ump_mem_s *)
+#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s *)
+#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE    _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s *)
+
+#define MALI_IOC_PP_START_JOB               _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s *)
+#define MALI_IOC_PP_NUMBER_OF_CORES_GET            _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s *)
+#define MALI_IOC_PP_CORE_VERSION_GET       _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s * )
+#define MALI_IOC_PP_DISABLE_WB              _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_DISABLE_WB, _mali_uk_pp_disable_wb_s * )
+
+#define MALI_IOC_GP2_START_JOB              _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s *)
+#define MALI_IOC_GP2_NUMBER_OF_CORES_GET    _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s *)
+#define MALI_IOC_GP2_CORE_VERSION_GET      _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s *)
+#define MALI_IOC_GP2_SUSPEND_RESPONSE      _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE,_mali_uk_gp_suspend_response_s *)
+
+#define MALI_IOC_PROFILING_START            _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_START, _mali_uk_profiling_start_s *)
+#define MALI_IOC_PROFILING_ADD_EVENT        _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s*)
+#define MALI_IOC_PROFILING_STOP             _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_STOP, _mali_uk_profiling_stop_s *)
+#define MALI_IOC_PROFILING_GET_EVENT        _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_EVENT, _mali_uk_profiling_get_event_s *)
+#define MALI_IOC_PROFILING_CLEAR            _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_CLEAR, _mali_uk_profiling_clear_s *)
+#define MALI_IOC_PROFILING_GET_CONFIG       _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_CONFIG, _mali_uk_get_user_settings_s *)
+#define MALI_IOC_PROFILING_REPORT_SW_COUNTERS  _IOW (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_REPORT_SW_COUNTERS, _mali_uk_sw_counters_report_s *)
+
+#define MALI_IOC_VSYNC_EVENT_REPORT         _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s *)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_IOCTL_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/include/linux/mali/mali_utgard_profiling_events.h b/drivers/gpu/arm/mali400/mali/include/linux/mali/mali_utgard_profiling_events.h
new file mode 100644 (file)
index 0000000..1642926
--- /dev/null
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef _MALI_UTGARD_PROFILING_EVENTS_H_
+#define _MALI_UTGARD_PROFILING_EVENTS_H_
+
+/*
+ * The event ID is a 32 bit value consisting of different fields
+ * reserved, 4 bits, for future use
+ * event type, 4 bits, cinstr_profiling_event_type_t
+ * event channel, 8 bits, the source of the event.
+ * event data, 16 bit field, data depending on event type
+ */
+
+/**
+ * Specifies what kind of event this is
+ */
+typedef enum
+{
+       MALI_PROFILING_EVENT_TYPE_SINGLE  = 0 << 24,
+       MALI_PROFILING_EVENT_TYPE_START   = 1 << 24,
+       MALI_PROFILING_EVENT_TYPE_STOP    = 2 << 24,
+       MALI_PROFILING_EVENT_TYPE_SUSPEND = 3 << 24,
+       MALI_PROFILING_EVENT_TYPE_RESUME  = 4 << 24,
+} cinstr_profiling_event_type_t;
+
+
+/**
+ * Specifies the channel/source of the event
+ */
+typedef enum
+{
+       MALI_PROFILING_EVENT_CHANNEL_SOFTWARE =  0 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_GP0      =  1 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP0      =  5 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP1      =  6 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP2      =  7 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP3      =  8 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP4      =  9 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP5      = 10 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP6      = 11 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP7      = 12 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_GPU      = 21 << 16,
+} cinstr_profiling_event_channel_t;
+
+
+#define MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(num) (((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) + (num)) << 16)
+#define MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(num) (((MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) + (num)) << 16)
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from software channel
+ */
+typedef enum
+{
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_NONE                  = 0,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_NEW_FRAME         = 1,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_FLUSH                 = 2,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_SWAP_BUFFERS      = 3,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_FB_EVENT              = 4,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE            = 5,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE            = 6,
+    MALI_PROFILING_EVENT_REASON_SINGLE_SW_ENTER_API_FUNC        = 10,
+    MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC        = 11,
+    MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_TRY_LOCK          = 53,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_LOCK              = 54,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_UNLOCK            = 55,
+       MALI_PROFILING_EVENT_REASON_SINGLE_LOCK_CONTENDED           = 56,
+} cinstr_profiling_event_reason_single_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_START/STOP is used from software channel
+ * to inform whether the core is physical or virtual
+ */
+typedef enum
+{
+       MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL  = 0,
+       MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL   = 1,
+} cinstr_profiling_event_reason_start_stop_hw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_START/STOP is used from software channel
+ */
+typedef enum
+{
+       /*MALI_PROFILING_EVENT_REASON_START_STOP_SW_NONE            = 0,*/
+       MALI_PROFILING_EVENT_REASON_START_STOP_SW_MALI            = 1,
+       MALI_PROFILING_EVENT_REASON_START_STOP_SW_CALLBACK_THREAD = 2,
+       MALI_PROFILING_EVENT_REASON_START_STOP_SW_WORKER_THREAD   = 3,
+       MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF     = 4,
+       MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF      = 5,
+} cinstr_profiling_event_reason_start_stop_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SUSPEND/RESUME is used from software channel
+ */
+typedef enum
+{
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_NONE                   =  0, /* used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_PIPELINE_FULL          =  1, /* NOT used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC                  = 26, /* used in some build configurations */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_IFRAME_WAIT         = 27, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_IFRAME_SYNC         = 28, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VG_WAIT_FILTER_CLEANUP = 29, /* used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VG_WAIT_TEXTURE        = 30, /* used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GLES_WAIT_MIPLEVEL     = 31, /* used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GLES_WAIT_READPIXELS   = 32, /* used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_WAIT_SWAP_IMMEDIATE= 33, /* NOT used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_ICS_QUEUE_BUFFER       = 34, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_ICS_DEQUEUE_BUFFER     = 35, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_UMP_LOCK               = 36, /* Not currently used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_X11_GLOBAL_LOCK        = 37, /* Not currently used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_X11_SWAP               = 38, /* Not currently used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_MALI_EGL_IMAGE_SYNC_WAIT = 39, /* USED */
+} cinstr_profiling_event_reason_suspend_resume_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from a HW channel (GPx+PPx)
+ */
+typedef enum
+{
+       MALI_PROFILING_EVENT_REASON_SINGLE_HW_NONE          = 0,
+       MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT     = 1,
+       MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH         = 2,
+} cinstr_profiling_event_reason_single_hw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from the GPU channel
+ */
+typedef enum
+{
+       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_NONE              = 0,
+       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE  = 1,
+       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS      = 2,
+       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS      = 3,
+       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS      = 4,
+} cinstr_profiling_event_reason_single_gpu_t;
+
+/**
+ * These values are applicable for the 3rd data parameter when
+ * the type MALI_PROFILING_EVENT_TYPE_START is used from the software channel
+ * with the MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF reason.
+ */
+typedef enum
+{
+       MALI_PROFILING_EVENT_DATA_CORE_GP0             =  1,
+       MALI_PROFILING_EVENT_DATA_CORE_PP0             =  5,
+       MALI_PROFILING_EVENT_DATA_CORE_PP1             =  6,
+       MALI_PROFILING_EVENT_DATA_CORE_PP2             =  7,
+       MALI_PROFILING_EVENT_DATA_CORE_PP3             =  8,
+       MALI_PROFILING_EVENT_DATA_CORE_PP4             =  9,
+       MALI_PROFILING_EVENT_DATA_CORE_PP5             = 10,
+       MALI_PROFILING_EVENT_DATA_CORE_PP6             = 11,
+       MALI_PROFILING_EVENT_DATA_CORE_PP7             = 12,
+} cinstr_profiling_event_data_core_t;
+
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(num) (MALI_PROFILING_EVENT_DATA_CORE_GP0 + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(num) (MALI_PROFILING_EVENT_DATA_CORE_PP0 + (num))
+
+
+#endif /*_MALI_UTGARD_PROFILING_EVENTS_H_*/
diff --git a/drivers/gpu/arm/mali400/mali/include/linux/mali/mali_utgard_uk_types.h b/drivers/gpu/arm/mali400/mali/include/linux/mali/mali_utgard_uk_types.h
new file mode 100644 (file)
index 0000000..9ce6d23
--- /dev/null
@@ -0,0 +1,1132 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __MALI_UTGARD_UK_TYPES_H__
+#define __MALI_UTGARD_UK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_uk_core U/K Core
+ * @{ */
+
+/** Definition of subsystem numbers, to assist in creating a unique identifier
+ * for each U/K call.
+ *
+ * @see _mali_uk_functions */
+typedef enum
+{
+    _MALI_UK_CORE_SUBSYSTEM,      /**< Core Group of U/K calls */
+    _MALI_UK_MEMORY_SUBSYSTEM,    /**< Memory Group of U/K calls */
+    _MALI_UK_PP_SUBSYSTEM,        /**< Fragment Processor Group of U/K calls */
+    _MALI_UK_GP_SUBSYSTEM,        /**< Vertex Processor Group of U/K calls */
+       _MALI_UK_PROFILING_SUBSYSTEM, /**< Profiling Group of U/K calls */
+    _MALI_UK_PMM_SUBSYSTEM,       /**< Power Management Module Group of U/K calls */
+       _MALI_UK_VSYNC_SUBSYSTEM,     /**< VSYNC Group of U/K calls */
+} _mali_uk_subsystem_t;
+
+/** Within a function group each function has its unique sequence number
+ * to assist in creating a unique identifier for each U/K call.
+ *
+ * An ordered pair of numbers selected from
+ * ( \ref _mali_uk_subsystem_t,\ref  _mali_uk_functions) will uniquely identify the
+ * U/K call across all groups of functions, and all functions. */
+typedef enum
+{
+       /** Core functions */
+
+    _MALI_UK_OPEN                    = 0, /**< _mali_ukk_open() */
+    _MALI_UK_CLOSE,                       /**< _mali_ukk_close() */
+    _MALI_UK_WAIT_FOR_NOTIFICATION,       /**< _mali_ukk_wait_for_notification() */
+    _MALI_UK_GET_API_VERSION,             /**< _mali_ukk_get_api_version() */
+    _MALI_UK_POST_NOTIFICATION,           /**< _mali_ukk_post_notification() */
+       _MALI_UK_GET_USER_SETTING,       /**< _mali_ukk_get_user_setting() *//**< [out] */
+       _MALI_UK_GET_USER_SETTINGS,       /**< _mali_ukk_get_user_settings() *//**< [out] */
+       _MALI_UK_STREAM_CREATE,           /**< _mali_ukk_stream_create() */
+       _MALI_UK_FENCE_VALIDATE,          /**< _mali_ukk_fence_validate() */
+
+       /** Memory functions */
+
+    _MALI_UK_INIT_MEM                = 0,    /**< _mali_ukk_init_mem() */
+    _MALI_UK_TERM_MEM,                       /**< _mali_ukk_term_mem() */
+    _MALI_UK_GET_BIG_BLOCK,                  /**< _mali_ukk_get_big_block() */
+    _MALI_UK_FREE_BIG_BLOCK,                 /**< _mali_ukk_free_big_block() */
+    _MALI_UK_MAP_MEM,                        /**< _mali_ukk_mem_mmap() */
+    _MALI_UK_UNMAP_MEM,                      /**< _mali_ukk_mem_munmap() */
+    _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, /**< _mali_ukk_mem_get_mmu_page_table_dump_size() */
+    _MALI_UK_DUMP_MMU_PAGE_TABLE,            /**< _mali_ukk_mem_dump_mmu_page_table() */
+    _MALI_UK_ATTACH_DMA_BUF,                 /**< _mali_ukk_attach_dma_buf() */
+    _MALI_UK_RELEASE_DMA_BUF,                /**< _mali_ukk_release_dma_buf() */
+    _MALI_UK_DMA_BUF_GET_SIZE,               /**< _mali_ukk_dma_buf_get_size() */
+    _MALI_UK_ATTACH_UMP_MEM,                 /**< _mali_ukk_attach_ump_mem() */
+    _MALI_UK_RELEASE_UMP_MEM,                /**< _mali_ukk_release_ump_mem() */
+    _MALI_UK_MAP_EXT_MEM,                    /**< _mali_uku_map_external_mem() */
+    _MALI_UK_UNMAP_EXT_MEM,                  /**< _mali_uku_unmap_external_mem() */
+    _MALI_UK_VA_TO_MALI_PA,                  /**< _mali_uku_va_to_mali_pa() */
+
+    /** Common functions for each core */
+
+    _MALI_UK_START_JOB           = 0,     /**< Start a Fragment/Vertex Processor Job on a core */
+    _MALI_UK_GET_NUMBER_OF_CORES,         /**< Get the number of Fragment/Vertex Processor cores */
+    _MALI_UK_GET_CORE_VERSION,            /**< Get the Fragment/Vertex Processor version compatible with all cores */
+
+    /** Fragment Processor Functions  */
+
+    _MALI_UK_PP_START_JOB            = _MALI_UK_START_JOB,            /**< _mali_ukk_pp_start_job() */
+    _MALI_UK_GET_PP_NUMBER_OF_CORES  = _MALI_UK_GET_NUMBER_OF_CORES,  /**< _mali_ukk_get_pp_number_of_cores() */
+    _MALI_UK_GET_PP_CORE_VERSION     = _MALI_UK_GET_CORE_VERSION,     /**< _mali_ukk_get_pp_core_version() */
+    _MALI_UK_PP_DISABLE_WB,                                           /**< _mali_ukk_pp_job_disable_wb() */
+
+    /** Vertex Processor Functions  */
+
+    _MALI_UK_GP_START_JOB            = _MALI_UK_START_JOB,            /**< _mali_ukk_gp_start_job() */
+    _MALI_UK_GET_GP_NUMBER_OF_CORES  = _MALI_UK_GET_NUMBER_OF_CORES,  /**< _mali_ukk_get_gp_number_of_cores() */
+    _MALI_UK_GET_GP_CORE_VERSION     = _MALI_UK_GET_CORE_VERSION,     /**< _mali_ukk_get_gp_core_version() */
+    _MALI_UK_GP_SUSPEND_RESPONSE,                                     /**< _mali_ukk_gp_suspend_response() */
+
+       /** Profiling functions */
+
+       _MALI_UK_PROFILING_START         = 0, /**< __mali_uku_profiling_start() */
+       _MALI_UK_PROFILING_ADD_EVENT,         /**< __mali_uku_profiling_add_event() */
+       _MALI_UK_PROFILING_STOP,              /**< __mali_uku_profiling_stop() */
+       _MALI_UK_PROFILING_GET_EVENT,         /**< __mali_uku_profiling_get_event() */
+       _MALI_UK_PROFILING_CLEAR,             /**< __mali_uku_profiling_clear() */
+       _MALI_UK_PROFILING_GET_CONFIG,        /**< __mali_uku_profiling_get_config() */
+       _MALI_UK_PROFILING_REPORT_SW_COUNTERS,/**< __mali_uku_profiling_report_sw_counters() */
+
+       /** VSYNC reporting fuctions */
+       _MALI_UK_VSYNC_EVENT_REPORT      = 0, /**< _mali_ukk_vsync_event_report() */
+
+} _mali_uk_functions;
+
+/** @brief Get the size necessary for system info
+ *
+ * @see _mali_ukk_get_system_info_size()
+ */
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 size;                       /**< [out] size of buffer necessary to hold system information data, in bytes */
+} _mali_uk_get_system_info_size_s;
+
+
+/** @defgroup _mali_uk_getsysteminfo U/K Get System Info
+ * @{ */
+
+/**
+ * Type definition for the core version number.
+ * Used when returning the version number read from a core
+ *
+ * Its format is that of the 32-bit Version register for a particular core.
+ * Refer to the "Mali200 and MaliGP2 3D Graphics Processor Technical Reference
+ * Manual", ARM DDI 0415C, for more information.
+ */
+typedef u32 _mali_core_version;
+
+/**
+ * Enum values for the different modes the driver can be put in.
+ * Normal is the default mode. The driver then uses a job queue and takes job objects from the clients.
+ * Job completion is reported using the _mali_ukk_wait_for_notification call.
+ * The driver blocks this io command until a job has completed or failed or a timeout occurs.
+ *
+ * The 'raw' mode is reserved for future expansion.
+ */
+typedef enum _mali_driver_mode
+{
+       _MALI_DRIVER_MODE_RAW = 1,    /**< Reserved for future expansion */
+       _MALI_DRIVER_MODE_NORMAL = 2  /**< Normal mode of operation */
+} _mali_driver_mode;
+
+/** @brief List of possible cores
+ *
+ * add new entries to the end of this enum */
+typedef enum _mali_core_type
+{
+       _MALI_GP2 = 2,                /**< MaliGP2 Programmable Vertex Processor */
+       _MALI_200 = 5,                /**< Mali200 Programmable Fragment Processor */
+       _MALI_400_GP = 6,             /**< Mali400 Programmable Vertex Processor */
+       _MALI_400_PP = 7,             /**< Mali400 Programmable Fragment Processor */
+       /* insert new core here, do NOT alter the existing values */
+} _mali_core_type;
+
+
+/** @brief Capabilities of Memory Banks
+ *
+ * These may be used to restrict memory banks for certain uses. They may be
+ * used when access is not possible (e.g. Bus does not support access to it)
+ * or when access is possible but not desired (e.g. Access is slow).
+ *
+ * In the case of 'possible but not desired', there is no way of specifying
+ * the flags as an optimization hint, so that the memory could be used as a
+ * last resort.
+ *
+ * @see _mali_mem_info
+ */
+typedef enum _mali_bus_usage
+{
+
+       _MALI_PP_READABLE   = (1<<0),  /** Readable by the Fragment Processor */
+       _MALI_PP_WRITEABLE  = (1<<1),  /** Writeable by the Fragment Processor */
+       _MALI_GP_READABLE   = (1<<2),  /** Readable by the Vertex Processor */
+       _MALI_GP_WRITEABLE  = (1<<3),  /** Writeable by the Vertex Processor */
+       _MALI_CPU_READABLE  = (1<<4),  /** Readable by the CPU */
+       _MALI_CPU_WRITEABLE = (1<<5),  /** Writeable by the CPU */
+       _MALI_GP_L2_ALLOC   = (1<<6),  /** GP allocate mali L2 cache lines*/
+       _MALI_MMU_READABLE  = _MALI_PP_READABLE | _MALI_GP_READABLE,   /** Readable by the MMU (including all cores behind it) */
+       _MALI_MMU_WRITEABLE = _MALI_PP_WRITEABLE | _MALI_GP_WRITEABLE, /** Writeable by the MMU (including all cores behind it) */
+} _mali_bus_usage;
+
+typedef enum mali_memory_cache_settings
+{
+       MALI_CACHE_STANDARD                     = 0,
+       MALI_CACHE_GP_READ_ALLOCATE     = 1,
+} mali_memory_cache_settings ;
+
+
+/** @brief Information about the Mali Memory system
+ *
+ * Information is stored in a linked list, which is stored entirely in the
+ * buffer pointed to by the system_info member of the
+ * _mali_uk_get_system_info_s arguments provided to _mali_ukk_get_system_info()
+ *
+ * Each element of the linked list describes a single Mali Memory bank.
+ * Each allocation can only come from one bank, and will not cross multiple
+ * banks.
+ *
+ * On Mali-MMU systems, there is only one bank, which describes the maximum
+ * possible address range that could be allocated (which may be much less than
+ * the available physical memory)
+ *
+ * The flags member describes the capabilities of the memory. It is an error
+ * to attempt to build a job for a particular core (PP or GP) when the memory
+ * regions used do not have the capabilities for supporting that core. This
+ * would result in a job abort from the Device Driver.
+ *
+ * For example, it is correct to build a PP job where read-only data structures
+ * are taken from a memory with _MALI_PP_READABLE set and
+ * _MALI_PP_WRITEABLE clear, and a framebuffer with  _MALI_PP_WRITEABLE set and
+ * _MALI_PP_READABLE clear. However, it would be incorrect to use a framebuffer
+ * where _MALI_PP_WRITEABLE is clear.
+ */
+typedef struct _mali_mem_info
+{
+       u32 size;                     /**< Size of the memory bank in bytes */
+       _mali_bus_usage flags;        /**< Capability flags of the memory */
+       u32 maximum_order_supported;  /**< log2 supported size */
+       u32 identifier;               /* mali_memory_cache_settings cache_settings; */
+       struct _mali_mem_info * next; /**< Next List Link */
+} _mali_mem_info;
+
+
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @defgroup _mali_uk_gp_suspend_response_s Vertex Processor Suspend Response
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_gp_suspend_response()
+ *
+ * When _mali_wait_for_notification() receives notification that a
+ * Vertex Processor job was suspended, you need to send a response to indicate
+ * what needs to happen with this job. You can either abort or resume the job.
+ *
+ * - set @c code to indicate response code. This is either @c _MALIGP_JOB_ABORT or
+ * @c _MALIGP_JOB_RESUME_WITH_NEW_HEAP to indicate you will provide a new heap
+ * for the job that will resolve the out of memory condition for the job.
+ * - copy the @c cookie value from the @c _mali_uk_gp_job_suspended_s notification;
+ * this is an identifier for the suspended job
+ * - set @c arguments[0] and @c arguments[1] to zero if you abort the job. If
+ * you resume it, @c argument[0] should specify the Mali start address for the new
+ * heap and @c argument[1] the Mali end address of the heap.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ */
+typedef enum _maligp_job_suspended_response_code
+{
+       _MALIGP_JOB_ABORT,                  /**< Abort the Vertex Processor job */
+       _MALIGP_JOB_RESUME_WITH_NEW_HEAP    /**< Resume the Vertex Processor job with a new heap */
+} _maligp_job_suspended_response_code;
+
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 cookie;                     /**< [in] cookie from the _mali_uk_gp_job_suspended_s notification */
+       _maligp_job_suspended_response_code code; /**< [in] abort or resume response code, see \ref _maligp_job_suspended_response_code */
+       u32 arguments[2];               /**< [in] 0 when aborting a job. When resuming a job, the Mali start and end address for a new heap to resume the job with */
+} _mali_uk_gp_suspend_response_s;
+
+/** @} */ /* end group _mali_uk_gp_suspend_response_s */
+
+/** @defgroup _mali_uk_gpstartjob_s Vertex Processor Start Job
+ * @{ */
+
+/** @brief Status indicating the result of starting a Vertex or Fragment processor job */
+typedef enum
+{
+    _MALI_UK_START_JOB_STARTED,                         /**< Job started */
+    _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE           /**< Job could not be started at this time. Try starting the job again */
+} _mali_uk_start_job_status;
+
+/** @brief Status indicating the result of the execution of a Vertex or Fragment processor job  */
+
+typedef enum
+{
+       _MALI_UK_JOB_STATUS_END_SUCCESS         = 1<<(16+0),
+       _MALI_UK_JOB_STATUS_END_OOM             = 1<<(16+1),
+       _MALI_UK_JOB_STATUS_END_ABORT           = 1<<(16+2),
+       _MALI_UK_JOB_STATUS_END_TIMEOUT_SW      = 1<<(16+3),
+       _MALI_UK_JOB_STATUS_END_HANG            = 1<<(16+4),
+       _MALI_UK_JOB_STATUS_END_SEG_FAULT       = 1<<(16+5),
+       _MALI_UK_JOB_STATUS_END_ILLEGAL_JOB     = 1<<(16+6),
+       _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR     = 1<<(16+7),
+       _MALI_UK_JOB_STATUS_END_SHUTDOWN        = 1<<(16+8),
+       _MALI_UK_JOB_STATUS_END_SYSTEM_UNUSABLE = 1<<(16+9)
+} _mali_uk_job_status;
+
+#define MALIGP2_NUM_REGS_FRAME (6)
+
+/** @brief Arguments for _mali_ukk_gp_start_job()
+ *
+ * To start a Vertex Processor job
+ * - associate the request with a reference to a @c mali_gp_job_info by setting
+ * user_job_ptr to the address of the @c mali_gp_job_info of the job.
+ * - set @c priority to the priority of the @c mali_gp_job_info
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_gp_job_info into @c frame_registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ * When @c _mali_ukk_gp_start_job() returns @c _MALI_OSK_ERR_OK, status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again.
+ *
+ * After the job has started, @c _mali_wait_for_notification() will be notified
+ * that the job finished or got suspended. It may get suspended due to
+ * resource shortage. If it finished (see _mali_ukk_wait_for_notification())
+ * the notification will contain a @c _mali_uk_gp_job_finished_s result. If
+ * it got suspended the notification will contain a @c _mali_uk_gp_job_suspended_s
+ * result.
+ *
+ * The @c _mali_uk_gp_job_finished_s contains the job status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shutdown.
+ *
+ * In case the job got suspended, @c _mali_uk_gp_job_suspended_s contains
+ * the @c user_job_ptr identifier used to start the job with, the @c reason
+ * why the job stalled (see \ref _maligp_job_suspended_reason) and a @c cookie
+ * to identify the core on which the job stalled.  This @c cookie will be needed
+ * when responding to this notification by means of _mali_ukk_gp_suspend_response().
+ * (see _mali_ukk_gp_suspend_response()). The response is either to abort or
+ * resume the job. If the job got suspended due to an out of memory condition
+ * you may be able to resolve this by providing more memory and resuming the job.
+ *
+ */
+typedef struct
+{
+    void *ctx;                          /**< [in,out] user-kernel context (trashed on output) */
+    u32 user_job_ptr;                   /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
+    u32 priority;                       /**< [in] job priority. A lower number means higher priority */
+    u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job */
+    u32 perf_counter_flag;              /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+    u32 perf_counter_src0;              /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+    u32 perf_counter_src1;              /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+       u32 frame_builder_id;               /**< [in] id of the originating frame builder */
+       u32 flush_id;                       /**< [in] flush id within the originating frame builder */
+} _mali_uk_gp_start_job_s;
+
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE (1<<0) /**< Enable performance counter SRC0 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE (1<<1) /**< Enable performance counter SRC1 for a job */
+
+/** @} */ /* end group _mali_uk_gpstartjob_s */
+
+typedef struct
+{
+    u32 user_job_ptr;               /**< [out] identifier for the job in user space */
+    _mali_uk_job_status status;     /**< [out] status of finished job */
+    u32 heap_current_addr;          /**< [out] value of the GP PLB PL heap start address register */
+    u32 perf_counter0;              /**< [out] value of performance counter 0 (see ARM DDI0415A) */
+    u32 perf_counter1;              /**< [out] value of performance counter 1 (see ARM DDI0415A) */
+} _mali_uk_gp_job_finished_s;
+
+typedef enum _maligp_job_suspended_reason
+{
+       _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY  /**< Polygon list builder unit (PLBU) has run out of memory */
+} _maligp_job_suspended_reason;
+
+typedef struct
+{
+       u32 user_job_ptr;                    /**< [out] identifier for the job in user space */
+       _maligp_job_suspended_reason reason; /**< [out] reason why the job stalled */
+       u32 cookie;                          /**< [out] identifier for the core in kernel space on which the job stalled */
+} _mali_uk_gp_job_suspended_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @defgroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+#define _MALI_PP_MAX_SUB_JOBS 8
+
+#define _MALI_PP_MAX_FRAME_REGISTERS ((0x058/4)+1)
+
+#define _MALI_PP_MAX_WB_REGISTERS ((0x02C/4)+1)
+
+#define _MALI_DLBU_MAX_REGISTERS 4
+
+/** Flag for _mali_uk_pp_start_job_s */
+#define _MALI_PP_JOB_FLAG_NO_NOTIFICATION (1<<0)
+#define _MALI_PP_JOB_FLAG_BARRIER         (1<<1)
+#define _MALI_PP_JOB_FLAG_FENCE           (1<<2)
+
+/** @defgroup _mali_uk_ppstartjob_s Fragment Processor Start Job
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_pp_start_job()
+ *
+ * To start a Fragment Processor job
+ * - associate the request with a reference to a mali_pp_job by setting
+ * @c user_job_ptr to the address of the @c mali_pp_job of the job.
+ * - set @c priority to the priority of the mali_pp_job
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_pp_job into @c frame_registers.
+ * For MALI200 you also need to copy the write back 0,1 and 2 registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context in @c ctx that was returned from _mali_ukk_open()
+ *
+ * When _mali_ukk_pp_start_job() returns @c _MALI_OSK_ERR_OK, @c status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again.
+ *
+ * After the job has started, _mali_wait_for_notification() will be notified
+ * when the job finished. The notification will contain a
+ * @c _mali_uk_pp_job_finished_s result. It contains the @c user_job_ptr
+ * identifier used to start the job with, the job @c status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than @c watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shutdown.
+ *
+ */
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+    u32 user_job_ptr;               /**< [in] identifier for the job in user space */
+    u32 priority;                   /**< [in] job priority. A lower number means higher priority */
+    u32 frame_registers[_MALI_PP_MAX_FRAME_REGISTERS];         /**< [in] core specific registers associated with first sub job, see ARM DDI0415A */
+    u32 frame_registers_addr_frame[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_FRAME registers for sub job 1-7 */
+    u32 frame_registers_addr_stack[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_STACK registers for sub job 1-7 */
+    u32 wb0_registers[_MALI_PP_MAX_WB_REGISTERS]; /**< [in] Write-back unit 0 registers, see ARM DDI0415A */
+    u32 wb1_registers[_MALI_PP_MAX_WB_REGISTERS]; /**< [in] Write-back unit 1 registers, see ARM DDI0415A */
+    u32 wb2_registers[_MALI_PP_MAX_WB_REGISTERS]; /**< [in] Write-back unit 2 registers, see ARM DDI0415A */
+       u32 dlbu_registers[_MALI_DLBU_MAX_REGISTERS]; /**< [in] Dynamic load balancing unit registers */
+       u32 num_cores;                      /**< [in] Number of cores to set up (valid range: 1-4) */
+    u32 perf_counter_flag;              /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+    u32 perf_counter_src0;              /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+    u32 perf_counter_src1;              /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+       u32 frame_builder_id;               /**< [in] id of the originating frame builder */
+       u32 flush_id;                       /**< [in] flush id within the originating frame builder */
+       u32 flags;                          /**< [in] See _MALI_PP_JOB_FLAG_* for a list of available flags */
+       s32 fence;                          /**< [in,out] Fence to wait on / fence that will be signalled on job completion, if _MALI_PP_JOB_FLAG_FENCE is set */
+       s32 stream;                         /**< [in] Stream identifier */
+} _mali_uk_pp_start_job_s;
+/** @} */ /* end group _mali_uk_ppstartjob_s */
+
+/** @brief Notification data delivered when a Fragment Processor job finishes
+ * (see _MALI_NOTIFICATION_PP_FINISHED). */
+typedef struct
+{
+    u32 user_job_ptr;                          /**< [out] identifier for the job in user space */
+    _mali_uk_job_status status;                /**< [out] status of finished job */
+    u32 perf_counter0[_MALI_PP_MAX_SUB_JOBS];  /**< [out] value of performance counter 0 (see ARM DDI0415A), one for each sub job */
+    u32 perf_counter1[_MALI_PP_MAX_SUB_JOBS];  /**< [out] value of performance counter 1 (see ARM DDI0415A), one for each sub job */
+} _mali_uk_pp_job_finished_s;
+
+/**
+ * Flags to indicate write-back units
+ */
+typedef enum
+{
+       _MALI_UK_PP_JOB_WB0 = 1, /**< Write-back unit 0 */
+       _MALI_UK_PP_JOB_WB1 = 2, /**< Write-back unit 1 */
+       _MALI_UK_PP_JOB_WB2 = 4, /**< Write-back unit 2 */
+} _mali_uk_pp_job_wbx_flag;
+
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+    u32 fb_id;                      /**< [in] Frame builder ID of job to disable WB units for */
+    u32 flush_id;                   /**< [in] Flush ID of job to disable WB units for */
+    _mali_uk_pp_job_wbx_flag wbx;   /**< [in] write-back units to disable */
+} _mali_uk_pp_disable_wb_s;
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ * @{ */
+
+/** @defgroup _mali_uk_waitfornotification_s Wait For Notification
+ * @{ */
+
+/** @brief Notification type encodings
+ *
+ * Each Notification type is an ordered pair of (subsystem,id), and is unique.
+ *
+ * The encoding of subsystem,id into a 32-bit word is:
+ * encoding = (( subsystem << _MALI_NOTIFICATION_SUBSYSTEM_SHIFT ) & _MALI_NOTIFICATION_SUBSYSTEM_MASK)
+ *            | (( id <<  _MALI_NOTIFICATION_ID_SHIFT ) & _MALI_NOTIFICATION_ID_MASK)
+ *
+ * @see _mali_uk_wait_for_notification_s
+ */
+typedef enum
+{
+       /** core notifications */
+
+       _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS =  (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x20,
+       _MALI_NOTIFICATION_APPLICATION_QUIT =           (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x40,
+       _MALI_NOTIFICATION_SETTINGS_CHANGED =           (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x80,
+
+       /** Fragment Processor notifications */
+
+       _MALI_NOTIFICATION_PP_FINISHED =                (_MALI_UK_PP_SUBSYSTEM << 16) | 0x10,
+
+       /** Vertex Processor notifications */
+
+       _MALI_NOTIFICATION_GP_FINISHED =                (_MALI_UK_GP_SUBSYSTEM << 16) | 0x10,
+       _MALI_NOTIFICATION_GP_STALLED =                 (_MALI_UK_GP_SUBSYSTEM << 16) | 0x20,
+
+} _mali_uk_notification_type;
+
+/** to assist in splitting up 32-bit notification value in subsystem and id value */
+#define _MALI_NOTIFICATION_SUBSYSTEM_MASK 0xFFFF0000
+#define _MALI_NOTIFICATION_SUBSYSTEM_SHIFT 16
+#define _MALI_NOTIFICATION_ID_MASK 0x0000FFFF
+#define _MALI_NOTIFICATION_ID_SHIFT 0
+
+
+/** @brief Enumeration of possible settings which match mali_setting_t in user space
+ *
+ *
+ */
+typedef enum
+{
+       _MALI_UK_USER_SETTING_SW_EVENTS_ENABLE = 0,
+       _MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED,
+       _MALI_UK_USER_SETTING_DEPTHBUFFER_CAPTURE_ENABLED,
+       _MALI_UK_USER_SETTING_STENCILBUFFER_CAPTURE_ENABLED,
+       _MALI_UK_USER_SETTING_PER_TILE_COUNTERS_CAPTURE_ENABLED,
+       _MALI_UK_USER_SETTING_BUFFER_CAPTURE_COMPOSITOR,
+       _MALI_UK_USER_SETTING_BUFFER_CAPTURE_WINDOW,
+       _MALI_UK_USER_SETTING_BUFFER_CAPTURE_OTHER,
+       _MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES,
+       _MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR,
+       _MALI_UK_USER_SETTING_SW_COUNTER_ENABLED,
+       _MALI_UK_USER_SETTING_MAX,
+} _mali_uk_user_setting_t;
+
+/* See mali_user_settings_db.c */
+extern const char *_mali_uk_user_setting_descriptions[];
+#define _MALI_UK_USER_SETTING_DESCRIPTIONS \
+{                                           \
+       "sw_events_enable",                 \
+       "colorbuffer_capture_enable",       \
+       "depthbuffer_capture_enable",       \
+       "stencilbuffer_capture_enable",     \
+       "per_tile_counters_enable",         \
+       "buffer_capture_compositor",        \
+       "buffer_capture_window",            \
+       "buffer_capture_other",             \
+       "buffer_capture_n_frames",          \
+       "buffer_capture_resize_factor",     \
+       "sw_counters_enable",               \
+};
+
+/** @brief struct to hold the value to a particular setting as seen in the kernel space
+ */
+typedef struct
+{
+       _mali_uk_user_setting_t setting;
+       u32 value;
+} _mali_uk_settings_changed_s;
+
+/** @brief Arguments for _mali_ukk_wait_for_notification()
+ *
+ * On successful return from _mali_ukk_wait_for_notification(), the members of
+ * this structure will indicate the reason for notification.
+ *
+ * Specifically, the source of the notification can be identified by the
+ * subsystem and id fields of the mali_uk_notification_type in the code.type
+ * member. The type member is encoded in a way to divide up the types into a
+ * subsystem field, and a per-subsystem ID field. See
+ * _mali_uk_notification_type for more information.
+ *
+ * Interpreting the data union member depends on the notification type:
+ *
+ * - type == _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS
+ *     - The kernel side is shutting down. No further
+ * _mali_uk_wait_for_notification() calls should be made.
+ *     - In this case, the value of the data union member is undefined.
+ *     - This is used to indicate to the user space client that it should close
+ * the connection to the Mali Device Driver.
+ * - type == _MALI_NOTIFICATION_PP_FINISHED
+ *    - The notification data is of type _mali_uk_pp_job_finished_s. It contains the user_job_ptr
+ * identifier used to start the job with, the job status, the number of milliseconds the job took to render,
+ * and values of core registers when the job finished (irq status, performance counters, renderer list
+ * address).
+ *    - A job has finished successfully when its status member is _MALI_UK_JOB_STATUS_FINISHED.
+ *    - If the hardware detected a timeout while rendering the job, or software detected the job is
+ * taking more than watchdog_msecs (see _mali_ukk_pp_start_job()) to complete, the status member will
+ * indicate _MALI_UK_JOB_STATUS_HANG.
+ *    - If the hardware detected a bus error while accessing memory associated with the job, status will
+ * indicate _MALI_UK_JOB_STATUS_SEG_FAULT.
+ *    - Status will indicate _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to stop the job but the job
+ * didn't start on the hardware yet, e.g. when the driver closes.
+ * - type == _MALI_NOTIFICATION_GP_FINISHED
+ *     - The notification data is of type _mali_uk_gp_job_finished_s. The notification is similar to that of
+ * type == _MALI_NOTIFICATION_PP_FINISHED, except that several other GP core register values are returned.
+ * The status values have the same meaning for type == _MALI_NOTIFICATION_PP_FINISHED.
+ * - type == _MALI_NOTIFICATION_GP_STALLED
+ *     - The notification data is of type _mali_uk_gp_job_suspended_s. It contains the user_job_ptr
+ * identifier used to start the job with, the reason why the job stalled and a cookie to identify the core on
+ * which the job stalled.
+ *     - The reason member of gp_job_suspended is set to _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY
+ * when the polygon list builder unit has run out of memory.
+ */
+typedef struct
+{
+       void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_notification_type type; /**< [out] Type of notification available */
+       union
+       {
+               _mali_uk_gp_job_suspended_s gp_job_suspended;/**< [out] Notification data for _MALI_NOTIFICATION_GP_STALLED notification type */
+               _mali_uk_gp_job_finished_s  gp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_GP_FINISHED notification type */
+               _mali_uk_pp_job_finished_s  pp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_PP_FINISHED notification type */
+               _mali_uk_settings_changed_s setting_changed;/**< [out] Notification data for _MALI_NOTIFICATION_SETTINGS_CHANGED notification type */
+       } data;
+} _mali_uk_wait_for_notification_s;
+
+/** @brief Arguments for _mali_ukk_post_notification()
+ *
+ * Posts the specified notification to the notification queue for this application.
+ * This is used to send a quit message to the callback thread.
+ */
+typedef struct
+{
+    void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_notification_type type; /**< [in] Type of notification to post */
+} _mali_uk_post_notification_s;
+
+/** @} */ /* end group _mali_uk_waitfornotification_s */
+
+/** @defgroup _mali_uk_getapiversion_s Get API Version
+ * @{ */
+
+/** helpers for Device Driver API version handling */
+
+/** @brief Encode a version ID from a 16-bit input
+ *
+ * @note the input is assumed to be 16 bits. It must not exceed 16 bits. */
+#define _MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+
+/** @brief Check whether a 32-bit value is likely to be Device Driver API
+ * version ID. */
+#define _IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+
+/** @brief Decode a 16-bit version number from a 32-bit Device Driver API version
+ * ID */
+#define _GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+
+/** @brief Determine whether two 32-bit encoded version IDs match
+ *
+ * Both values must be well-formed version IDs (see _IS_VERSION_ID) and carry
+ * the same decoded 16-bit version number (see _GET_VERSION). */
+#define _IS_API_MATCH(x, y) (_IS_VERSION_ID((x)) && _IS_VERSION_ID((y)) && (_GET_VERSION((x)) == _GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API
+ * The version is a 16bit integer incremented on each API change.
+ * The 16bit integer is stored twice in a 32bit integer
+ * For example, for version 1 the value would be 0x00010001
+ */
+#define _MALI_API_VERSION 19
+#define _MALI_UK_API_VERSION _MAKE_VERSION_ID(_MALI_API_VERSION)
+
+/**
+ * The API version is a 16-bit integer stored in both the lower and upper 16-bits
+ * of a 32-bit value. The 16-bit API version value is incremented on each API
+ * change. Version 1 would be 0x00010001. Used in _mali_uk_get_api_version_s.
+ */
+typedef u32 _mali_uk_api_version;
+
+/** @brief Arguments for _mali_uk_get_api_version()
+ *
+ * The user-side interface version must be written into the version member,
+ * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of
+ * the kernel-side interface.
+ *
+ * On successful return, the version member will be the API version of the
+ * kernel-side interface. _MALI_UK_API_VERSION macro defines the current version
+ * of the API.
+ *
+ * The compatible member must be checked to see if the version of the user-side
+ * interface is compatible with the kernel-side interface, since future versions
+ * of the interface may be backwards compatible.
+ */
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_api_version version;   /**< [in,out] API version of user-side interface. */
+       int compatible;                 /**< [out] @c 1 when @c version is compatible, @c 0 otherwise */
+} _mali_uk_get_api_version_s;
+/** @} */ /* end group _mali_uk_getapiversion_s */
+
+/** @defgroup _mali_uk_get_user_settings_s Get user space settings */
+
+/** @brief struct to keep the matching values of the user space settings within certain context
+ *
+ * Each member of the settings array corresponds to a matching setting in the user space and its value is the value
+ * of that particular setting.
+ *
+ * All settings are given reference to the context pointed to by the ctx pointer.
+ *
+ */
+typedef struct
+{
+       void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       u32 settings[_MALI_UK_USER_SETTING_MAX]; /**< [out] The values for all settings */
+} _mali_uk_get_user_settings_s;
+
+/** @brief struct to hold the value of a particular setting from the user space within a given context
+ */
+typedef struct
+{
+       void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_user_setting_t setting; /**< [in] setting to get */
+       u32 value;                       /**< [out] value of setting */
+} _mali_uk_get_user_setting_s;
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_init_mem(). */
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 mali_address_base;          /**< [out] start of MALI address space */
+       u32 memory_size;                /**< [out] total MALI address space available */
+} _mali_uk_init_mem_s;
+
+/** @brief Arguments for _mali_ukk_term_mem(). */
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_term_mem_s;
+
+/** Flag for _mali_uk_map_external_mem_s, _mali_uk_attach_ump_mem_s and _mali_uk_attach_dma_buf_s */
+#define _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE (1<<0)
+
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 phys_addr;                  /**< [in] physical address */
+       u32 size;                       /**< [in] size */
+       u32 mali_address;               /**< [in] mali address to map the physical memory to */
+       u32 rights;                     /**< [in] rights necessary for accessing memory */
+       u32 flags;                      /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+       u32 cookie;                     /**< [out] identifier for mapped memory object in kernel space  */
+} _mali_uk_map_external_mem_s;
+
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 cookie;                     /**< [out] identifier for mapped memory object in kernel space  */
+} _mali_uk_unmap_external_mem_s;
+
+/** @note This is identical to _mali_uk_map_external_mem_s above, however phys_addr is replaced by memory descriptor */
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 mem_fd;                     /**< [in] Memory descriptor */
+       u32 size;                       /**< [in] size */
+       u32 mali_address;               /**< [in] mali address to map the physical memory to */
+       u32 rights;                     /**< [in] rights necessary for accessing memory */
+       u32 flags;                      /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+       u32 cookie;                     /**< [out] identifier for mapped memory object in kernel space  */
+} _mali_uk_attach_dma_buf_s;
+
+typedef struct
+{
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 mem_fd;                     /**< [in] Memory descriptor */
+       u32 size;                       /**< [out] size */
+} _mali_uk_dma_buf_get_size_s;
+
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 cookie;                     /**< [in] identifier for mapped memory object in kernel space  */
+} _mali_uk_release_dma_buf_s;
+
+/** @note This is identical to _mali_uk_map_external_mem_s above, however phys_addr is replaced by secure_id */
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 secure_id;                  /**< [in] secure id */
+       u32 size;                       /**< [in] size */
+       u32 mali_address;               /**< [in] mali address to map the physical memory to */
+       u32 rights;                     /**< [in] rights necessary for accessing memory */
+       u32 flags;                      /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+       u32 cookie;                     /**< [out] identifier for mapped memory object in kernel space  */
+} _mali_uk_attach_ump_mem_s;
+
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 cookie;                     /**< [in] identifier for mapped memory object in kernel space  */
+} _mali_uk_release_ump_mem_s;
+
+/** @brief Arguments for _mali_ukk_va_to_mali_pa()
+ *
+ * if size is zero or not a multiple of the system's page size, it will be
+ * rounded up to the next multiple of the page size. This will occur before
+ * any other use of the size parameter.
+ *
+ * if va is not PAGE_SIZE aligned, it will be rounded down to the next page
+ * boundary.
+ *
+ * The range (va) to ((u32)va)+(size-1) inclusive will be checked for physical
+ * contiguity.
+ *
+ * The implementor will check that the entire physical range is allowed to be mapped
+ * into user-space.
+ *
+ * Failure will occur if either of the above are not satisfied.
+ *
+ * Otherwise, the physical base address of the range is returned through pa,
+ * va is updated to be page aligned, and size is updated to be a non-zero
+ * multiple of the system's pagesize.
+ */
+typedef struct
+{
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       void *va;                       /**< [in,out] Virtual address of the start of the range */
+       u32 pa;                         /**< [out] Physical base address of the range */
+       u32 size;                       /**< [in,out] Size of the range, in bytes. */
+} _mali_uk_va_to_mali_pa_s;
+
+
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 size;                       /**< [out] size of MMU page table information (registers + page tables) */
+} _mali_uk_query_mmu_page_table_dump_size_s;
+
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 size;                       /**< [in] size of buffer to receive mmu page table information */
+    void *buffer;                   /**< [in,out] buffer to receive mmu page table information */
+    u32 register_writes_size;       /**< [out] size of MMU register dump */
+       u32 *register_writes;           /**< [out] pointer within buffer where MMU register dump is stored */
+       u32 page_table_dump_size;       /**< [out] size of MMU page table dump */
+       u32 *page_table_dump;           /**< [out] pointer within buffer where MMU page table dump is stored */
+} _mali_uk_dump_mmu_page_table_s;
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_pp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_number_of_cores(), @c number_of_cores
+ * will contain the number of Fragment Processor cores in the system.
+ */
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+    u32 number_of_cores;            /**< [out] number of Fragment Processor cores in the system */
+} _mali_uk_get_pp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_pp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_core_version(), @c version contains
+ * the version that all Fragment Processor cores are compatible with.
+ */
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+    _mali_core_version version;     /**< [out] version returned from core, see \ref _mali_core_version  */
+} _mali_uk_get_pp_core_version_s;
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_gp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_number_of_cores(), @c number_of_cores
+ * will contain the number of Vertex Processor cores in the system.
+ */
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+    u32 number_of_cores;            /**< [out] number of Vertex Processor cores in the system */
+} _mali_uk_get_gp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_gp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_core_version(), @c version contains
+ * the version that all Vertex Processor cores are compatible with.
+ */
+typedef struct
+{
+    void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+    _mali_core_version version;     /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_gp_core_version_s;
+
+typedef struct
+{
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 limit;                      /**< [in,out] The desired limit for number of events to record on input, actual limit on output */
+} _mali_uk_profiling_start_s;
+
+typedef struct
+{
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 event_id;                   /**< [in] event id to register (see  enum mali_profiling_events for values) */
+       u32 data[5];                    /**< [in] event specific data */
+} _mali_uk_profiling_add_event_s;
+
+typedef struct
+{
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 count;                      /**< [out] The number of events sampled */
+} _mali_uk_profiling_stop_s;
+
+typedef struct
+{
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 index;                      /**< [in] which index to get (starting at zero) */
+       u64 timestamp;                  /**< [out] timestamp of event */
+       u32 event_id;                   /**< [out] event id of event (see  enum mali_profiling_events for values) */
+       u32 data[5];                    /**< [out] event specific data */
+} _mali_uk_profiling_get_event_s;
+
+typedef struct
+{
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_profiling_clear_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments to _mali_ukk_mem_mmap()
+ *
+ * Use of the phys_addr member depends on whether the driver is compiled for
+ * Mali-MMU or nonMMU:
+ * - in the nonMMU case, this is the physical address of the memory as seen by
+ * the CPU (which may be a constant offset from that used by Mali)
+ * - in the MMU case, this is the Mali Virtual base address of the memory to
+ * allocate, and the particular physical pages used to back the memory are
+ * entirely determined by _mali_ukk_mem_mmap(). The details of the physical pages
+ * are not reported to user-space for security reasons.
+ *
+ * The cookie member must be stored for use later when freeing the memory by
+ * calling _mali_ukk_mem_munmap(). In the Mali-MMU case, the cookie is secure.
+ *
+ * The ukk_private word must be set to zero when calling from user-space. On
+ * Kernel-side, the  OS implementation of the U/K interface can use it to
+ * communicate data to the OS implementation of the OSK layer. In particular,
+ * _mali_ukk_get_big_block() directly calls _mali_ukk_mem_mmap directly, and
+ * will communicate its own ukk_private word through the ukk_private member
+ * here. The common code itself will not inspect or modify the ukk_private
+ * word, and so it may be safely used for whatever purposes necessary to
+ * integrate Mali Memory handling into the OS.
+ *
+ * The uku_private member is currently reserved for use by the user-side
+ * implementation of the U/K interface. Its value must be zero.
+ */
+typedef struct
+{
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       void *mapping;                  /**< [out] Returns user-space virtual address for the mapping */
+       u32 size;                       /**< [in] Size of the requested mapping */
+       u32 phys_addr;                  /**< [in] Physical address - could be offset, depending on caller+callee convention */
+       u32 cookie;                     /**< [out] Returns a cookie for use in munmap calls */
+       void *uku_private;              /**< [in] User-side Private word used by U/K interface */
+       void *ukk_private;              /**< [in] Kernel-side Private word used by U/K interface */
+       mali_memory_cache_settings cache_settings; /**< [in] Option to set special cache flags, tuning L2 efficiency */
+} _mali_uk_mem_mmap_s;
+
+/** @brief Arguments to _mali_ukk_mem_munmap()
+ *
+ * The cookie and mapping members must be that returned from the same previous
+ * call to _mali_ukk_mem_mmap(). The size member must correspond to cookie
+ * and mapping - that is, it must be the value originally supplied to a call to
+ * _mali_ukk_mem_mmap that returned the values of mapping and cookie.
+ *
+ * An error will be returned if an attempt is made to unmap only part of the
+ * originally obtained range, or to unmap more than was originally obtained.
+ */
+typedef struct
+{
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       void *mapping;                  /**< [in] The mapping returned from mmap call */
+       u32 size;                       /**< [in] The size passed to mmap call */
+       u32 cookie;                     /**< [in] Cookie from mmap call */
+} _mali_uk_mem_munmap_s;
+/** @} */ /* end group _mali_uk_memory */
+
+/** @defgroup _mali_uk_vsync U/K VSYNC Wait Reporting Module
+ * @{ */
+
+/** @brief VSYNC events
+ *
+ * These events are reported when DDK starts to wait for vsync and when the
+ * vsync has occurred and the DDK can continue on the next frame.
+ */
+typedef enum _mali_uk_vsync_event
+{
+       _MALI_UK_VSYNC_EVENT_BEGIN_WAIT = 0,
+       _MALI_UK_VSYNC_EVENT_END_WAIT
+} _mali_uk_vsync_event;
+
+/** @brief Arguments to _mali_ukk_vsync_event()
+ *
+ */
+typedef struct
+{
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_vsync_event event;     /**< [in] VSYNCH event type */
+} _mali_uk_vsync_event_report_s;
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @defgroup _mali_uk_sw_counters_report U/K Software Counter Reporting
+ * @{ */
+
+/** @brief Software counter values
+ *
+ * Values recorded for each of the software counters during a single renderpass.
+ */
+typedef struct
+{
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32* counters;                  /**< [in] The array of counter values */
+       u32  num_counters;              /**< [in] The number of elements in counters array */
+} _mali_uk_sw_counters_report_s;
+
+/** @} */ /* end group _mali_uk_sw_counters_report */
+
+/** @defgroup _mali_uk_stream U/K Mali stream module
+ * @{ */
+
+/** @brief Create stream
+ */
+typedef struct
+{
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       int fd;                         /**< [out] file descriptor describing stream */
+} _mali_uk_stream_create_s;
+
+/** @brief Destroy stream
+*/
+typedef struct
+{
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       int fd;                         /**< [in] file descriptor describing stream */
+} _mali_uk_stream_destroy_s;
+
+/** @brief Check fence validity
+ */
+typedef struct
+{
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       int fd;                         /**< [in] file descriptor describing fence */
+} _mali_uk_fence_validate_s;
+
+/** @} */ /* end group _mali_uk_stream */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_UK_TYPES_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/linux/license/gpl/mali_kernel_license.h b/drivers/gpu/arm/mali400/mali/linux/license/gpl/mali_kernel_license.h
new file mode 100644 (file)
index 0000000..bd4f9aa
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __MALI_KERNEL_LICENSE_H__
+#define __MALI_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define MALI_KERNEL_LINUX_LICENSE     "GPL"
+#define MALI_LICENSE_IS_GPL 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LICENSE_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_dma_buf.c b/drivers/gpu/arm/mali400/mali/linux/mali_dma_buf.c
new file mode 100644 (file)
index 0000000..900c969
--- /dev/null
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/fs.h>     /* file system operations */
+#include <asm/uaccess.h>       /* user space access */
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+#include <linux/rbtree.h>
+#include <linux/platform_device.h>
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_linux.h"
+
+#include "mali_kernel_memory_engine.h"
+#include "mali_memory.h"
+
+
+struct mali_dma_buf_attachment { /* Tracks one dma-buf attached to the Mali device; shared between sessions via the rb-tree below. */
+       struct dma_buf *buf;                    /* dma-buf this entry wraps; holds a dma_buf_get() reference */
+       struct dma_buf_attachment *attachment;  /* attachment of buf to the Mali platform device */
+       struct sg_table *sgt;                   /* scatter list from dma_buf_map_attachment(); set in mali_dma_buf_commit() */
+       _mali_osk_atomic_t ref;                 /* number of users; entry is torn down when it drops to 0 */
+       struct rb_node rb_node;                 /* node in mali_dma_bufs, keyed by buf pointer value */
+};
+
+static struct rb_root mali_dma_bufs = RB_ROOT;  /* all live attachments, so the same dma-buf attaches only once */
+static DEFINE_SPINLOCK(mali_dma_bufs_lock);     /* protects mali_dma_bufs and ref transitions to/from zero */
+
+static inline struct mali_dma_buf_attachment *mali_dma_buf_lookup(struct rb_root *root, struct dma_buf *target) /* Find the attachment wrapping @target; on hit it is returned with its refcount already incremented. */
+{
+       struct rb_node *node = root->rb_node;
+       struct mali_dma_buf_attachment *res;
+
+       spin_lock(&mali_dma_bufs_lock);
+       while (node) /* binary search keyed on the dma_buf pointer value */
+       {
+               res = rb_entry(node, struct mali_dma_buf_attachment, rb_node);
+
+               if (target < res->buf) node = node->rb_left;
+               else if (target > res->buf) node = node->rb_right;
+               else
+               {
+                       _mali_osk_atomic_inc(&res->ref); /* take the caller's reference under the lock, so a concurrent release cannot free res */
+                       spin_unlock(&mali_dma_bufs_lock);
+                       return res;
+               }
+       }
+       spin_unlock(&mali_dma_bufs_lock);
+
+       return NULL; /* not attached yet; caller creates a new entry */
+}
+
+static void mali_dma_buf_add(struct rb_root *root, struct mali_dma_buf_attachment *new) /* Insert @new into @root, keyed by dma_buf pointer value; takes mali_dma_bufs_lock itself. */
+{
+       struct rb_node **node = &root->rb_node;
+       struct rb_node *parent = NULL;
+       struct mali_dma_buf_attachment *res;
+
+       spin_lock(&mali_dma_bufs_lock);
+       while (*node) /* descend to the insertion slot; duplicates cannot occur, callers look up first */
+       {
+               parent = *node;
+               res = rb_entry(*node, struct mali_dma_buf_attachment, rb_node);
+
+               if (new->buf < res->buf) node = &(*node)->rb_left;
+               else node = &(*node)->rb_right;
+       }
+
+       rb_link_node(&new->rb_node, parent, node);
+       rb_insert_color(&new->rb_node, root); /* rebalance in @root; was hard-coded to &mali_dma_bufs, silently ignoring the parameter */
+
+       spin_unlock(&mali_dma_bufs_lock);
+
+       return;
+}
+
+
+static void mali_dma_buf_release(void *ctx, void *handle) /* Memory-engine release callback: drop one reference to @handle; on the last drop, unmap, detach and free the attachment. @ctx is unused. */
+{
+       struct mali_dma_buf_attachment *mem;
+       u32 ref;
+
+       mem = (struct mali_dma_buf_attachment *)handle;
+
+       MALI_DEBUG_ASSERT_POINTER(mem);
+       MALI_DEBUG_ASSERT_POINTER(mem->attachment);
+       MALI_DEBUG_ASSERT_POINTER(mem->buf);
+
+       spin_lock(&mali_dma_bufs_lock);
+       ref = _mali_osk_atomic_dec_return(&mem->ref); /* decrement under the lock so a concurrent lookup cannot resurrect a dying entry */
+
+       if (0 == ref)
+       {
+               rb_erase(&mem->rb_node, &mali_dma_bufs); /* unlink before unlocking: no new lookup can find this entry */
+               spin_unlock(&mali_dma_bufs_lock);
+
+               MALI_DEBUG_ASSERT(0 == _mali_osk_atomic_read(&mem->ref));
+
+               dma_buf_unmap_attachment(mem->attachment, mem->sgt, DMA_BIDIRECTIONAL);
+
+               dma_buf_detach(mem->buf, mem->attachment);
+               dma_buf_put(mem->buf); /* drop the reference taken by dma_buf_get() in mali_attach_dma_buf() */
+
+               _mali_osk_free(mem);
+       }
+       else
+       {
+               spin_unlock(&mali_dma_bufs_lock);
+       }
+}
+
+/* Callback from memory engine which will map into Mali virtual address space */
+/* Callback from memory engine which will map into Mali virtual address space */
+static mali_physical_memory_allocation_result mali_dma_buf_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info) /* @ctx is the mali_dma_buf_attachment; maps its sg list at descriptor->mali_address. */
+{
+       struct mali_session_data *session;
+       struct mali_page_directory *pagedir;
+       struct mali_dma_buf_attachment *mem;
+       struct scatterlist *sg;
+       int i;
+       u32 virt;
+
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+       MALI_DEBUG_ASSERT_POINTER(engine);
+       MALI_DEBUG_ASSERT_POINTER(descriptor);
+       MALI_DEBUG_ASSERT_POINTER(offset);
+       MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+       /* Mapping dma-buf with an offset is not supported. */
+       MALI_DEBUG_ASSERT(0 == *offset);
+
+       virt = descriptor->mali_address;
+       session = (struct mali_session_data *)descriptor->mali_addr_mapping_info;
+       MALI_DEBUG_ASSERT_POINTER(session); /* validate before first use (was asserted only after mali_session_get_page_directory(session)) */
+
+       pagedir = mali_session_get_page_directory(session);
+
+       mem = (struct mali_dma_buf_attachment *)ctx;
+
+       MALI_DEBUG_ASSERT_POINTER(mem);
+
+       mem->sgt = dma_buf_map_attachment(mem->attachment, DMA_BIDIRECTIONAL);
+       if (IS_ERR_OR_NULL(mem->sgt))
+       {
+               MALI_PRINT_ERROR(("Failed to map dma-buf attachment\n"));
+               return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+       }
+
+       for_each_sg(mem->sgt->sgl, sg, mem->sgt->nents, i) /* map each segment contiguously into the Mali VA range */
+       {
+               u32 size = sg_dma_len(sg);
+               dma_addr_t phys = sg_dma_address(sg);
+
+               /* sg must be page aligned. */
+               MALI_DEBUG_ASSERT(0 == size % MALI_MMU_PAGE_SIZE);
+
+               mali_mmu_pagedir_update(pagedir, virt, phys, size, MALI_CACHE_STANDARD);
+
+               virt += size;
+               *offset += size;
+       }
+
+       if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+       {
+               u32 guard_phys;
+               MALI_DEBUG_PRINT(7, ("Mapping in extra guard page\n"));
+
+               guard_phys = sg_dma_address(mem->sgt->sgl); /* guard page aliases the first segment's start */
+               mali_mmu_pagedir_update(mali_session_get_page_directory(session), virt, guard_phys, MALI_MMU_PAGE_SIZE, MALI_CACHE_STANDARD);
+       }
+
+       MALI_DEBUG_ASSERT(*offset == descriptor->size);
+
+       alloc_info->ctx = NULL;
+       alloc_info->handle = mem;
+       alloc_info->next = NULL;
+       alloc_info->release = mali_dma_buf_release; /* engine calls this to drop the attachment reference */
+
+       return MALI_MEM_ALLOC_FINISHED;
+}
+
+int mali_attach_dma_buf(struct mali_session_data *session, _mali_uk_attach_dma_buf_s __user *user_arg) /* IOCTL handler: attach the dma-buf fd in @user_arg and map it into this session's Mali VA space; returns the descriptor cookie to user space. */
+{
+       mali_physical_memory_allocator external_memory_allocator;
+       struct dma_buf *buf;
+       struct mali_dma_buf_attachment *mem;
+       _mali_uk_attach_dma_buf_s args;
+       mali_memory_allocation *descriptor;
+       int md;
+       int fd;
+
+       /* Get call arguments from user space. copy_from_user returns how many bytes which were NOT copied */
+       if (0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_attach_dma_buf_s)))
+       {
+               return -EFAULT;
+       }
+
+
+       fd = args.mem_fd;
+
+       buf = dma_buf_get(fd);
+       if (IS_ERR_OR_NULL(buf))
+       {
+               MALI_DEBUG_PRINT(2, ("Failed to get dma-buf from fd: %d\n", fd));
+               return PTR_RET(buf);
+       }
+
+       /* Currently, only mapping of the full buffer is supported. */
+       if (args.size != buf->size)
+       {
+               MALI_DEBUG_PRINT(2, ("dma-buf size doesn't match mapping size.\n"));
+               dma_buf_put(buf);
+               return -EINVAL;
+       }
+
+
+       mem = mali_dma_buf_lookup(&mali_dma_bufs, buf);
+       if (NULL == mem)
+       {
+               /* dma-buf is not already attached to Mali */
+               mem = _mali_osk_calloc(1, sizeof(struct mali_dma_buf_attachment));
+               if (NULL == mem)
+               {
+                       MALI_PRINT_ERROR(("Failed to allocate dma-buf tracing struct\n"));
+                       dma_buf_put(buf);
+                       return -ENOMEM;
+               }
+               _mali_osk_atomic_init(&mem->ref, 1);
+               mem->buf = buf;
+
+               mem->attachment = dma_buf_attach(mem->buf, &mali_platform_device->dev);
+               if (IS_ERR_OR_NULL(mem->attachment)) /* dma_buf_attach() returns ERR_PTR() on failure, never NULL, so the old NULL check missed every error */
+               {
+                       MALI_DEBUG_PRINT(2, ("Failed to attach to dma-buf %d\n", fd));
+                       dma_buf_put(mem->buf);
+                       _mali_osk_free(mem);
+                       return -EFAULT;
+               }
+
+               mali_dma_buf_add(&mali_dma_bufs, mem);
+       }
+       else
+       {
+               /* dma-buf is already attached to Mali */
+               /* Give back the reference we just took, mali_dma_buf_lookup grabbed a new reference for us. */
+               dma_buf_put(buf);
+       }
+
+       /* Map dma-buf into this session's page tables */
+
+       /* Set up Mali memory descriptor */
+       descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation));
+       if (NULL == descriptor)
+       {
+               MALI_PRINT_ERROR(("Failed to allocate descriptor dma-buf %d\n", fd));
+               mali_dma_buf_release(NULL, mem);
+               return -ENOMEM;
+       }
+
+       descriptor->size = args.size;
+       descriptor->mapping = NULL;
+       descriptor->mali_address = args.mali_address;
+       descriptor->mali_addr_mapping_info = (void*)session;
+       descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */
+       descriptor->lock = session->memory_lock;
+
+       if (args.flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+       {
+               descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE;
+       }
+       _mali_osk_list_init( &descriptor->list );
+
+       /* Get descriptor mapping for memory. */
+       if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md))
+       {
+               MALI_PRINT_ERROR(("Failed to create descriptor mapping for dma-buf %d\n", fd));
+               _mali_osk_free(descriptor);
+               mali_dma_buf_release(NULL, mem);
+               return -EFAULT;
+       }
+
+       external_memory_allocator.allocate = mali_dma_buf_commit;
+       external_memory_allocator.allocate_page_table_block = NULL;
+       external_memory_allocator.ctx = mem;
+       external_memory_allocator.name = "DMA-BUF Memory";
+       external_memory_allocator.next = NULL;
+
+       /* Map memory into session's Mali virtual address space. */
+       _mali_osk_lock_wait(session->memory_lock, _MALI_OSK_LOCKMODE_RW);
+       if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(mali_mem_get_memory_engine(), descriptor, &external_memory_allocator, NULL))
+       {
+               _mali_osk_lock_signal(session->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+               MALI_PRINT_ERROR(("Failed to map dma-buf %d into Mali address space\n", fd));
+               mali_descriptor_mapping_free(session->descriptor_mapping, md);
+               mali_dma_buf_release(NULL, mem);
+               return -ENOMEM;
+       }
+       _mali_osk_lock_signal(session->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+       /* Return stuff to user space */
+       if (0 != put_user(md, &user_arg->cookie))
+       {
+               /* Roll back */
+               MALI_PRINT_ERROR(("Failed to return descriptor to user space for dma-buf %d\n", fd));
+               mali_descriptor_mapping_free(session->descriptor_mapping, md); /* NOTE(review): the engine allocation / page-table mapping is not rolled back here — possible leak; confirm against mali_release_dma_buf() */
+               mali_dma_buf_release(NULL, mem);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+int mali_release_dma_buf(struct mali_session_data *session, _mali_uk_release_dma_buf_s __user *user_arg) /* IOCTL handler: unmap and release the dma-buf identified by the cookie in @user_arg. */
+{
+       _mali_uk_release_dma_buf_s args;
+       mali_memory_allocation *descriptor;
+
+       /* get call arguments from user space. copy_from_user returns how many bytes which were NOT copied */
+       if ( 0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_release_dma_buf_s)) )
+       {
+               return -EFAULT;
+       }
+
+       if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session->descriptor_mapping, args.cookie, (void**)&descriptor))
+       {
+               MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release dma-buf\n", args.cookie));
+               return -EINVAL;
+       }
+
+       descriptor = mali_descriptor_mapping_free(session->descriptor_mapping, args.cookie); /* removes the cookie and hands back ownership of the descriptor */
+
+       if (NULL != descriptor)
+       {
+               _mali_osk_lock_wait( session->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+               /* Will call back to mali_dma_buf_release() which will release the dma-buf attachment. */
+               mali_allocation_engine_release_memory(mali_mem_get_memory_engine(), descriptor);
+
+               _mali_osk_lock_signal( session->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+               _mali_osk_free(descriptor);
+       }
+
+       /* Success; a NULL descriptor above means it was already torn down, which is not treated as an error */
+       return 0;
+}
+
+int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *user_arg) /* IOCTL handler: report the size of the dma-buf fd in @user_arg back to user space. @session is unused. */
+{
+       _mali_uk_dma_buf_get_size_s args;
+       int fd;
+       struct dma_buf *buf;
+
+       /* get call arguments from user space. copy_from_user returns how many bytes which were NOT copied */
+       if ( 0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_dma_buf_get_size_s)) )
+       {
+               return -EFAULT;
+       }
+
+       /* Do DMA-BUF stuff */
+       fd = args.mem_fd;
+
+       buf = dma_buf_get(fd);
+       if (IS_ERR_OR_NULL(buf))
+       {
+               MALI_DEBUG_PRINT(2, ("Failed to get dma-buf from fd: %d\n", fd));
+               return PTR_RET(buf);
+       }
+
+       if (0 != put_user(buf->size, &user_arg->size))
+       {
+               dma_buf_put(buf);
+               return -EFAULT;
+       }
+
+       dma_buf_put(buf); /* reference only held for the duration of the size read */
+
+       return 0;
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_dma_buf.h b/drivers/gpu/arm/mali400/mali/linux/mali_dma_buf.h
new file mode 100644 (file)
index 0000000..9d19773
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_DMA_BUF_H__
+#define __MALI_DMA_BUF_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include "mali_osk.h"
+
+int mali_attach_dma_buf(struct mali_session_data *session, _mali_uk_attach_dma_buf_s __user *arg);
+int mali_release_dma_buf(struct mali_session_data *session, _mali_uk_release_dma_buf_s __user *arg);
+int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *arg);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_DMA_BUF_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_kernel_linux.c b/drivers/gpu/arm/mali400/mali/linux/mali_kernel_linux.c
new file mode 100644 (file)
index 0000000..b1f2862
--- /dev/null
@@ -0,0 +1,760 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_linux.c
+ * Implementation of the Linux device driver entrypoints
+ */
+#include <linux/module.h>   /* kernel module definitions */
+#include <linux/fs.h>       /* file system operations */
+#include <linux/cdev.h>     /* character device definitions */
+#include <linux/mm.h>       /* memory manager definitions */
+#include <linux/mali/mali_utgard_ioctl.h>
+#include <linux/of.h>
+#include <linux/version.h>
+#include <linux/device.h>
+#include "mali_kernel_license.h"
+#include <linux/platform_device.h>
+#if MALI_LICENSE_IS_GPL
+#include <linux/miscdevice.h>
+#endif
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_linux.h"
+#include "mali_ukk.h"
+#include "mali_ukk_wrappers.h"
+#include "mali_kernel_sysfs.h"
+#include "mali_pm.h"
+#include "mali_kernel_license.h"
+#include "mali_dma_buf.h"
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include "mali_profiling_internal.h"
+#endif
+/* MALI_SEC */
+#include "../platform/exynos4/exynos4.h"
+
+/* Streamline support for the Mali driver */
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_MALI400_PROFILING)
+/* Ask Linux to create the tracepoints */
+#define CREATE_TRACE_POINTS
+#include "mali_linux_trace.h"
+#endif /* CONFIG_TRACEPOINTS */
+
+/* from the __malidrv_build_info.c file that is generated during build */
+extern const char *__malidrv_build_info(void);
+
+/* Module parameter to control log level */
+int mali_debug_level = 2;
+module_param(mali_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_debug_level, "Higher number, more dmesg output");
+
+module_param(mali_max_job_runtime, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_job_runtime, "Maximum allowed job runtime in msecs.\nJobs will be killed after this no matter what");
+
+extern int mali_l2_max_reads;
+module_param(mali_l2_max_reads, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_l2_max_reads, "Maximum reads for Mali L2 cache");
+
+extern int mali_dedicated_mem_start;
+module_param(mali_dedicated_mem_start, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_dedicated_mem_start, "Physical start address of dedicated Mali GPU memory.");
+
+extern int mali_dedicated_mem_size;
+module_param(mali_dedicated_mem_size, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_dedicated_mem_size, "Size of dedicated Mali GPU memory.");
+
+extern int mali_shared_mem_size;
+module_param(mali_shared_mem_size, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_shared_mem_size, "Size of shared Mali GPU memory.");
+
+#if defined(CONFIG_MALI400_PROFILING)
+extern int mali_boot_profiling;
+module_param(mali_boot_profiling, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_boot_profiling, "Start profiling as a part of Mali driver initialization");
+#endif
+
+extern int mali_max_pp_cores_group_1;
+module_param(mali_max_pp_cores_group_1, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_pp_cores_group_1, "Limit the number of PP cores to use from first PP group.");
+
+extern int mali_max_pp_cores_group_2;
+module_param(mali_max_pp_cores_group_2, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_pp_cores_group_2, "Limit the number of PP cores to use from second PP group (Mali-450 only).");
+
+/* Export symbols from common code: mali_user_settings.c */
+#include "mali_user_settings_db.h"
+EXPORT_SYMBOL(mali_set_user_setting);
+EXPORT_SYMBOL(mali_get_user_setting);
+#if CONFIG_MALI_DVFS
+#if 0
+extern int mali_gpu_clk;
+module_param(mali_gpu_clk, int, S_IRUSR | S_IRGRP | S_IROTH); /* r--r--r-- */
+MODULE_PARM_DESC(mali_gpu_clk, "Mali Current Clock");
+
+extern int mali_gpu_vol;
+module_param(mali_gpu_vol, int, S_IRUSR | S_IRGRP | S_IROTH); /* r--r--r-- */
+MODULE_PARM_DESC(mali_gpu_vol, "Mali Current Voltage");
+
+extern int gpu_power_state;
+module_param(gpu_power_state, int, S_IRUSR | S_IRGRP | S_IROTH); /* r--r--r-- */
+MODULE_PARM_DESC(gpu_power_state, "Mali Power State");
+#endif
+#endif
+
+static char mali_dev_name[] = "mali"; /* should be const, but the functions we call require non-const */
+
+/* This driver only supports one Mali device, and this variable stores this single platform device */
+struct platform_device *mali_platform_device = NULL;
+
+/* This driver only supports one Mali device, and this variable stores the exposed misc device (/dev/mali) */
+static struct miscdevice mali_miscdevice = { 0, };
+
+static int mali_miscdevice_register(struct platform_device *pdev);
+static void mali_miscdevice_unregister(void);
+
+static int mali_open(struct inode *inode, struct file *filp);
+static int mali_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+static int mali_mmap(struct file * filp, struct vm_area_struct * vma);
+
+static int mali_probe(struct platform_device *pdev);
+static int mali_remove(struct platform_device *pdev);
+
+static int mali_driver_suspend_scheduler(struct device *dev);
+static int mali_driver_resume_scheduler(struct device *dev);
+
+#ifdef CONFIG_PM_RUNTIME
+static int mali_driver_runtime_suspend(struct device *dev);
+static int mali_driver_runtime_resume(struct device *dev);
+static int mali_driver_runtime_idle(struct device *dev);
+#endif
+
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+extern int mali_platform_device_register(void);
+extern int mali_platform_device_unregister(void);
+#endif
+
+/* Linux power management operations provided by the Mali device driver */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+struct pm_ext_ops mali_dev_ext_pm_ops =
+{
+       .base =
+       {
+               .suspend = mali_driver_suspend_scheduler,
+               .resume = mali_driver_resume_scheduler,
+               .freeze = mali_driver_suspend_scheduler,
+               .thaw =   mali_driver_resume_scheduler,
+       },
+};
+#else
+static const struct dev_pm_ops mali_dev_pm_ops =
+{
+#ifdef CONFIG_PM_RUNTIME
+       .runtime_suspend = mali_driver_runtime_suspend,
+       .runtime_resume = mali_driver_runtime_resume,
+       .runtime_idle = mali_driver_runtime_idle,
+#endif
+       .suspend = mali_driver_suspend_scheduler,
+       .resume = mali_driver_resume_scheduler,
+       .freeze = mali_driver_suspend_scheduler,
+       .thaw = mali_driver_resume_scheduler,
+};
+#endif
+
+/* The Mali device driver struct */
+static struct platform_driver mali_platform_driver =
+{
+       .probe  = mali_probe,
+       .remove = mali_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+       .pm = &mali_dev_ext_pm_ops,
+#endif
+       .driver =
+       {
+               .name   = "mali_dev", /* MALI_SEC MALI_GPU_NAME_UTGARD, */
+               .owner  = THIS_MODULE,
+               .bus = &platform_bus_type,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+               .pm = &mali_dev_pm_ops,
+#endif
+               .of_match_table = of_match_ptr(mali_of_matches),
+       },
+};
+
+/* Linux misc device operations (/dev/mali) */
+struct file_operations mali_fops =
+{
+       .owner = THIS_MODULE,
+       .open = mali_open,
+       .release = mali_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+       .unlocked_ioctl = mali_ioctl,
+#else
+       .ioctl = mali_ioctl,
+#endif
+       .mmap = mali_mmap
+};
+
+
+
+
+
+
+int mali_module_init(void) /* Module entry point: register the (optional fake) platform device, the platform driver, then optional internal profiling. */
+{
+       int err = 0;
+
+       MALI_DEBUG_PRINT(2, ("Inserting Mali v%d device driver. \n",_MALI_API_VERSION));
+       MALI_DEBUG_PRINT(2, ("Compiled: %s, time: %s.\n", __DATE__, __TIME__)); /* NOTE(review): __DATE__/__TIME__ defeat reproducible builds */
+       MALI_DEBUG_PRINT(2, ("Driver revision: %s\n", SVN_REV_STRING));
+
+       /* Initialize module wide settings */
+       mali_osk_low_level_mem_init();
+
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+       MALI_DEBUG_PRINT(2, ("mali_module_init() registering device\n"));
+       err = mali_platform_device_register();
+       if (0 != err)
+       {
+               return err;
+       }
+#endif
+
+       MALI_DEBUG_PRINT(2, ("mali_module_init() registering driver\n"));
+
+       err = platform_driver_register(&mali_platform_driver);
+
+       if (0 != err)
+       {
+               MALI_DEBUG_PRINT(2, ("mali_module_init() Failed to register driver (%d)\n", err));
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+               mali_platform_device_unregister();
+#endif
+               mali_platform_device = NULL;
+               return err;
+       }
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+        err = _mali_internal_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
+        if (0 != err)
+        {
+                /* No biggie if we weren't able to initialize the profiling */
+                MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n"));
+        }
+#endif
+
+       MALI_PRINT(("Mali device driver loaded\n"));
+
+       return 0; /* Success */
+}
+
+void mali_module_exit(void) /* Module exit: tear down in reverse of mali_module_init() — profiling, driver, device, low-level memory. */
+{
+       MALI_DEBUG_PRINT(2, ("Unloading Mali v%d device driver.\n",_MALI_API_VERSION));
+
+       MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering driver\n"));
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+        _mali_internal_profiling_term();
+#endif
+
+       platform_driver_unregister(&mali_platform_driver);
+
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+       MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering device\n"));
+       mali_platform_device_unregister();
+#endif
+
+       mali_osk_low_level_mem_term();
+
+       MALI_PRINT(("Mali device driver unloaded\n"));
+}
+
+static int mali_probe(struct platform_device *pdev) /* Driver probe: bind the single supported Mali device, bring up platform, workqueues, subsystems, misc device and sysfs; unwinds in reverse on failure. */
+{
+       int err;
+
+       MALI_DEBUG_PRINT(2, ("mali_probe(): Called for platform device %s\n", pdev->name));
+
+       if (NULL != mali_platform_device)
+       {
+               /* Already connected to a device, return error */
+               MALI_PRINT_ERROR(("mali_probe(): The Mali driver is already connected with a Mali device."));
+               return -EEXIST;
+       }
+
+       mali_platform_device = pdev;
+
+       if (mali_platform_init() != _MALI_OSK_ERR_OK)
+       {
+               /* Platform-specific initialization failed, return error */
+               MALI_PRINT_ERROR(("mali_probe(): mali_platform_init() failed."));
+               return -EFAULT; /* NOTE(review): mali_platform_device is left pointing at pdev here, so a retried probe hits -EEXIST; consider resetting it to NULL */
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_wq_init())
+       {
+               /* Initialize the Mali GPU HW specified by pdev */
+               if (_MALI_OSK_ERR_OK == mali_initialize_subsystems())
+               {
+                       /* Register a misc device (so we are accessible from user space) */
+                       err = mali_miscdevice_register(pdev);
+                       if (0 == err)
+                       {
+                               /* Setup sysfs entries */
+                               err = mali_sysfs_register(mali_dev_name);
+                               if (0 == err)
+                               {
+                                       MALI_DEBUG_PRINT(2, ("mali_probe(): Successfully initialized driver for platform device %s\n", pdev->name));
+                                       return 0; /* fully initialized */
+                               }
+                               else
+                               {
+                                       MALI_PRINT_ERROR(("mali_probe(): failed to register sysfs entries"));
+                               }
+                               mali_miscdevice_unregister(); /* unwind: misc device */
+                       }
+                       else
+                       {
+                               MALI_PRINT_ERROR(("mali_probe(): failed to register Mali misc device."));
+                       }
+                       mali_terminate_subsystems(); /* unwind: GPU subsystems */
+               }
+               else
+               {
+                       MALI_PRINT_ERROR(("mali_probe(): Failed to initialize Mali device driver."));
+               }
+               _mali_osk_wq_term(); /* unwind: workqueues */
+       }
+
+       mali_platform_device = NULL;
+       return -EFAULT;
+}
+
+static int mali_remove(struct platform_device *pdev) /* Driver remove: tear down in reverse order of mali_probe(), then platform deinit. */
+{
+       MALI_DEBUG_PRINT(2, ("mali_remove() called for platform device %s\n", pdev->name));
+       mali_sysfs_unregister();
+       mali_miscdevice_unregister();
+       mali_terminate_subsystems();
+       _mali_osk_wq_term();
+       mali_platform_deinit();
+       mali_platform_device = NULL; /* allow a future probe to bind again */
+       return 0;
+}
+
+static int mali_miscdevice_register(struct platform_device *pdev) /* Expose /dev/mali as a dynamic-minor misc device, parented to @pdev. Returns 0 or misc_register()'s error. */
+{
+       int err;
+
+       mali_miscdevice.minor = MISC_DYNAMIC_MINOR;
+       mali_miscdevice.name = mali_dev_name;
+       mali_miscdevice.fops = &mali_fops;
+       mali_miscdevice.parent = get_device(&pdev->dev); /* NOTE(review): this reference is never put if misc_register() fails below — consider put_device() on the error path */
+
+       err = misc_register(&mali_miscdevice);
+       if (0 != err)
+       {
+               MALI_PRINT_ERROR(("Failed to register misc device, misc_register() returned %d\n", err));
+       }
+
+       return err;
+}
+
+static void mali_miscdevice_unregister(void) /* Remove /dev/mali; counterpart of mali_miscdevice_register(). */
+{
+       misc_deregister(&mali_miscdevice);
+}
+
+static int mali_driver_suspend_scheduler(struct device *dev) /* System suspend/freeze: quiesce the scheduler first, then drop power. */
+{
+       mali_pm_os_suspend();
+       /* MALI_SEC */
+       mali_platform_power_mode_change(MALI_POWER_MODE_DEEP_SLEEP); /* power down only after the scheduler has been suspended */
+       return 0;
+}
+
+static int mali_driver_resume_scheduler(struct device *dev) /* System resume/thaw: restore power before waking the scheduler — mirror order of suspend. */
+{
+       /* MALI_SEC */
+       mali_platform_power_mode_change(MALI_POWER_MODE_ON);
+       mali_pm_os_resume();
+       return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int mali_driver_runtime_suspend(struct device *dev) /* Runtime PM: idle power-down to light sleep after the PM core is quiesced. */
+{
+       mali_pm_runtime_suspend();
+       /* MALI_SEC */
+       mali_platform_power_mode_change(MALI_POWER_MODE_LIGHT_SLEEP);
+       return 0;
+}
+
+static int mali_driver_runtime_resume(struct device *dev) /* Runtime PM: restore power before resuming driver-internal PM state. */
+{
+       /* MALI_SEC */
+       mali_platform_power_mode_change(MALI_POWER_MODE_ON);
+       mali_pm_runtime_resume();
+       return 0;
+}
+
+static int mali_driver_runtime_idle(struct device *dev) /* Runtime PM idle callback; returns 0 — presumably to let the PM core proceed to suspend (TODO confirm for this kernel version). */
+{
+       /* Nothing to do */
+       return 0;
+}
+#endif
+
+/** @note munmap handler is done by vma close handler */
+static int mali_mmap(struct file * filp, struct vm_area_struct * vma)
+{
+       struct mali_session_data * session_data;
+       _mali_uk_mem_mmap_s args = {0, };
+
+    session_data = (struct mali_session_data *)filp->private_data;
+       if (NULL == session_data)
+       {
+               MALI_PRINT_ERROR(("mmap called without any session data available\n"));
+               return -EFAULT;
+       }
+
+       MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n", (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT), (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));
+
+       /* Re-pack the arguments that mmap() packed for us */
+       args.ctx = session_data;
+       args.phys_addr = vma->vm_pgoff << PAGE_SHIFT;
+       args.size = vma->vm_end - vma->vm_start;
+       args.ukk_private = vma;
+
+       if ( VM_SHARED== (VM_SHARED  & vma->vm_flags))
+       {
+               args.cache_settings = MALI_CACHE_STANDARD ;
+               MALI_DEBUG_PRINT(3,("Allocate - Standard - Size: %d kb\n", args.size/1024));
+       }
+       else
+       {
+               args.cache_settings = MALI_CACHE_GP_READ_ALLOCATE;
+               MALI_DEBUG_PRINT(3,("Allocate - GP Cached - Size: %d kb\n", args.size/1024));
+       }
+       /* Setting it equal to VM_SHARED and not Private, which would have made the later io_remap fail for MALI_CACHE_GP_READ_ALLOCATE */
+       vma->vm_flags = 0x000000fb;
+
+       /* Call the common mmap handler */
+       MALI_CHECK(_MALI_OSK_ERR_OK ==_mali_ukk_mem_mmap( &args ), -EFAULT);
+
+    return 0;
+}
+
+static int mali_open(struct inode *inode, struct file *filp) /* open() on /dev/mali: create a session and stash it in filp->private_data. */
+{
+       struct mali_session_data * session_data;
+    _mali_osk_errcode_t err;
+
+       /* input validation */
+       if (mali_miscdevice.minor != iminor(inode))
+       {
+               MALI_PRINT_ERROR(("mali_open() Minor does not match\n"));
+               return -ENODEV;
+       }
+
+       /* allocated struct to track this session */
+    err = _mali_ukk_open((void **)&session_data);
+    if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       /* initialize file pointer */
+       filp->f_pos = 0;
+
+       /* link in our session data */
+       filp->private_data = (void*)session_data;
+
+       return 0;
+}
+
+static int mali_release(struct inode *inode, struct file *filp) /* release() handler: tears down the session created in mali_open() */
+{
+    _mali_osk_errcode_t err;
+
+       /* input validation */
+       if (mali_miscdevice.minor != iminor(inode))
+       {
+               MALI_PRINT_ERROR(("mali_release() Minor does not match\n"));
+               return -ENODEV;
+       }
+
+    err = _mali_ukk_close((void **)&filp->private_data); /* double pointer lets the callee also clear private_data - TODO confirm in _mali_ukk_close() */
+    if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       return 0;
+}
+
+int map_errcode( _mali_osk_errcode_t err ) /* translate an internal OSK error code into a negative Linux errno */
+{
+    switch(err)
+    {
+        case _MALI_OSK_ERR_OK : return 0;
+        case _MALI_OSK_ERR_FAULT: return -EFAULT;
+        case _MALI_OSK_ERR_INVALID_FUNC: return -ENOTTY;
+        case _MALI_OSK_ERR_INVALID_ARGS: return -EINVAL;
+        case _MALI_OSK_ERR_NOMEM: return -ENOMEM;
+        case _MALI_OSK_ERR_TIMEOUT: return -ETIMEDOUT;
+        case _MALI_OSK_ERR_RESTARTSYSCALL: return -ERESTARTSYS;
+        case _MALI_OSK_ERR_ITEM_NOT_FOUND: return -ENOENT;
+        default: return -EFAULT; /* any unknown code is reported as a generic fault */
+    }
+}
+
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) /* main user/kernel ioctl dispatcher; arg is a userspace struct pointer */
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+       int err;
+       struct mali_session_data *session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+       /* inode not used */
+       (void)inode;
+#endif
+
+       MALI_DEBUG_PRINT(7, ("Ioctl received 0x%08X 0x%08lX\n", cmd, arg));
+
+       session_data = (struct mali_session_data *)filp->private_data; /* set in mali_open() */
+       if (NULL == session_data)
+       {
+               MALI_DEBUG_PRINT(7, ("filp->private_data was NULL\n"));
+               return -ENOTTY;
+       }
+
+       if (NULL == (void *)arg) /* every supported command passes a struct pointer in arg */
+       {
+               MALI_DEBUG_PRINT(7, ("arg was NULL\n"));
+               return -ENOTTY;
+       }
+
+       switch(cmd)
+       {
+               case MALI_IOC_WAIT_FOR_NOTIFICATION:
+                       err = wait_for_notification_wrapper(session_data, (_mali_uk_wait_for_notification_s __user *)arg);
+                       break;
+
+               case MALI_IOC_GET_API_VERSION:
+                       err = get_api_version_wrapper(session_data, (_mali_uk_get_api_version_s __user *)arg);
+                       break;
+
+               case MALI_IOC_POST_NOTIFICATION:
+                       err = post_notification_wrapper(session_data, (_mali_uk_post_notification_s __user *)arg);
+                       break;
+
+               case MALI_IOC_GET_USER_SETTINGS:
+                       err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
+                       break;
+
+#if defined(CONFIG_MALI400_PROFILING)
+               case MALI_IOC_PROFILING_START:
+                       err = profiling_start_wrapper(session_data, (_mali_uk_profiling_start_s __user *)arg);
+                       break;
+
+               case MALI_IOC_PROFILING_ADD_EVENT:
+                       err = profiling_add_event_wrapper(session_data, (_mali_uk_profiling_add_event_s __user *)arg);
+                       break;
+
+               case MALI_IOC_PROFILING_STOP:
+                       err = profiling_stop_wrapper(session_data, (_mali_uk_profiling_stop_s __user *)arg);
+                       break;
+
+               case MALI_IOC_PROFILING_GET_EVENT:
+                       err = profiling_get_event_wrapper(session_data, (_mali_uk_profiling_get_event_s __user *)arg);
+                       break;
+
+               case MALI_IOC_PROFILING_CLEAR:
+                       err = profiling_clear_wrapper(session_data, (_mali_uk_profiling_clear_s __user *)arg);
+                       break;
+
+               case MALI_IOC_PROFILING_GET_CONFIG:
+                       /* Deprecated: still compatible with get_user_settings */
+                       err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
+                       break;
+
+               case MALI_IOC_PROFILING_REPORT_SW_COUNTERS:
+                       err = profiling_report_sw_counters_wrapper(session_data, (_mali_uk_sw_counters_report_s __user *)arg);
+                       break;
+
+#else
+
+               case MALI_IOC_PROFILING_START:              /* FALL-THROUGH */
+               case MALI_IOC_PROFILING_ADD_EVENT:          /* FALL-THROUGH */
+               case MALI_IOC_PROFILING_STOP:               /* FALL-THROUGH */
+               case MALI_IOC_PROFILING_GET_EVENT:          /* FALL-THROUGH */
+               case MALI_IOC_PROFILING_CLEAR:              /* FALL-THROUGH */
+               case MALI_IOC_PROFILING_GET_CONFIG:         /* FALL-THROUGH */
+               case MALI_IOC_PROFILING_REPORT_SW_COUNTERS: /* FALL-THROUGH */
+                       MALI_DEBUG_PRINT(2, ("Profiling not supported\n"));
+                       err = -ENOTTY;
+                       break;
+
+#endif
+
+               case MALI_IOC_MEM_INIT:
+                       err = mem_init_wrapper(session_data, (_mali_uk_init_mem_s __user *)arg);
+                       break;
+
+               case MALI_IOC_MEM_TERM:
+                       err = mem_term_wrapper(session_data, (_mali_uk_term_mem_s __user *)arg);
+                       break;
+
+               case MALI_IOC_MEM_MAP_EXT:
+                       err = mem_map_ext_wrapper(session_data, (_mali_uk_map_external_mem_s __user *)arg);
+                       break;
+
+               case MALI_IOC_MEM_UNMAP_EXT:
+                       err = mem_unmap_ext_wrapper(session_data, (_mali_uk_unmap_external_mem_s __user *)arg);
+                       break;
+
+               case MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE:
+                       err = mem_query_mmu_page_table_dump_size_wrapper(session_data, (_mali_uk_query_mmu_page_table_dump_size_s __user *)arg);
+                       break;
+
+               case MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE:
+                       err = mem_dump_mmu_page_table_wrapper(session_data, (_mali_uk_dump_mmu_page_table_s __user *)arg);
+                       break;
+
+#if defined(CONFIG_MALI400_UMP)
+
+               case MALI_IOC_MEM_ATTACH_UMP:
+                       err = mem_attach_ump_wrapper(session_data, (_mali_uk_attach_ump_mem_s __user *)arg);
+                       break;
+
+               case MALI_IOC_MEM_RELEASE_UMP:
+                       err = mem_release_ump_wrapper(session_data, (_mali_uk_release_ump_mem_s __user *)arg);
+                       break;
+
+#else
+
+               case MALI_IOC_MEM_ATTACH_UMP:
+               case MALI_IOC_MEM_RELEASE_UMP: /* FALL-THROUGH */
+                       MALI_DEBUG_PRINT(2, ("UMP not supported\n"));
+                       err = -ENOTTY;
+                       break;
+#endif
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+               case MALI_IOC_MEM_ATTACH_DMA_BUF:
+                       err = mali_attach_dma_buf(session_data, (_mali_uk_attach_dma_buf_s __user *)arg);
+                       break;
+
+               case MALI_IOC_MEM_RELEASE_DMA_BUF:
+                       err = mali_release_dma_buf(session_data, (_mali_uk_release_dma_buf_s __user *)arg);
+                       break;
+
+               case MALI_IOC_MEM_DMA_BUF_GET_SIZE:
+                       err = mali_dma_buf_get_size(session_data, (_mali_uk_dma_buf_get_size_s __user *)arg);
+                       break;
+#else
+
+               case MALI_IOC_MEM_ATTACH_DMA_BUF:   /* FALL-THROUGH */
+               case MALI_IOC_MEM_RELEASE_DMA_BUF:  /* FALL-THROUGH */
+               case MALI_IOC_MEM_DMA_BUF_GET_SIZE: /* FALL-THROUGH */
+                       MALI_DEBUG_PRINT(2, ("DMA-BUF not supported\n"));
+                       err = -ENOTTY;
+                       break;
+#endif
+
+               case MALI_IOC_PP_START_JOB:
+                       err = pp_start_job_wrapper(session_data, (_mali_uk_pp_start_job_s __user *)arg);
+                       break;
+
+               case MALI_IOC_PP_NUMBER_OF_CORES_GET:
+                       err = pp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_pp_number_of_cores_s __user *)arg);
+                       break;
+
+               case MALI_IOC_PP_CORE_VERSION_GET:
+                       err = pp_get_core_version_wrapper(session_data, (_mali_uk_get_pp_core_version_s __user *)arg);
+                       break;
+
+               case MALI_IOC_PP_DISABLE_WB:
+                       err = pp_disable_wb_wrapper(session_data, (_mali_uk_pp_disable_wb_s __user *)arg);
+                       break;
+
+               case MALI_IOC_GP2_START_JOB:
+                       err = gp_start_job_wrapper(session_data, (_mali_uk_gp_start_job_s __user *)arg);
+                       break;
+
+               case MALI_IOC_GP2_NUMBER_OF_CORES_GET:
+                       err = gp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_gp_number_of_cores_s __user *)arg);
+                       break;
+
+               case MALI_IOC_GP2_CORE_VERSION_GET:
+                       err = gp_get_core_version_wrapper(session_data, (_mali_uk_get_gp_core_version_s __user *)arg);
+                       break;
+
+               case MALI_IOC_GP2_SUSPEND_RESPONSE:
+                       err = gp_suspend_response_wrapper(session_data, (_mali_uk_gp_suspend_response_s __user *)arg);
+                       break;
+
+               case MALI_IOC_VSYNC_EVENT_REPORT:
+                       err = vsync_event_report_wrapper(session_data, (_mali_uk_vsync_event_report_s __user *)arg);
+                       break;
+
+               case MALI_IOC_STREAM_CREATE: /* NOTE(review): with !CONFIG_SYNC this case has no body and falls through to FENCE_VALIDATE's -ENOTTY branch below */
+#if defined(CONFIG_SYNC)
+/* MALI_SEC */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+                       err = stream_create_wrapper(session_data, (_mali_uk_stream_create_s __user *)arg);
+#else
+                       err = -ENOTTY;
+#endif
+                       break;
+#endif
+               case MALI_IOC_FENCE_VALIDATE:
+#if defined(CONFIG_SYNC)
+/* MALI_SEC */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+                       err = sync_fence_validate_wrapper(session_data, (_mali_uk_fence_validate_s __user *)arg);
+#else
+                       MALI_DEBUG_PRINT(2, ("Sync objects not supported\n"));
+                       err = -ENOTTY;
+#endif
+                       break;
+#else
+                       MALI_DEBUG_PRINT(2, ("Sync objects not supported\n"));
+                       err = -ENOTTY;
+                       break;
+#endif
+
+               case MALI_IOC_MEM_GET_BIG_BLOCK: /* Fallthrough */
+               case MALI_IOC_MEM_FREE_BIG_BLOCK:
+                       MALI_PRINT_ERROR(("Non-MMU mode is no longer supported.\n"));
+                       err = -ENOTTY;
+                       break;
+
+               default:
+                       MALI_DEBUG_PRINT(2, ("No handler for ioctl 0x%08X 0x%08lX\n", cmd, arg));
+                       err = -ENOTTY;
+       }; /* stray ';' after the switch brace is a harmless empty statement */
+
+       return err;
+}
+
+
+module_init(mali_module_init); /* module entry/exit points (defined earlier in this file) */
+module_exit(mali_module_exit);
+
+MODULE_LICENSE(MALI_KERNEL_LINUX_LICENSE); /* license/author/version metadata for modinfo */
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_kernel_linux.h b/drivers/gpu/arm/mali400/mali/linux/mali_kernel_linux.h
new file mode 100644 (file)
index 0000000..2d551af
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_LINUX_H__
+#define __MALI_KERNEL_LINUX_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include <linux/cdev.h>     /* character device definitions */
+#include "mali_kernel_license.h"
+#include "mali_osk.h"
+
+extern struct platform_device *mali_platform_device; /* the Mali platform device, defined elsewhere in the driver */
+
+#if MALI_LICENSE_IS_GPL
+/* Defined in mali_osk_irq.h */
+extern struct workqueue_struct * mali_wq;
+#endif
+
+void mali_osk_low_level_mem_init(void); /* low-level memory subsystem bring-up/teardown hooks */
+void mali_osk_low_level_mem_term(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LINUX_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_kernel_sysfs.c b/drivers/gpu/arm/mali400/mali/linux/mali_kernel_sysfs.c
new file mode 100644 (file)
index 0000000..49779d1
--- /dev/null
@@ -0,0 +1,1442 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_sysfs.c
+ * Implementation of some sysfs data exports
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include "mali_kernel_license.h"
+#include "mali_kernel_common.h"
+#include "mali_ukk.h"
+
+#if MALI_LICENSE_IS_GPL
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include "mali_kernel_sysfs.h"
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include <linux/slab.h>
+#include "mali_osk_profiling.h"
+#endif
+#include "mali_pm.h"
+#include "mali_group.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_kernel_core.h"
+#include "mali_user_settings_db.h"
+#include "mali_device_pause_resume.h"
+#include "mali_profiling_internal.h"
+#include "mali_gp_job.h"
+#include "mali_pp_job.h"
+
+#define POWER_BUFFER_SIZE 3
+
+static struct dentry *mali_debugfs_dir = NULL; /* root of the driver's debugfs tree */
+
+typedef enum
+{
+        _MALI_DEVICE_SUSPEND,
+        _MALI_DEVICE_RESUME,
+        _MALI_DEVICE_DVFS_PAUSE,
+        _MALI_DEVICE_DVFS_RESUME,
+        _MALI_MAX_EVENTS
+} _mali_device_debug_power_events; /* commands accepted by the power-events debugfs file */
+
+static const char* const mali_power_events[_MALI_MAX_EVENTS] = { /* textual form of each power event, matched by strncmp in the write handler */
+        [_MALI_DEVICE_SUSPEND] = "suspend",
+        [_MALI_DEVICE_RESUME] = "resume",
+        [_MALI_DEVICE_DVFS_PAUSE] = "dvfs_pause",
+        [_MALI_DEVICE_DVFS_RESUME] = "dvfs_resume",
+};
+
+static u32 virtual_power_status_register = 0; /* result of the last power event, reported back via debugfs */
+static char pwr_buf[POWER_BUFFER_SIZE];
+
+static mali_bool power_always_on_enabled = MALI_FALSE; /* mirrors the power_always_on debugfs toggle */
+
+static int open_copy_private_data(struct inode *inode, struct file *filp) /* stash the debugfs i_private pointer so read/write handlers can reach their core object */
+{
+       filp->private_data = inode->i_private;
+       return 0;
+}
+
+static ssize_t gp_gpx_counter_srcx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *gpos, u32 src_id) /* show the active GP perf-counter source (src_id 0 or 1) */
+{
+       char buf[64];
+       int r;
+       u32 val;
+
+       if (0 == src_id)
+       {
+               val = mali_gp_job_get_gp_counter_src0();
+       }
+       else
+       {
+               val = mali_gp_job_get_gp_counter_src1();
+       }
+
+       if (MALI_HW_CORE_NO_COUNTER == val)
+       {
+               r = sprintf(buf, "-1\n"); /* a disabled counter reads back as -1 */
+       }
+       else
+       {
+               r = sprintf(buf, "%u\n", val);
+       }
+       return simple_read_from_buffer(ubuf, cnt, gpos, buf, r);
+}
+
+static ssize_t gp_gpx_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *gpos, u32 src_id) /* set the GP counter source from a decimal string; negative input disables it */
+{
+       char buf[64];
+       long val;
+       int ret;
+
+       if (cnt >= sizeof(buf))
+       {
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&buf, ubuf, cnt))
+       {
+               return -EFAULT;
+       }
+
+       buf[cnt] = 0; /* NUL-terminate; safe because cnt < sizeof(buf) */
+
+       ret = strict_strtol(buf, 10, &val);
+       if (ret < 0)
+       {
+               return ret;
+       }
+
+       if (val < 0)
+       {
+               /* any negative input will disable counter */
+               val = MALI_HW_CORE_NO_COUNTER;
+       }
+
+       if (0 == src_id)
+       {
+               if (MALI_TRUE != mali_gp_job_set_gp_counter_src0((u32)val))
+               {
+                       return 0; /* setter rejected the value: report zero bytes consumed */
+               }
+       }
+       else
+       {
+               if (MALI_TRUE != mali_gp_job_set_gp_counter_src1((u32)val))
+               {
+                       return 0;
+               }
+       }
+
+       *gpos += cnt;
+       return cnt;
+}
+
+static ssize_t gp_all_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *gpos, u32 src_id) /* set the GP counter source for every group that has a GP core */
+{
+       char buf[64];
+       long val;
+       int ret;
+       u32 num_groups;
+       int i;
+
+       if (cnt >= sizeof(buf))
+       {
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&buf, ubuf, cnt))
+       {
+               return -EFAULT;
+       }
+
+       buf[cnt] = 0;
+
+       ret = strict_strtol(buf, 10, &val);
+       if (ret < 0)
+       {
+               return ret;
+       }
+
+       if (val < 0)
+       {
+               /* any negative input will disable counter */
+               val = MALI_HW_CORE_NO_COUNTER;
+       }
+
+       num_groups = mali_group_get_glob_num_groups();
+       for (i = 0; i < num_groups; i++)
+       {
+               struct mali_group *group = mali_group_get_glob_group(i);
+
+               struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+               if (NULL != gp_core) /* NOTE(review): gp_core only gates the call; the setters take no core argument and look global - confirm intent */
+               {
+                       if (0 == src_id)
+                       {
+                               if (MALI_TRUE != mali_gp_job_set_gp_counter_src0((u32)val))
+                               {
+                                       return 0; /* stop on first rejection, reporting no progress */
+                               }
+                       }
+                       else
+                       {
+                               if (MALI_TRUE != mali_gp_job_set_gp_counter_src1((u32)val))
+                               {
+                                       return 0;
+                               }
+                       }
+               }
+       }
+
+       *gpos += cnt;
+       return cnt;
+}
+
+static ssize_t gp_gpx_counter_src0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *gpos) /* debugfs entry points: bind src_id for the shared GP helpers above */
+{
+       return gp_gpx_counter_srcx_read(filp, ubuf, cnt, gpos, 0);
+}
+
+static ssize_t gp_gpx_counter_src1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *gpos)
+{
+       return gp_gpx_counter_srcx_read(filp, ubuf, cnt, gpos, 1);
+}
+
+static ssize_t gp_gpx_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *gpos)
+{
+       return gp_gpx_counter_srcx_write(filp, ubuf, cnt, gpos, 0);
+}
+
+static ssize_t gp_gpx_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *gpos)
+{
+       return gp_gpx_counter_srcx_write(filp, ubuf, cnt, gpos, 1);
+}
+
+static ssize_t gp_all_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *gpos)
+{
+       return gp_all_counter_srcx_write(filp, ubuf, cnt, gpos, 0);
+}
+
+static ssize_t gp_all_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *gpos)
+{
+       return gp_all_counter_srcx_write(filp, ubuf, cnt, gpos, 1);
+}
+
+static const struct file_operations gp_gpx_counter_src0_fops = { /* per-core GP counter files: readable and writable */
+       .owner = THIS_MODULE,
+       .open  = open_copy_private_data,
+       .read  = gp_gpx_counter_src0_read,
+       .write = gp_gpx_counter_src0_write,
+};
+
+static const struct file_operations gp_gpx_counter_src1_fops = {
+       .owner = THIS_MODULE,
+       .open  = open_copy_private_data,
+       .read  = gp_gpx_counter_src1_read,
+       .write = gp_gpx_counter_src1_write,
+};
+
+static const struct file_operations gp_all_counter_src0_fops = { /* "all cores" files are write-only */
+       .owner = THIS_MODULE,
+       .write = gp_all_counter_src0_write,
+};
+
+static const struct file_operations gp_all_counter_src1_fops = {
+       .owner = THIS_MODULE,
+       .write = gp_all_counter_src1_write,
+};
+
+static ssize_t pp_ppx_counter_srcx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id) /* show the active PP perf-counter source (src_id 0 or 1) */
+{
+       char buf[64];
+       int r;
+       u32 val;
+
+       if (0 == src_id)
+       {
+               val = mali_pp_job_get_pp_counter_src0();
+       }
+       else
+       {
+               val = mali_pp_job_get_pp_counter_src1();
+       }
+
+       if (MALI_HW_CORE_NO_COUNTER == val)
+       {
+               r = sprintf(buf, "-1\n"); /* a disabled counter reads back as -1 */
+       }
+       else
+       {
+               r = sprintf(buf, "%u\n", val);
+       }
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t pp_ppx_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id) /* set the PP counter source from a decimal string; negative input disables it */
+{
+       char buf[64];
+       long val;
+       int ret;
+
+       if (cnt >= sizeof(buf))
+       {
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&buf, ubuf, cnt))
+       {
+               return -EFAULT;
+       }
+
+       buf[cnt] = 0; /* NUL-terminate; safe because cnt < sizeof(buf) */
+
+       ret = strict_strtol(buf, 10, &val);
+       if (ret < 0)
+       {
+               return ret;
+       }
+
+       if (val < 0)
+       {
+               /* any negative input will disable counter */
+               val = MALI_HW_CORE_NO_COUNTER;
+       }
+
+       if (0 == src_id)
+       {
+               if (MALI_TRUE != mali_pp_job_set_pp_counter_src0((u32)val))
+               {
+                       return 0; /* setter rejected the value: report zero bytes consumed */
+               }
+       }
+       else
+       {
+               if (MALI_TRUE != mali_pp_job_set_pp_counter_src1((u32)val))
+               {
+                       return 0;
+               }
+       }
+
+       *ppos += cnt;
+       return cnt;
+}
+
+static ssize_t pp_all_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id) /* set the PP counter source for every group that has a PP core */
+{
+       char buf[64];
+       long val;
+       int ret;
+       u32 num_groups;
+       int i;
+
+       if (cnt >= sizeof(buf))
+       {
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&buf, ubuf, cnt))
+       {
+               return -EFAULT;
+       }
+
+       buf[cnt] = 0;
+
+       ret = strict_strtol(buf, 10, &val);
+       if (ret < 0)
+       {
+               return ret;
+       }
+
+       if (val < 0)
+       {
+               /* any negative input will disable counter */
+               val = MALI_HW_CORE_NO_COUNTER;
+       }
+
+       num_groups = mali_group_get_glob_num_groups();
+       for (i = 0; i < num_groups; i++)
+       {
+               struct mali_group *group = mali_group_get_glob_group(i);
+
+               struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+               if (NULL != pp_core) /* NOTE(review): pp_core only gates the call; the setters take no core argument and look global - confirm intent */
+               {
+                       if (0 == src_id)
+                       {
+                               if (MALI_TRUE != mali_pp_job_set_pp_counter_src0((u32)val))
+                               {
+                                       return 0; /* stop on first rejection, reporting no progress */
+                               }
+                       }
+                       else
+                       {
+                               if (MALI_TRUE != mali_pp_job_set_pp_counter_src1((u32)val))
+                               {
+                                       return 0;
+                               }
+                       }
+               }
+       }
+
+       *ppos += cnt;
+       return cnt;
+}
+
+static ssize_t pp_ppx_counter_src0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) /* debugfs entry points: bind src_id for the shared PP helpers above */
+{
+       return pp_ppx_counter_srcx_read(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t pp_ppx_counter_src1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return pp_ppx_counter_srcx_read(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t pp_ppx_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return pp_ppx_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t pp_ppx_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return pp_ppx_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t pp_all_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return pp_all_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t pp_all_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return pp_all_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+static const struct file_operations pp_ppx_counter_src0_fops = { /* per-core PP counter files: readable and writable */
+       .owner = THIS_MODULE,
+       .open  = open_copy_private_data,
+       .read  = pp_ppx_counter_src0_read,
+       .write = pp_ppx_counter_src0_write,
+};
+
+static const struct file_operations pp_ppx_counter_src1_fops = {
+       .owner = THIS_MODULE,
+       .open  = open_copy_private_data,
+       .read  = pp_ppx_counter_src1_read,
+       .write = pp_ppx_counter_src1_write,
+};
+
+static const struct file_operations pp_all_counter_src0_fops = { /* "all cores" files are write-only */
+       .owner = THIS_MODULE,
+       .write = pp_all_counter_src0_write,
+};
+
+static const struct file_operations pp_all_counter_src1_fops = {
+       .owner = THIS_MODULE,
+       .write = pp_all_counter_src1_write,
+};
+
+static ssize_t l2_l2x_counter_srcx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id) /* show a specific L2 cache core's perf-counter source (src_id 0 or 1) */
+{
+       char buf[64];
+       int r;
+       u32 val;
+       struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data; /* stashed by open_copy_private_data() from debugfs i_private */
+
+       if (0 == src_id)
+       {
+               val = mali_l2_cache_core_get_counter_src0(l2_core);
+       }
+       else
+       {
+               val = mali_l2_cache_core_get_counter_src1(l2_core);
+       }
+
+       if (MALI_HW_CORE_NO_COUNTER == val)
+       {
+               r = sprintf(buf, "-1\n"); /* a disabled counter reads back as -1 */
+       }
+       else
+       {
+               r = sprintf(buf, "%u\n", val);
+       }
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t l2_l2x_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id) /* set one L2 core's counter source from a decimal string; negative input disables it */
+{
+       struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data; /* stashed by open_copy_private_data() */
+       char buf[64];
+       long val;
+       int ret;
+
+       if (cnt >= sizeof(buf))
+       {
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&buf, ubuf, cnt))
+       {
+               return -EFAULT;
+       }
+
+       buf[cnt] = 0; /* NUL-terminate; safe because cnt < sizeof(buf) */
+
+       ret = strict_strtol(buf, 10, &val);
+       if (ret < 0)
+       {
+               return ret;
+       }
+
+       if (val < 0)
+       {
+               /* any negative input will disable counter */
+               val = MALI_HW_CORE_NO_COUNTER;
+       }
+
+       if (0 == src_id)
+       {
+               if (MALI_TRUE != mali_l2_cache_core_set_counter_src0(l2_core, (u32)val))
+               {
+                       return 0; /* setter rejected the value: report zero bytes consumed */
+               }
+       }
+       else
+       {
+               if (MALI_TRUE != mali_l2_cache_core_set_counter_src1(l2_core, (u32)val))
+               {
+                       return 0;
+               }
+       }
+
+       *ppos += cnt;
+       return cnt;
+}
+
+static ssize_t l2_all_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id) /* set the counter source on every L2 cache core */
+{
+       char buf[64];
+       long val;
+       int ret;
+       u32 l2_id;
+       struct mali_l2_cache_core *l2_cache;
+
+       if (cnt >= sizeof(buf))
+       {
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&buf, ubuf, cnt))
+       {
+               return -EFAULT;
+       }
+
+       buf[cnt] = 0;
+
+       ret = strict_strtol(buf, 10, &val);
+       if (ret < 0)
+       {
+               return ret;
+       }
+
+       if (val < 0)
+       {
+               /* any negative input will disable counter */
+               val = MALI_HW_CORE_NO_COUNTER;
+       }
+
+       l2_id = 0;
+       l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id); /* iterate cores by index until the lookup returns NULL */
+       while (NULL != l2_cache)
+       {
+               if (0 == src_id)
+               {
+                       if (MALI_TRUE != mali_l2_cache_core_set_counter_src0(l2_cache, (u32)val))
+                       {
+                               return 0; /* stop on first rejection, reporting no progress */
+                       }
+               }
+               else
+               {
+                       if (MALI_TRUE != mali_l2_cache_core_set_counter_src1(l2_cache, (u32)val))
+                       {
+                               return 0;
+                       }
+               }
+
+               /* try next L2 */
+               l2_id++;
+               l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+       }
+
+       *ppos += cnt;
+       return cnt;
+}
+
+static ssize_t l2_l2x_counter_src0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) /* debugfs entry points: bind src_id for the shared L2 helpers above */
+{
+       return l2_l2x_counter_srcx_read(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_src1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_l2x_counter_srcx_read(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t l2_l2x_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_l2x_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_l2x_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t l2_all_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_all_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_all_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_all_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+static const struct file_operations l2_l2x_counter_src0_fops = { /* per-core L2 counter files: readable and writable */
+       .owner = THIS_MODULE,
+       .open  = open_copy_private_data,
+       .read  = l2_l2x_counter_src0_read,
+       .write = l2_l2x_counter_src0_write,
+};
+
+static const struct file_operations l2_l2x_counter_src1_fops = {
+       .owner = THIS_MODULE,
+       .open  = open_copy_private_data,
+       .read  = l2_l2x_counter_src1_read,
+       .write = l2_l2x_counter_src1_write,
+};
+
+static const struct file_operations l2_all_counter_src0_fops = { /* "all cores" files are write-only */
+       .owner = THIS_MODULE,
+       .write = l2_all_counter_src0_write,
+};
+
+static const struct file_operations l2_all_counter_src1_fops = {
+       .owner = THIS_MODULE,
+       .write = l2_all_counter_src1_write,
+};
+
+static ssize_t power_always_on_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) /* "1" holds an extra PM reference so the GPU stays powered; "0" releases it */
+{
+       unsigned long val;
+       int ret;
+       char buf[32];
+
+       cnt = min(cnt, sizeof(buf) - 1); /* silently truncate over-long writes to fit the buffer */
+       if (copy_from_user(buf, ubuf, cnt))
+       {
+               return -EFAULT;
+       }
+       buf[cnt] = '\0';
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (0 != ret)
+       {
+               return ret;
+       }
+
+       /* Update setting (not exactly thread safe) */
+       if (1 == val && MALI_FALSE == power_always_on_enabled) /* only act on actual state changes, so the ref count stays balanced */
+       {
+               power_always_on_enabled = MALI_TRUE;
+               _mali_osk_pm_dev_ref_add();
+       }
+       else if (0 == val && MALI_TRUE == power_always_on_enabled)
+       {
+               power_always_on_enabled = MALI_FALSE;
+               _mali_osk_pm_dev_ref_dec();
+       }
+
+       *ppos += cnt;
+       return cnt;
+}
+
+static ssize_t power_always_on_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) /* report the always-on toggle as "1\n" or "0\n" */
+{
+       if (MALI_TRUE == power_always_on_enabled)
+       {
+               return simple_read_from_buffer(ubuf, cnt, ppos, "1\n", 2);
+       }
+       else
+       {
+               return simple_read_from_buffer(ubuf, cnt, ppos, "0\n", 2);
+       }
+}
+
+/* debugfs operations for the power/always_on toggle file. */
+static const struct file_operations power_always_on_fops = {
+       .owner = THIS_MODULE,
+       .read  = power_always_on_read,
+       .write = power_always_on_write,
+};
+
+/* Writer for power/power_events: interprets the written string as one of the
+ * commands in mali_power_events[] (suspend/resume/DVFS pause/DVFS resume) and
+ * drives the corresponding PM transition.  The resulting status is cached in
+ * pwr_buf for power_power_events_read().
+ *
+ * Fix: ubuf is a user-space pointer and must be copied into a kernel buffer
+ * before it can be handed to strncmp(); dereferencing it directly is invalid
+ * and can fault.  Also use a bounded snprintf() into pwr_buf. */
+static ssize_t power_power_events_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char buf[32];
+       size_t copy_len;
+
+       copy_len = min(cnt, sizeof(buf) - 1);
+       if (copy_from_user(buf, ubuf, copy_len))
+       {
+               return -EFAULT;
+       }
+       buf[copy_len] = '\0';
+
+       if (!strncmp(buf,mali_power_events[_MALI_DEVICE_SUSPEND],strlen(mali_power_events[_MALI_DEVICE_SUSPEND])))
+       {
+               mali_pm_os_suspend();
+               /* @@@@ assuming currently suspend is successful later on to tune as per previous*/
+               virtual_power_status_register =1;
+
+       }
+       else if (!strncmp(buf,mali_power_events[_MALI_DEVICE_RESUME],strlen(mali_power_events[_MALI_DEVICE_RESUME])))
+       {
+               mali_pm_os_resume();
+
+               /* @@@@ assuming currently resume is successful later on to tune as per previous */
+               virtual_power_status_register = 1;
+       }
+       else if (!strncmp(buf,mali_power_events[_MALI_DEVICE_DVFS_PAUSE],strlen(mali_power_events[_MALI_DEVICE_DVFS_PAUSE])))
+       {
+               mali_bool power_on;
+               mali_dev_pause(&power_on);
+               if (!power_on)
+               {
+                       /* GPU was already powered down: undo the pause and flag it. */
+                       virtual_power_status_register = 2;
+                       mali_dev_resume();
+               }
+               else
+               {
+                       /*  @@@@ assuming currently resume is successful later on to tune as per previous */
+                       virtual_power_status_register =1;
+               }
+       }
+       else if (!strncmp(buf,mali_power_events[_MALI_DEVICE_DVFS_RESUME],strlen(mali_power_events[_MALI_DEVICE_DVFS_RESUME])))
+       {
+               mali_dev_resume();
+               /*  @@@@ assuming currently resume is successful later on to tune as per previous */
+               virtual_power_status_register = 1;
+       }
+       *ppos += cnt;
+       snprintf(pwr_buf, POWER_BUFFER_SIZE, "%d",virtual_power_status_register);
+       return cnt;
+}
+
+/* Reader for power/power_events: returns the last cached status string.
+ * Fix: bound the read by the string actually stored in pwr_buf rather than
+ * the full POWER_BUFFER_SIZE, which would hand back trailing garbage bytes. */
+static ssize_t power_power_events_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return simple_read_from_buffer(ubuf, cnt, ppos, pwr_buf, strlen(pwr_buf));
+}
+
+/* llseek for power/power_events: only absolute positioning is supported.
+ * Fix: the llseek contract requires returning the new file position (or a
+ * negative errno); the original always returned 0, so every seek looked like
+ * a rewind to callers. */
+static loff_t power_power_events_seek(struct file *file, loff_t offset, int orig)
+{
+       file->f_pos = offset;
+       return offset;
+}
+
+/* debugfs operations for the power/power_events command file. */
+static const struct file_operations power_power_events_fops = {
+       .owner = THIS_MODULE,
+       .read  = power_power_events_read,
+       .write = power_power_events_write,
+       .llseek = power_power_events_seek,
+};
+
+#if MALI_STATE_TRACKING
+/* seq_file show callback producing the full internal driver state dump.
+ * Fix: snprintf() returns the length the output *would* have had, which can
+ * exceed the remaining space; without clamping, size-len (u32) underflows to
+ * a huge value and buf+len walks past the end of the seq_file buffer on the
+ * next call.  Clamp len to size after each formatting step. */
+static int mali_seq_internal_state_show(struct seq_file *seq_file, void *v)
+{
+       u32 len = 0;
+       u32 size;
+       char *buf;
+
+       size = seq_get_buf(seq_file, &buf);
+
+       if(!size)
+       {
+               return -ENOMEM;
+       }
+
+       /* Create the internal state dump. */
+       len  = snprintf(buf+len, size-len, "Mali device driver %s\n", SVN_REV_STRING);
+       if (len > size)
+       {
+               len = size;
+       }
+       len += snprintf(buf+len, size-len, "License: %s\n\n", MALI_KERNEL_LINUX_LICENSE);
+       if (len > size)
+       {
+               len = size;
+       }
+
+       len += _mali_kernel_core_dump_state(buf + len, size - len);
+
+       seq_commit(seq_file, len);
+
+       return 0;
+}
+
+/* open callback: the whole state dump is produced by a single show() call,
+ * hence single_open(). */
+static int mali_seq_internal_state_open(struct inode *inode, struct file *file)
+{
+       int rc;
+
+       rc = single_open(file, mali_seq_internal_state_show, NULL);
+       return rc;
+}
+
+/* debugfs operations for the state_dump file (standard single_open seq_file
+ * plumbing). */
+static const struct file_operations mali_seq_internal_state_fops = {
+       .owner = THIS_MODULE,
+       .open = mali_seq_internal_state_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+#endif /* MALI_STATE_TRACKING */
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+/* Reader for profiling/record: "1\n" while a recording is in progress,
+ * "0\n" otherwise. */
+static ssize_t profiling_record_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char tmp[64];
+       int len;
+
+       len = sprintf(tmp, "%u\n", _mali_internal_profiling_is_recording() ? 1 : 0);
+       return simple_read_from_buffer(ubuf, cnt, ppos, tmp, len);
+}
+
+/* Writer for profiling/record: a non-zero value starts a new internal
+ * profiling recording (clearing any finished one first), zero stops the
+ * current recording.  Returns the number of bytes consumed, or a negative
+ * errno on parse/state errors. */
+static ssize_t profiling_record_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       unsigned long val;
+       int ret;
+
+       if (cnt >= sizeof(buf))
+       {
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&buf, ubuf, cnt))
+       {
+               return -EFAULT;
+       }
+
+       /* NUL-terminate before numeric parsing. */
+       buf[cnt] = 0;
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (ret < 0)
+       {
+               return ret;
+       }
+
+       if (val != 0)
+       {
+               u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* This can be made configurable at a later stage if we need to */
+
+               /* check if we are already recording */
+               if (MALI_TRUE == _mali_internal_profiling_is_recording())
+               {
+                       MALI_DEBUG_PRINT(3, ("Recording of profiling events already in progress\n"));
+                       return -EFAULT;
+               }
+
+               /* check if we need to clear out an old recording first */
+               if (MALI_TRUE == _mali_internal_profiling_have_recording())
+               {
+                       if (_MALI_OSK_ERR_OK != _mali_internal_profiling_clear())
+                       {
+                               MALI_DEBUG_PRINT(3, ("Failed to clear existing recording of profiling events\n"));
+                               return -EFAULT;
+                       }
+               }
+
+               /* start recording profiling data */
+               if (_MALI_OSK_ERR_OK != _mali_internal_profiling_start(&limit))
+               {
+                       MALI_DEBUG_PRINT(3, ("Failed to start recording of profiling events\n"));
+                       return -EFAULT;
+               }
+
+               /* _mali_internal_profiling_start() updates limit with the
+                * number of entries actually available. */
+               MALI_DEBUG_PRINT(3, ("Profiling recording started (max %u events)\n", limit));
+       }
+       else
+       {
+               /* stop recording profiling data */
+               u32 count = 0;
+               if (_MALI_OSK_ERR_OK != _mali_internal_profiling_stop(&count))
+               {
+                       MALI_DEBUG_PRINT(2, ("Failed to stop recording of profiling events\n"));
+                       return -EFAULT;
+               }
+               
+               MALI_DEBUG_PRINT(2, ("Profiling recording stopped (recorded %u events)\n", count));
+       }
+
+       *ppos += cnt;
+       return cnt;
+}
+
+/* debugfs operations for the profiling/record start/stop file. */
+static const struct file_operations profiling_record_fops = {
+       .owner = THIS_MODULE,
+       .read  = profiling_record_read,
+       .write = profiling_record_write,
+};
+
+/* seq_file start callback: allocate a heap-resident copy of the requested
+ * position to serve as the iterator.  Returns NULL (end of sequence) when no
+ * recording exists or the allocation fails. */
+static void *profiling_events_start(struct seq_file *s, loff_t *pos)
+{
+       loff_t *iter;
+
+       /* check if we have data avaiable */
+       if (MALI_TRUE != _mali_internal_profiling_have_recording())
+       {
+               return NULL;
+       }
+
+       iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+       if (NULL == iter)
+       {
+               return NULL;
+       }
+
+       *iter = *pos;
+       return iter;
+}
+
+/* seq_file next callback: advance the iterator, stopping when the recording
+ * disappears or the next index is past the number of recorded events. */
+static void *profiling_events_next(struct seq_file *s, void *v, loff_t *pos)
+{
+       loff_t *iter = v;
+
+       if (MALI_TRUE != _mali_internal_profiling_have_recording() ||
+           _mali_internal_profiling_get_count() <= (u32)(*iter + 1))
+       {
+               return NULL;
+       }
+
+       ++*iter;
+       *pos = *iter;
+       return iter;
+}
+
+/* seq_file stop callback: release the iterator allocated by
+ * profiling_events_start(). */
+static void profiling_events_stop(struct seq_file *s, void *v)
+{
+       loff_t *iter = v;
+
+       kfree(iter);
+}
+
+/* seq_file show callback: emit one raw profiling event per line in the form
+ * "<timestamp> <event_id> <d0> <d1> <d2> <d3> <d4>".  Indices that cannot be
+ * fetched are silently skipped, as the seq_file contract allows. */
+static int profiling_events_show(struct seq_file *seq_file, void *v)
+{
+       u64 timestamp;
+       u32 event_id;
+       u32 data[5];
+       u32 index = (u32)*(loff_t *)v;
+
+       if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(index, &timestamp, &event_id, data))
+       {
+               seq_printf(seq_file, "%llu %u %u %u %u %u %u\n", timestamp, event_id,
+                          data[0], data[1], data[2], data[3], data[4]);
+       }
+
+       return 0;
+}
+
+/* seq_file show callback: like profiling_events_show(), but appends a decoded,
+ * human-readable form of each event after a '#'.  Event id bit layout used
+ * below: bits 27..24 = event type, bits 23..16 = channel, bits 15..0 = reason. */
+static int profiling_events_show_human_readable(struct seq_file *seq_file, void *v)
+{
+       #define MALI_EVENT_ID_IS_HW(event_id) (((event_id & 0x00FF0000) >= MALI_PROFILING_EVENT_CHANNEL_GP0) && ((event_id & 0x00FF0000) <= MALI_PROFILING_EVENT_CHANNEL_PP7))
+
+       /* NOTE(review): start_time is function-static, so it is shared between
+        * concurrent readers and only reset when index 0 is shown — assumes a
+        * single sequential dump at a time; confirm if concurrent opens matter. */
+       static u64 start_time = 0;
+       loff_t *spos = v;
+       u32 index;
+       u64 timestamp;
+       u32 event_id;
+       u32 data[5];
+
+       index = (u32)*spos;
+
+       /* Retrieve all events */
+       if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(index, &timestamp, &event_id, data))
+       {
+               /* Raw form first, then the decoded annotation. */
+               seq_printf(seq_file, "%llu %u %u %u %u %u %u # ", timestamp, event_id, data[0], data[1], data[2], data[3], data[4]);
+
+               if (0 == index)
+               {
+                       start_time = timestamp;
+               }
+
+               seq_printf(seq_file, "[%06u] ", index);
+
+               /* Decode the event type (bits 27..24). */
+               switch(event_id & 0x0F000000)
+               {
+               case MALI_PROFILING_EVENT_TYPE_SINGLE:
+                       seq_printf(seq_file, "SINGLE | ");
+                       break;
+               case MALI_PROFILING_EVENT_TYPE_START:
+                       seq_printf(seq_file, "START | ");
+                       break;
+               case MALI_PROFILING_EVENT_TYPE_STOP:
+                       seq_printf(seq_file, "STOP | ");
+                       break;
+               case MALI_PROFILING_EVENT_TYPE_SUSPEND:
+                       seq_printf(seq_file, "SUSPEND | ");
+                       break;
+               case MALI_PROFILING_EVENT_TYPE_RESUME:
+                       seq_printf(seq_file, "RESUME | ");
+                       break;
+               default:
+                       seq_printf(seq_file, "0x%01X | ", (event_id & 0x0F000000) >> 24);
+                       break;
+               }
+
+               /* Decode the channel (bits 23..16). */
+               switch(event_id & 0x00FF0000)
+               {
+               case MALI_PROFILING_EVENT_CHANNEL_SOFTWARE:
+                       seq_printf(seq_file, "SW | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_GP0:
+                       seq_printf(seq_file, "GP0 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP0:
+                       seq_printf(seq_file, "PP0 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP1:
+                       seq_printf(seq_file, "PP1 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP2:
+                       seq_printf(seq_file, "PP2 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP3:
+                       seq_printf(seq_file, "PP3 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP4:
+                       seq_printf(seq_file, "PP4 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP5:
+                       seq_printf(seq_file, "PP5 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP6:
+                       seq_printf(seq_file, "PP6 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP7:
+                       seq_printf(seq_file, "PP7 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_GPU:
+                       seq_printf(seq_file, "GPU | ");
+                       break;
+               default:
+                       seq_printf(seq_file, "0x%02X | ", (event_id & 0x00FF0000) >> 16);
+                       break;
+               }
+
+               /* Decode the reason (bits 15..0); only HW start/stop events
+                * have named reasons, everything else is printed raw. */
+               if (MALI_EVENT_ID_IS_HW(event_id))
+               {
+                       if (((event_id & 0x0F000000) == MALI_PROFILING_EVENT_TYPE_START) || ((event_id & 0x0F000000) == MALI_PROFILING_EVENT_TYPE_STOP))
+                       {
+                               switch(event_id & 0x0000FFFF)
+                               {
+                               case MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL:
+                                       seq_printf(seq_file, "PHYSICAL | ");
+                                       break;
+                               case MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL:
+                                       seq_printf(seq_file, "VIRTUAL | ");
+                                       break;
+                               default:
+                                       seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+                                       break;
+                               }
+                       }
+                       else
+                       {
+                               seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+                       }
+               }
+               else
+               {
+                       seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+               }
+
+               /* Timestamp relative to the first event of the dump. */
+               seq_printf(seq_file, "T0 + 0x%016llX\n", timestamp - start_time);
+
+               return 0;
+       }
+
+       return 0;
+}
+
+/* seq_file iterator for the raw profiling/events file. */
+static const struct seq_operations profiling_events_seq_ops = {
+       .start = profiling_events_start,
+       .next  = profiling_events_next,
+       .stop  = profiling_events_stop,
+       .show  = profiling_events_show
+};
+
+static int profiling_events_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &profiling_events_seq_ops);
+}
+
+static const struct file_operations profiling_events_fops = {
+       .owner = THIS_MODULE,
+       .open = profiling_events_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release,
+};
+
+/* Same iterator, but with the annotated show() for
+ * profiling/events_human_readable. */
+static const struct seq_operations profiling_events_human_readable_seq_ops = {
+       .start = profiling_events_start,
+       .next  = profiling_events_next,
+       .stop  = profiling_events_stop,
+       .show  = profiling_events_show_human_readable
+};
+
+static int profiling_events_human_readable_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &profiling_events_human_readable_seq_ops);
+}
+
+static const struct file_operations profiling_events_human_readable_fops = {
+       .owner = THIS_MODULE,
+       .open = profiling_events_human_readable_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release,
+};
+
+#endif
+
+/* Reader for memory_usage: current Mali memory usage, newline terminated. */
+static ssize_t memory_used_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char tmp[64];
+       size_t len;
+
+       len = snprintf(tmp, sizeof(tmp), "%u\n", _mali_ukk_report_memory_usage());
+       return simple_read_from_buffer(ubuf, cnt, ppos, tmp, len);
+}
+
+/* debugfs operations for the read-only memory_usage file. */
+static const struct file_operations memory_usage_fops = {
+       .owner = THIS_MODULE,
+       .read = memory_used_read,
+};
+
+/* Reader for utilization_gp_pp: combined GP+PP utilization value. */
+static ssize_t utilization_gp_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char tmp[64];
+       size_t len;
+
+       len = snprintf(tmp, sizeof(tmp), "%u\n", _mali_ukk_utilization_gp_pp());
+       return simple_read_from_buffer(ubuf, cnt, ppos, tmp, len);
+}
+
+/* Reader for utilization_gp: geometry-processor utilization value. */
+static ssize_t utilization_gp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char tmp[64];
+       size_t len;
+
+       len = snprintf(tmp, sizeof(tmp), "%u\n", _mali_ukk_utilization_gp());
+       return simple_read_from_buffer(ubuf, cnt, ppos, tmp, len);
+}
+
+/* Reader for utilization_pp: pixel-processor utilization value. */
+static ssize_t utilization_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char tmp[64];
+       size_t len;
+
+       len = snprintf(tmp, sizeof(tmp), "%u\n", _mali_ukk_utilization_pp());
+       return simple_read_from_buffer(ubuf, cnt, ppos, tmp, len);
+}
+
+
+/* debugfs operations for the three read-only utilization files. */
+static const struct file_operations utilization_gp_pp_fops = {
+       .owner = THIS_MODULE,
+       .read = utilization_gp_pp_read,
+};
+
+static const struct file_operations utilization_gp_fops = {
+       .owner = THIS_MODULE,
+       .read = utilization_gp_read,
+};
+
+static const struct file_operations utilization_pp_fops = {
+       .owner = THIS_MODULE,
+       .read = utilization_pp_read,
+};
+
+/* Writer for a userspace_settings/<name> file: parse the decimal value and
+ * store it in the user-settings database.  The setting id was stashed in
+ * filp->private_data by open_copy_private_data(). */
+static ssize_t user_settings_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char kbuf[32];
+       unsigned long value;
+       int err;
+       _mali_uk_user_setting_t setting;
+
+       /* Bounded copy from user space, always NUL-terminated. */
+       cnt = min(cnt, sizeof(kbuf) - 1);
+       if (copy_from_user(kbuf, ubuf, cnt))
+       {
+               return -EFAULT;
+       }
+       kbuf[cnt] = '\0';
+
+       err = strict_strtoul(kbuf, 10, &value);
+       if (0 != err)
+       {
+               return err;
+       }
+
+       /* Update setting */
+       setting = (_mali_uk_user_setting_t)(filp->private_data);
+       mali_set_user_setting(setting, value);
+
+       *ppos += cnt;
+       return cnt;
+}
+
+/* Reader for a userspace_settings/<name> file: report the current value of
+ * the setting identified by filp->private_data. */
+static ssize_t user_settings_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char tmp[64];
+       size_t len;
+       _mali_uk_user_setting_t setting;
+
+       setting = (_mali_uk_user_setting_t)(filp->private_data);
+
+       len = snprintf(tmp, sizeof(tmp), "%u\n", mali_get_user_setting(setting));
+       return simple_read_from_buffer(ubuf, cnt, ppos, tmp, len);
+}
+
+/* debugfs operations for the per-setting files; open stashes the setting id
+ * (passed as the file's data pointer) in filp->private_data. */
+static const struct file_operations user_settings_fops = {
+       .owner = THIS_MODULE,
+       .open = open_copy_private_data,
+       .read = user_settings_read,
+       .write = user_settings_write,
+};
+
+/* Create the userspace_settings debugfs directory with one file per setting,
+ * named after its description string.  The setting index is smuggled through
+ * the file's data pointer.  Always returns 0 (creation failures are ignored). */
+static int mali_sysfs_user_settings_register(void)
+{
+       int i;
+       struct dentry *settings_dir;
+
+       settings_dir = debugfs_create_dir("userspace_settings", mali_debugfs_dir);
+       if (NULL == settings_dir)
+       {
+               return 0;
+       }
+
+       for (i = 0; i < _MALI_UK_USER_SETTING_MAX; i++)
+       {
+               debugfs_create_file(_mali_uk_user_setting_descriptions[i], 0600, settings_dir, (void*)i, &user_settings_fops);
+       }
+
+       return 0;
+}
+
+/* Build the whole Mali debugfs tree under /sys/kernel/debug/<mali_dev_name>:
+ * power/, gp/, pp/, l2/, the utilization and memory_usage files, optional
+ * profiling/ and state_dump entries, and the userspace_settings directory.
+ * debugfs being unavailable is not an error: the function degrades to a
+ * no-op and always returns 0. */
+int mali_sysfs_register(const char *mali_dev_name)
+{
+       mali_debugfs_dir = debugfs_create_dir(mali_dev_name, NULL);
+       if(ERR_PTR(-ENODEV) == mali_debugfs_dir)
+       {
+               /* Debugfs not supported. */
+               mali_debugfs_dir = NULL;
+       }
+       else
+       {
+               if(NULL != mali_debugfs_dir)
+               {
+                       /* Debugfs directory created successfully; create files now */
+                       struct dentry *mali_power_dir;
+                       struct dentry *mali_gp_dir;
+                       struct dentry *mali_pp_dir;
+                       struct dentry *mali_l2_dir;
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+                       struct dentry *mali_profiling_dir;
+#endif
+
+                       /* power/: always_on toggle and the power_events command file. */
+                       mali_power_dir = debugfs_create_dir("power", mali_debugfs_dir);
+                       if (mali_power_dir != NULL)
+                       {
+                               debugfs_create_file("always_on", 0400, mali_power_dir, NULL, &power_always_on_fops);
+                               debugfs_create_file("power_events", 0400, mali_power_dir, NULL, &power_power_events_fops);
+                       }
+
+                       /* gp/: broadcast counter files under "all" plus one
+                        * directory for the (single) GP core. */
+                       mali_gp_dir = debugfs_create_dir("gp", mali_debugfs_dir);
+                       if (mali_gp_dir != NULL)
+                       {
+                               struct dentry *mali_gp_all_dir;
+                               u32 num_groups;
+                               int i;
+
+                               mali_gp_all_dir = debugfs_create_dir("all", mali_gp_dir);
+                               if (mali_gp_all_dir != NULL)
+                               {
+                                       debugfs_create_file("counter_src0", 0400, mali_gp_all_dir, NULL, &gp_all_counter_src0_fops);
+                                       debugfs_create_file("counter_src1", 0400, mali_gp_all_dir, NULL, &gp_all_counter_src1_fops);
+                               }
+
+                               num_groups = mali_group_get_glob_num_groups();
+                               for (i = 0; i < num_groups; i++)
+                               {
+                                       struct mali_group *group = mali_group_get_glob_group(i);
+
+                                       struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+                                       if (NULL != gp_core)
+                                       {
+                                               struct dentry *mali_gp_gpx_dir;
+                                               mali_gp_gpx_dir = debugfs_create_dir("gp0", mali_gp_dir);
+                                               if (NULL != mali_gp_gpx_dir)
+                                               {
+                                                       debugfs_create_file("counter_src0", 0600, mali_gp_gpx_dir, gp_core, &gp_gpx_counter_src0_fops);
+                                                       debugfs_create_file("counter_src1", 0600, mali_gp_gpx_dir, gp_core, &gp_gpx_counter_src1_fops);
+                                               }
+                                               break; /* no need to look for any other GP cores */
+                                       }
+
+                               }
+                       }
+
+                       /* pp/: broadcast counter files under "all" plus one
+                        * "pp<N>" directory per PP core found in the groups. */
+                       mali_pp_dir = debugfs_create_dir("pp", mali_debugfs_dir);
+                       if (mali_pp_dir != NULL)
+                       {
+                               struct dentry *mali_pp_all_dir;
+                               u32 num_groups;
+                               int i;
+
+                               mali_pp_all_dir = debugfs_create_dir("all", mali_pp_dir);
+                               if (mali_pp_all_dir != NULL)
+                               {
+                                       debugfs_create_file("counter_src0", 0400, mali_pp_all_dir, NULL, &pp_all_counter_src0_fops);
+                                       debugfs_create_file("counter_src1", 0400, mali_pp_all_dir, NULL, &pp_all_counter_src1_fops);
+                               }
+
+                               num_groups = mali_group_get_glob_num_groups();
+                               for (i = 0; i < num_groups; i++)
+                               {
+                                       struct mali_group *group = mali_group_get_glob_group(i);
+
+                                       struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+                                       if (NULL != pp_core)
+                                       {
+                                               char buf[16];
+                                               struct dentry *mali_pp_ppx_dir;
+                                               _mali_osk_snprintf(buf, sizeof(buf), "pp%u", mali_pp_core_get_id(pp_core));
+                                               mali_pp_ppx_dir = debugfs_create_dir(buf, mali_pp_dir);
+                                               if (NULL != mali_pp_ppx_dir)
+                                               {
+                                                       debugfs_create_file("counter_src0", 0600, mali_pp_ppx_dir, pp_core, &pp_ppx_counter_src0_fops);
+                                                       debugfs_create_file("counter_src1", 0600, mali_pp_ppx_dir, pp_core, &pp_ppx_counter_src1_fops);
+                                               }
+                                       }
+                               }
+                       }
+
+                       /* l2/: broadcast counter files under "all" plus one
+                        * "l2<N>" directory per L2 cache core. */
+                       mali_l2_dir = debugfs_create_dir("l2", mali_debugfs_dir);
+                       if (mali_l2_dir != NULL)
+                       {
+                               struct dentry *mali_l2_all_dir;
+                               u32 l2_id;
+                               struct mali_l2_cache_core *l2_cache;
+
+                               mali_l2_all_dir = debugfs_create_dir("all", mali_l2_dir);
+                               if (mali_l2_all_dir != NULL)
+                               {
+                                       debugfs_create_file("counter_src0", 0400, mali_l2_all_dir, NULL, &l2_all_counter_src0_fops);
+                                       debugfs_create_file("counter_src1", 0400, mali_l2_all_dir, NULL, &l2_all_counter_src1_fops);
+                               }
+
+                               l2_id = 0;
+                               l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+                               while (NULL != l2_cache)
+                               {
+                                       char buf[16];
+                                       struct dentry *mali_l2_l2x_dir;
+                                       _mali_osk_snprintf(buf, sizeof(buf), "l2%u", l2_id);
+                                       mali_l2_l2x_dir = debugfs_create_dir(buf, mali_l2_dir);
+                                       if (NULL != mali_l2_l2x_dir)
+                                       {
+                                               debugfs_create_file("counter_src0", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src0_fops);
+                                               debugfs_create_file("counter_src1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src1_fops);
+                                       }
+
+                                       /* try next L2 */
+                                       l2_id++;
+                                       l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+                               }
+                       }
+
+                       debugfs_create_file("memory_usage", 0400, mali_debugfs_dir, NULL, &memory_usage_fops);
+
+                       debugfs_create_file("utilization_gp_pp", 0400, mali_debugfs_dir, NULL, &utilization_gp_pp_fops);
+                       debugfs_create_file("utilization_gp", 0400, mali_debugfs_dir, NULL, &utilization_gp_fops);
+                       debugfs_create_file("utilization_pp", 0400, mali_debugfs_dir, NULL, &utilization_pp_fops);
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+                       /* profiling/: record control, raw and annotated event dumps,
+                        * plus the per-process default SW-events enable file. */
+                       mali_profiling_dir = debugfs_create_dir("profiling", mali_debugfs_dir);
+                       if (mali_profiling_dir != NULL)
+                       {
+                               struct dentry *mali_profiling_proc_dir = debugfs_create_dir("proc", mali_profiling_dir);
+                               if (mali_profiling_proc_dir != NULL)
+                               {
+                                       struct dentry *mali_profiling_proc_default_dir = debugfs_create_dir("default", mali_profiling_proc_dir);
+                                       if (mali_profiling_proc_default_dir != NULL)
+                                       {
+                                               debugfs_create_file("enable", 0600, mali_profiling_proc_default_dir, (void*)_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, &user_settings_fops);
+                                       }
+                               }
+                               debugfs_create_file("record", 0600, mali_profiling_dir, NULL, &profiling_record_fops);
+                               debugfs_create_file("events", 0400, mali_profiling_dir, NULL, &profiling_events_fops);
+                               debugfs_create_file("events_human_readable", 0400, mali_profiling_dir, NULL, &profiling_events_human_readable_fops);
+                       }
+#endif
+
+#if MALI_STATE_TRACKING
+                       debugfs_create_file("state_dump", 0400, mali_debugfs_dir, NULL, &mali_seq_internal_state_fops);
+#endif
+
+                       if (mali_sysfs_user_settings_register())
+                       {
+                               /* Failed to create the debugfs entries for the user settings DB. */
+                               MALI_DEBUG_PRINT(2, ("Failed to create user setting debugfs files. Ignoring...\n"));
+                       }
+               }
+       }
+
+       /* Success! */
+       return 0;
+}
+
+/* Tear down the entire Mali debugfs tree, if it was ever created.
+ * Always returns 0. */
+int mali_sysfs_unregister(void)
+{
+       if (mali_debugfs_dir != NULL)
+       {
+               debugfs_remove_recursive(mali_debugfs_dir);
+       }
+       return 0;
+}
+
+#else
+
+/* Dummy implementations for non-GPL */
+
+/* Fix: the dummy must use the same prototype as the real implementation and
+ * the declaration in mali_kernel_sysfs.h (a single const char * argument);
+ * the previous 3-argument form conflicted with that header and would break
+ * the non-GPL build. */
+int mali_sysfs_register(const char *mali_dev_name)
+{
+       return 0;
+}
+
+/* Dummy: debugfs support is compiled out in the non-GPL build. */
+int mali_sysfs_unregister(void)
+{
+       return 0;
+}
+
+
+#endif
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_kernel_sysfs.h b/drivers/gpu/arm/mali400/mali/linux/mali_kernel_sysfs.h
new file mode 100644 (file)
index 0000000..31e7bc6
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_SYSFS_H__
+#define __MALI_KERNEL_SYSFS_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include <linux/device.h>
+
+#define MALI_PROC_DIR "driver/mali"
+
+/* Create/remove the Mali debugfs tree; both always return 0. */
+int mali_sysfs_register(const char *mali_dev_name);
+int mali_sysfs_unregister(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_SYSFS_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_linux_pm_testsuite.h b/drivers/gpu/arm/mali400/mali/linux/mali_linux_pm_testsuite.h
new file mode 100644 (file)
index 0000000..db57d21
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#ifndef __MALI_LINUX_PM_TESTSUITE_H__
+#define __MALI_LINUX_PM_TESTSUITE_H__
+
+#if MALI_POWER_MGMT_TEST_SUITE && defined(CONFIG_PM)
+
+/* Event categories recorded by the PM test suite. */
+typedef enum
+{
+       _MALI_DEVICE_PMM_TIMEOUT_EVENT,
+       _MALI_DEVICE_PMM_JOB_SCHEDULING_EVENTS,
+       _MALI_DEVICE_PMM_REGISTERED_CORES,
+       _MALI_DEVICE_MAX_PMM_EVENTS
+} _mali_device_pmm_recording_events;
+
+/* Global flags and status shared with the PM test-suite instrumentation. */
+extern unsigned int mali_timeout_event_recording_on;
+extern unsigned int mali_job_scheduling_events_recording_on;
+extern unsigned int pwr_mgmt_status_reg;
+extern unsigned int is_mali_pmm_testsuite_enabled;
+extern unsigned int is_mali_pmu_present;
+
+#endif /* MALI_POWER_MGMT_TEST_SUITE && defined(CONFIG_PM) */
+
+#endif /* __MALI_LINUX_PM_TESTSUITE_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_linux_trace.h b/drivers/gpu/arm/mali400/mali/linux/mali_linux_trace.h
new file mode 100644 (file)
index 0000000..915240a
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#if !defined (MALI_LINUX_TRACE_H) || defined (TRACE_HEADER_MULTI_READ)
+#define MALI_LINUX_TRACE_H
+
+#include <linux/types.h>
+
+#include <linux/stringify.h>
+#include <linux/tracepoint.h>
+
+#undef  TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+/* Fixed typo: __stringfy -> __stringify (provided by <linux/stringify.h>). */
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE mali_linux_trace
+
+/**
+ * Tracepoint used to communicate the status of a GPU. Emitted
+ * when a GPU turns on or turns off.
+ *
+ * @param event_id The type of the event. This parameter is a bitfield
+ *  encoding the type of the event.
+ *
+ * @param d0 First data parameter.
+ * @param d1 Second data parameter.
+ * @param d2 Third data parameter.
+ * @param d3 Fourth data parameter.
+ * @param d4 Fifth data parameter.
+ */
+TRACE_EVENT(mali_timeline_event,
+
+    TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1, 
+        unsigned int d2, unsigned int d3, unsigned int d4),
+
+    TP_ARGS(event_id, d0, d1, d2, d3, d4),
+
+    TP_STRUCT__entry(
+        __field(unsigned int, event_id)
+        __field(unsigned int, d0)
+        __field(unsigned int, d1)
+        __field(unsigned int, d2)
+        __field(unsigned int, d3)
+        __field(unsigned int, d4)
+    ),
+
+    TP_fast_assign(
+        __entry->event_id = event_id;
+        __entry->d0 = d0;
+        __entry->d1 = d1;
+        __entry->d2 = d2;
+        __entry->d3 = d3;
+        __entry->d4 = d4;
+    ),
+
+    TP_printk("event=%d", __entry->event_id)
+);
+
+/**
+ * Tracepoint used to register the value of a hardware counter.
+ * Hardware counters belonging to the vertex or fragment processor are
+ * reported via this tracepoint each frame, whilst L2 cache hardware
+ * counters are reported continuously.
+ *
+ * @param counter_id The counter ID.
+ * @param value The value of the counter.
+ */
+TRACE_EVENT(mali_hw_counter,
+
+    TP_PROTO(unsigned int counter_id, unsigned int value),
+
+    TP_ARGS(counter_id, value),
+
+    TP_STRUCT__entry(
+        __field(unsigned int, counter_id)
+        __field(unsigned int, value)
+    ),
+
+    TP_fast_assign(
+        __entry->counter_id = counter_id;
+        /* Fix: 'value' was never assigned, so TP_printk emitted an
+         * uninitialized trace field. */
+        __entry->value = value;
+    ),
+
+    TP_printk("event %d = %d", __entry->counter_id, __entry->value)
+);
+
+/**
+ * Tracepoint used to send a bundle of software counters.
+ *
+ * @param pid Process id reporting the counters.
+ * @param tid Thread id reporting the counters.
+ * @param surface_id Opaque surface pointer (semantics defined by the caller).
+ * @param counters The bundle of counters; may be NULL (see TP_printk).
+ */
+TRACE_EVENT(mali_sw_counters,
+
+    TP_PROTO(pid_t pid, pid_t tid, void * surface_id, unsigned int * counters),
+
+    TP_ARGS(pid, tid, surface_id, counters),
+
+    TP_STRUCT__entry(
+            __field(pid_t, pid)
+            __field(pid_t, tid)
+            __field(void *, surface_id)
+            __field(unsigned int *, counters)
+    ),
+
+    TP_fast_assign(
+            __entry->pid = pid;
+                       __entry->tid = tid;
+                       __entry->surface_id = surface_id;
+                       __entry->counters = counters;
+    ),
+
+    TP_printk("counters were %s", __entry->counters == NULL? "NULL" : "not NULL")
+);
+
+#endif /* MALI_LINUX_TRACE_H */
+
+/* This part must exist outside the header guard. */
+#include <trace/define_trace.h>
+
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_osk_atomics.c b/drivers/gpu/arm/mali400/mali/linux/mali_osk_atomics.c
new file mode 100644 (file)
index 0000000..6c135c3
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_atomics.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <asm/atomic.h>
+#include "mali_kernel_common.h"
+
+/** Atomically decrement @atom by one. */
+void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom )
+{
+    atomic_dec((atomic_t *)&atom->u.val);
+}
+
+/** Atomically decrement @atom by one and return the new value. */
+u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom )
+{
+    return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+/** Atomically increment @atom by one. */
+void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom )
+{
+    atomic_inc((atomic_t *)&atom->u.val);
+}
+
+/** Atomically increment @atom by one and return the new value. */
+u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom )
+{
+    return atomic_inc_return((atomic_t *)&atom->u.val);
+}
+
+/** Initialize @atom to @val.
+ *  Returns _MALI_OSK_ERR_INVALID_ARGS if @atom is NULL, else _MALI_OSK_ERR_OK. */
+_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val )
+{
+    MALI_CHECK_NON_NULL(atom, _MALI_OSK_ERR_INVALID_ARGS);
+    atomic_set((atomic_t *)&atom->u.val, val);
+    return _MALI_OSK_ERR_OK;
+}
+
+/** Return the current value of @atom. */
+u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom )
+{
+    return atomic_read((atomic_t *)&atom->u.val);
+}
+
+/** Terminate @atom. No-op on Linux: kernel atomics need no teardown. */
+void _mali_osk_atomic_term( _mali_osk_atomic_t *atom )
+{
+    MALI_IGNORE(atom);
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_osk_irq.c b/drivers/gpu/arm/mali400/mali/linux/mali_osk_irq.c
new file mode 100644 (file)
index 0000000..fbfe830
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_irq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/slab.h>        /* For memory allocation */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "linux/interrupt.h"
+
+/* Per-core IRQ registration record, passed as dev_id to request_irq(). */
+typedef struct _mali_osk_irq_t_struct
+{
+       u32 irqnum;                        /* Linux IRQ number this object is registered on */
+       void *data;                        /* opaque pointer handed to the upper-half handler */
+       _mali_osk_irq_uhandler_t uhandler; /* upper-half handler for this core */
+} mali_osk_irq_object_t;
+
+/* NOTE(review): irq_handler_func_t is unused in this file — it is the legacy
+ * (pre-2.6.19, three-argument) handler signature kept only for reference. */
+typedef irqreturn_t (*irq_handler_func_t)(int, void *, struct pt_regs *);
+static irqreturn_t irq_handler_upper_half (int port_name, void* dev_id ); /* , struct pt_regs *regs*/
+
+/** Create and register an IRQ handler object for one mali core.
+ *  If @irqnum is -1 the IRQ line is auto-probed (up to 3 attempts) using
+ *  @trigger_func / @ack_func with @probe_data.
+ *  Returns the new object, or NULL on probe failure, allocation failure,
+ *  or request_irq() failure. Freed with _mali_osk_irq_term(). */
+_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description )
+{
+       mali_osk_irq_object_t *irq_object;
+
+       irq_object = kmalloc(sizeof(mali_osk_irq_object_t), GFP_KERNEL);
+       if (NULL == irq_object)
+       {
+               return NULL;
+       }
+
+       if (-1 == irqnum)
+       {
+               /* Probe for IRQ */
+               if ( (NULL != trigger_func) && (NULL != ack_func) )
+               {
+                       unsigned long probe_count = 3;
+                       _mali_osk_errcode_t err;
+                       int irq;
+
+                       MALI_DEBUG_PRINT(2, ("Probing for irq\n"));
+
+                       do
+                       {
+                               unsigned long mask;
+
+                               mask = probe_irq_on();
+                               trigger_func(probe_data);
+
+                               _mali_osk_time_ubusydelay(5);
+
+                               irq = probe_irq_off(mask);
+                               err = ack_func(probe_data);
+                       }
+                       while (irq < 0 && (err == _MALI_OSK_ERR_OK) && probe_count--);
+
+                       if (irq < 0 || (_MALI_OSK_ERR_OK != err)) irqnum = -1;
+                       else irqnum = irq;
+               }
+               else irqnum = -1; /* no probe functions, fault */
+
+               if (-1 != irqnum)
+               {
+                       /* found an irq */
+                       MALI_DEBUG_PRINT(2, ("Found irq %d\n", irqnum));
+               }
+               else
+               {
+                       MALI_DEBUG_PRINT(2, ("Probe for irq failed\n"));
+               }
+       }
+
+       irq_object->irqnum = irqnum;
+       irq_object->uhandler = uhandler;
+       irq_object->data = int_data;
+
+       if (-1 == irqnum)
+       {
+               MALI_DEBUG_PRINT(2, ("No IRQ for core '%s' found during probe\n", description));
+               kfree(irq_object);
+               return NULL;
+       }
+
+       if (0 != request_irq(irqnum, irq_handler_upper_half, IRQF_SHARED, description, irq_object))
+       {
+               MALI_DEBUG_PRINT(2, ("Unable to install IRQ handler for core '%s'\n", description));
+               kfree(irq_object);
+               return NULL;
+       }
+
+       return irq_object;
+}
+
+/** Unregister the IRQ handler created by _mali_osk_irq_init() and free it. */
+void _mali_osk_irq_term( _mali_osk_irq_t *irq )
+{
+       mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
+       free_irq(irq_object->irqnum, irq_object);
+       kfree(irq_object);
+}
+
+
+/** This function is called directly in interrupt context from the OS just after
+ * the CPU gets the hw-irq from mali, or other devices on the same IRQ-channel.
+ * One of these handlers is registered for each mali core (the line is shared),
+ * so when an interrupt arrives this function is called as many times as there
+ * are registered mali cores. Each call checks exactly one core, selected by the
+ * \a dev_id argument, by delegating to that core's registered upper-half
+ * handler; the handler reports whether its core raised the interrupt, and the
+ * result is translated to IRQ_HANDLED / IRQ_NONE accordingly.
+ */
+static irqreturn_t irq_handler_upper_half (int port_name, void* dev_id ) /* , struct pt_regs *regs*/
+{
+       mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)dev_id;
+
+       if (irq_object->uhandler(irq_object->data) == _MALI_OSK_ERR_OK)
+       {
+               return IRQ_HANDLED;
+       }
+       return IRQ_NONE;
+}
+
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_osk_locks.c b/drivers/gpu/arm/mali400/mali/linux/mali_osk_locks.c
new file mode 100644 (file)
index 0000000..2ba952e
--- /dev/null
@@ -0,0 +1,333 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.c
+ * Implemenation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+
+#include <linux/slab.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* These are all the lock variants we implement: */
+typedef enum
+{
+       _MALI_OSK_INTERNAL_LOCKTYPE_SPIN,            /* Mutex, implicitly non-interruptable, use spin_lock/spin_unlock */
+       _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ,        /* Mutex, IRQ version of spinlock, use spin_lock_irqsave/spin_unlock_irqrestore */
+       _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX,           /* Interruptable, use mutex_unlock()/down_interruptable() */
+       _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT,    /* Non-Interruptable, use mutex_unlock()/down() */
+       _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW, /* Non-interruptable, Reader/Writer, use {mutex_unlock,down}{read,write}() */
+
+       /* Linux supports, but we do not support:
+        * Non-Interruptable Reader/Writer spinlock mutexes - RW optimization will be switched off
+        */
+
+       /* Linux does not support:
+        * One-locks, of any sort - no optimization for this fact will be made.
+        */
+
+} _mali_osk_internal_locktype;
+
+/* OSK lock object: one of the kernel primitives in 'obj' selected by 'type'.
+ * 'flags' stores the saved IRQ flags for the SPIN_IRQ variant between
+ * _mali_osk_lock_wait() and _mali_osk_lock_signal(). */
+struct _mali_osk_lock_t_struct
+{
+    _mali_osk_internal_locktype type;
+       unsigned long flags;
+    union
+    {
+        spinlock_t spinlock;
+       struct mutex mutex;
+        struct rw_semaphore rw_sema;
+    } obj;
+       MALI_DEBUG_CODE(
+                                 /** original flags for debug checking */
+                                 _mali_osk_lock_flags_t orig_flags;
+
+                                 /* id of the thread currently holding this lock, 0 if no
+                                  * threads hold it. */
+                                 u32 owner;
+                                 /* number of owners this lock currently has (can be > 1 if
+                                  * taken in R/O mode. */
+                                 u32 nOwners;
+                                 /* what mode the lock was taken in */
+                                 _mali_osk_lock_mode_t mode;
+       ); /* MALI_DEBUG_CODE */
+};
+
+/** Allocate and initialize a lock whose backing primitive is selected from
+ *  @flags: spinlock, IRQ-safe spinlock, rw-semaphore, or (default) a mutex in
+ *  interruptible or non-interruptible flavour.
+ *  @initial must be 0 (reserved). Returns NULL on allocation failure.
+ *  Freed with _mali_osk_lock_term(). */
+_mali_osk_lock_t *_mali_osk_lock_init( _mali_osk_lock_flags_t flags, u32 initial, u32 order )
+{
+    _mali_osk_lock_t *lock = NULL;
+
+       /* Validate parameters: */
+       /* Flags acceptable */
+       MALI_DEBUG_ASSERT( 0 == ( flags & ~(_MALI_OSK_LOCKFLAG_SPINLOCK
+                                      | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ
+                                      | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE
+                                      | _MALI_OSK_LOCKFLAG_READERWRITER
+                                      | _MALI_OSK_LOCKFLAG_ORDERED
+                                      | _MALI_OSK_LOCKFLAG_ONELOCK )) );
+       /* Spinlocks are always non-interruptable */
+       MALI_DEBUG_ASSERT( (((flags & _MALI_OSK_LOCKFLAG_SPINLOCK) || (flags & _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ)) && (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE))
+                                        || !(flags & _MALI_OSK_LOCKFLAG_SPINLOCK));
+       /* Parameter initial SBZ - for future expansion */
+       MALI_DEBUG_ASSERT( 0 == initial );
+
+       lock = kmalloc(sizeof(_mali_osk_lock_t), GFP_KERNEL);
+
+       if ( NULL == lock )
+       {
+               return lock;
+       }
+
+       /* Determine type of mutex: */
+    /* defaults to interruptable mutex if no flags are specified */
+
+       if ( (flags & _MALI_OSK_LOCKFLAG_SPINLOCK) )
+       {
+               /* Non-interruptable Spinlocks override all others */
+               lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_SPIN;
+               spin_lock_init( &lock->obj.spinlock );
+       }
+       else if ( (flags & _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ ) )
+       {
+               lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ;
+               lock->flags = 0;
+               spin_lock_init( &lock->obj.spinlock );
+       }
+       else if ( (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE)
+                         && (flags & _MALI_OSK_LOCKFLAG_READERWRITER) )
+       {
+               lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW;
+               init_rwsem( &lock->obj.rw_sema );
+       }
+       else
+       {
+               /* Usual mutex types */
+               if ( (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE) )
+               {
+                       lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT;
+               }
+               else
+               {
+                       lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX;
+               }
+
+               /* Initially unlocked */
+               mutex_init(&lock->obj.mutex);
+       }
+
+#ifdef DEBUG
+       /* Debug tracking of flags */
+       lock->orig_flags = flags;
+
+       /* Debug tracking of lock owner */
+       lock->owner = 0;
+       lock->nOwners = 0;
+#endif /* DEBUG */
+
+    return lock;
+}
+
+#ifdef DEBUG
+/** Debug-only accessors for the lock's owner-tracking fields. */
+u32 _mali_osk_lock_get_owner( _mali_osk_lock_t *lock )
+{
+       return lock->owner;
+}
+
+u32 _mali_osk_lock_get_number_owners( _mali_osk_lock_t *lock )
+{
+       return lock->nOwners;
+}
+
+u32 _mali_osk_lock_get_mode( _mali_osk_lock_t *lock )
+{
+       return lock->mode;
+}
+#endif /* DEBUG */
+
+/** Acquire @lock in @mode (RW or RO; RO only for reader/writer locks).
+ *  Returns _MALI_OSK_ERR_RESTARTSYSCALL if an interruptible mutex acquisition
+ *  was interrupted by a signal, otherwise _MALI_OSK_ERR_OK. For SPIN_IRQ locks
+ *  the saved IRQ flags are stashed in lock->flags for the matching signal(). */
+_mali_osk_errcode_t _mali_osk_lock_wait( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode)
+{
+    _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+       /* Parameter validation */
+       MALI_DEBUG_ASSERT_POINTER( lock );
+
+       MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+                                        || _MALI_OSK_LOCKMODE_RO == mode );
+
+       /* Only allow RO locks when the initial object was a Reader/Writer lock
+        * Since information is lost on the internal locktype, we use the original
+        * information, which is only stored when built for DEBUG */
+       MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+                                        || (_MALI_OSK_LOCKMODE_RO == mode && (_MALI_OSK_LOCKFLAG_READERWRITER & lock->orig_flags)) );
+
+       switch ( lock->type )
+       {
+       case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN:
+               spin_lock(&lock->obj.spinlock);
+               break;
+       case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ:
+               {
+                       unsigned long tmp_flags;
+                       spin_lock_irqsave(&lock->obj.spinlock, tmp_flags);
+                       lock->flags = tmp_flags;
+               }
+               break;
+
+       case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX:
+               if (mutex_lock_interruptible(&lock->obj.mutex))
+               {
+                       MALI_PRINT_ERROR(("Can not lock mutex\n"));
+                       err = _MALI_OSK_ERR_RESTARTSYSCALL;
+               }
+               break;
+
+       case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT:
+               mutex_lock(&lock->obj.mutex);
+               break;
+
+       case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW:
+               if (mode == _MALI_OSK_LOCKMODE_RO)
+        {
+            down_read(&lock->obj.rw_sema);
+        }
+        else
+        {
+            down_write(&lock->obj.rw_sema);
+        }
+               break;
+
+       default:
+               /* Reaching here indicates a programming error, so you will not get here
+                * on non-DEBUG builds */
+               MALI_DEBUG_PRINT_ERROR( ("Invalid internal lock type: %.8X", lock->type ) );
+               break;
+       }
+
+#ifdef DEBUG
+       /* This thread is now the owner of this lock */
+       if (_MALI_OSK_ERR_OK == err)
+       {
+               if (mode == _MALI_OSK_LOCKMODE_RW)
+               {
+                       /*MALI_DEBUG_ASSERT(0 == lock->owner);*/
+                       if (0 != lock->owner)
+                       {
+                               printk(KERN_ERR "%d: ERROR: Lock %p already has owner %d\n", _mali_osk_get_tid(), lock, lock->owner);
+                               dump_stack();
+                       }
+                       lock->owner = _mali_osk_get_tid();
+                       lock->mode = mode;
+                       ++lock->nOwners;
+               }
+               else /* mode == _MALI_OSK_LOCKMODE_RO */
+               {
+                       /* NOTE(review): 'owner' is OR-ed with the tid in RO mode, so it is
+                        * only a best-effort debug aid, not an exact owner record. */
+                       lock->owner |= _mali_osk_get_tid();
+                       lock->mode = mode;
+                       ++lock->nOwners;
+               }
+       }
+#endif
+
+    return err;
+}
+
+/** Release @lock previously taken with _mali_osk_lock_wait() in the same
+ *  @mode. For SPIN_IRQ locks the IRQ flags saved by wait() are restored. */
+void _mali_osk_lock_signal( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode )
+{
+       /* Parameter validation */
+       MALI_DEBUG_ASSERT_POINTER( lock );
+
+       MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+                                        || _MALI_OSK_LOCKMODE_RO == mode );
+
+       /* Only allow RO locks when the initial object was a Reader/Writer lock
+        * Since information is lost on the internal locktype, we use the original
+        * information, which is only stored when built for DEBUG */
+       MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+                                        || (_MALI_OSK_LOCKMODE_RO == mode && (_MALI_OSK_LOCKFLAG_READERWRITER & lock->orig_flags)) );
+
+#ifdef DEBUG
+       /* make sure the thread releasing the lock actually was the owner */
+       if (mode == _MALI_OSK_LOCKMODE_RW)
+       {
+               /*MALI_DEBUG_ASSERT(_mali_osk_get_tid() == lock->owner);*/
+               if (_mali_osk_get_tid() != lock->owner)
+               {
+                       printk(KERN_ERR "%d: ERROR: Lock %p owner was %d\n", _mali_osk_get_tid(), lock, lock->owner);
+                       dump_stack();
+               }
+               /* This lock now has no owner */
+               lock->owner = 0;
+               --lock->nOwners;
+       }
+       else /* mode == _MALI_OSK_LOCKMODE_RO */
+       {
+               if ((_mali_osk_get_tid() & lock->owner) != _mali_osk_get_tid())
+               {
+                       printk(KERN_ERR "%d: ERROR: Not an owner of %p lock.\n", _mali_osk_get_tid(), lock);
+                       dump_stack();
+               }
+
+               /* if this is the last thread holding this lock in R/O mode, set owner
+                * back to 0 */
+               if (0 == --lock->nOwners)
+               {
+                       lock->owner = 0;
+               }
+       }
+#endif /* DEBUG */
+
+       switch ( lock->type )
+       {
+       case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN:
+               spin_unlock(&lock->obj.spinlock);
+               break;
+       case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ:
+               spin_unlock_irqrestore(&lock->obj.spinlock, lock->flags);
+               break;
+
+       case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX:
+               /* FALLTHROUGH */
+       case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT:
+               mutex_unlock(&lock->obj.mutex);
+               break;
+
+       case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW:
+               if (mode == _MALI_OSK_LOCKMODE_RO)
+        {
+            up_read(&lock->obj.rw_sema);
+        }
+        else
+        {
+            up_write(&lock->obj.rw_sema);
+        }
+               break;
+
+       default:
+               /* Reaching here indicates a programming error, so you will not get here
+                * on non-DEBUG builds */
+               MALI_DEBUG_PRINT_ERROR( ("Invalid internal lock type: %.8X", lock->type ) );
+               break;
+       }
+}
+
+/** Destroy a lock created by _mali_osk_lock_init(). The lock must not be held. */
+void _mali_osk_lock_term( _mali_osk_lock_t *lock )
+{
+       /* Parameter validation  */
+       MALI_DEBUG_ASSERT_POINTER( lock );
+
+       /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+    kfree(lock);
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_osk_low_level_mem.c b/drivers/gpu/arm/mali400/mali/linux/mali_osk_low_level_mem.c
new file mode 100644 (file)
index 0000000..a97f990
--- /dev/null
@@ -0,0 +1,707 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_low_level_mem.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
+#include <linux/shrinker.h>
+#endif
+/* MALI_SEC */
+#ifdef CONFIG_SLP
+#include <linux/memcontrol.h>
+#endif
+
+#include "mali_osk.h"
+#include "mali_ukk.h" /* required to hook in _mali_ukk_mem_mmap handling */
+#include "mali_kernel_common.h"
+#include "mali_kernel_linux.h"
+
+#ifdef CONFIG_SLP_MALI_DBG
+#include <mach/regs-pmu.h>
+#endif
+
+static void mali_kernel_memory_vma_open(struct vm_area_struct * vma);
+static void mali_kernel_memory_vma_close(struct vm_area_struct * vma);
+
+
+/* The CPU page-fault callback signature changed in 2.6.26 (.fault vs .nopfn). */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
+#else
+static unsigned long mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
+#endif
+
+
+/* Reference counter shared by all vmas cloned from one mapping. */
+typedef struct mali_vma_usage_tracker
+{
+       int references; /* number of vmas currently referencing this mapping */
+       u32 cookie;     /* NOTE(review): opaque cookie — presumably identifies the mapping to the munmap path; confirm against mali_kernel_memory_vma_close() */
+} mali_vma_usage_tracker;
+
+#define INVALID_PAGE 0xffffffff /* sentinel returned by _kernel_page_allocate() on failure */
+
+/* Linked list structure to hold details of all OS allocations in a particular
+ * mapping
+ */
+struct AllocationList
+{
+       struct AllocationList *next;
+       u32 offset;   /* offset of this page within the mapping */
+       u32 physaddr; /* DMA/physical address returned by _kernel_page_allocate() */
+};
+
+typedef struct AllocationList AllocationList;
+
+/* Private structure to store details of a mapping region returned
+ * from _mali_osk_mem_mapregion_init
+ */
+struct MappingInfo
+{
+       struct vm_area_struct *vma;
+       struct AllocationList *list; /* head of the per-mapping allocation list */
+       struct AllocationList *tail; /* tail, for O(1) append */
+};
+
+typedef struct MappingInfo MappingInfo;
+
+static u32 _kernel_page_allocate(void);
+static void _kernel_page_release(u32 physical_address);
+static AllocationList * _allocation_list_item_get(void);
+static void _allocation_list_item_release(AllocationList * item);
+
+
+/* Variable declarations */
+static DEFINE_SPINLOCK(allocation_list_spinlock);
+/* Free-list of pre-allocated pages, protected by allocation_list_spinlock. */
+static AllocationList * pre_allocated_memory = (AllocationList*) NULL ;
+static int pre_allocated_memory_size_current  = 0;
+#ifdef MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB
+       static int pre_allocated_memory_size_max      = MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 1024 * 1024;
+#else
+       static int pre_allocated_memory_size_max      = 16 * 1024 * 1024; /* 16 MiB (comment previously said 6 MiB) */
+#endif
+
+/* vma callbacks for Mali memory mappings; see the version-gated fault
+ * handler prototypes above. */
+static struct vm_operations_struct mali_kernel_vm_ops =
+{
+       .open = mali_kernel_memory_vma_open,
+       .close = mali_kernel_memory_vma_close,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+       .fault = mali_kernel_memory_cpu_page_fault_handler
+#else
+       .nopfn = mali_kernel_memory_cpu_page_fault_handler
+#endif
+};
+
+/* Memory-shrinker callback: releases up to nr_to_scan pages from the
+ * pre-allocated free list and returns the number of reclaimable pages
+ * remaining (shrinker signature varies by kernel version). */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
+       #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static int mali_mem_shrink(int nr_to_scan, gfp_t gfp_mask)
+       #else
+static int mali_mem_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask)
+       #endif
+#else
+static int mali_mem_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+#endif
+{
+       unsigned long flags;
+       AllocationList *item;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
+       int nr = nr_to_scan;
+#else
+       int nr = sc->nr_to_scan;
+#endif
+
+       if (0 == nr)
+       {
+               /* nr == 0 is a query: report how many pages we could free. */
+               return pre_allocated_memory_size_current / PAGE_SIZE;
+       }
+
+       if (0 == pre_allocated_memory_size_current)
+       {
+               /* No pages available */
+               return 0;
+       }
+
+       if (0 == spin_trylock_irqsave(&allocation_list_spinlock, flags))
+       {
+               /* Not able to lock. */
+               return -1;
+       }
+
+       while (pre_allocated_memory && nr > 0)
+       {
+               item = pre_allocated_memory;
+               pre_allocated_memory = item->next;
+
+               _kernel_page_release(item->physaddr);
+               _mali_osk_free(item);
+
+               pre_allocated_memory_size_current -= PAGE_SIZE;
+               --nr;
+       }
+       spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+
+       return pre_allocated_memory_size_current / PAGE_SIZE;
+}
+
+/* Shrinker registered in mali_osk_low_level_mem_init(). */
+struct shrinker mali_mem_shrinker = {
+       .shrink = mali_mem_shrink,
+       .seeks = DEFAULT_SEEKS,
+};
+
+/** Reset the pre-allocated page list and register the memory shrinker. */
+void mali_osk_low_level_mem_init(void)
+{
+       pre_allocated_memory = (AllocationList*) NULL ;
+
+       register_shrinker(&mali_mem_shrinker);
+}
+
+/** Unregister the shrinker and release every pre-allocated page. */
+void mali_osk_low_level_mem_term(void)
+{
+       unregister_shrinker(&mali_mem_shrinker);
+
+       while ( NULL != pre_allocated_memory )
+       {
+               AllocationList *item;
+               item = pre_allocated_memory;
+               pre_allocated_memory = item->next;
+               _kernel_page_release(item->physaddr);
+               _mali_osk_free( item );
+       }
+       pre_allocated_memory_size_current  = 0;
+}
+
+/* Allocate one zeroed HIGHUSER page, flush it from CPU caches via
+ * dma_map_page(), and return its bus/physical address.
+ * Returns INVALID_PAGE on allocation failure. */
+static u32 _kernel_page_allocate(void)
+{
+       struct page *new_page;
+       u32 linux_phys_addr;
+
+       new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
+
+       if ( NULL == new_page )
+       {
+               return INVALID_PAGE;
+       }
+
+/* MALI_SEC */
+#ifdef CONFIG_SLP
+       /* SLP: charging 3D allocated page */
+       mem_cgroup_newpage_charge(new_page, current->mm, GFP_HIGHUSER |
+       __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
+
+#ifdef CONFIG_SLP_LOWMEM_NOTIFY
+       inc_mm_counter(current->mm, MM_ANONPAGES);
+#endif
+#endif
+       /* Ensure page is flushed from CPU caches. */
+       linux_phys_addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+       return linux_phys_addr;
+}
+
+/* Undo _kernel_page_allocate(): dma-unmap, uncharge (SLP), and free the page
+ * identified by its physical address. */
+static void _kernel_page_release(u32 physical_address)
+{
+       struct page *unmap_page;
+
+       /* NOTE(review): '#if 1' is leftover debug scaffolding around the unmap. */
+       #if 1
+       dma_unmap_page(NULL, physical_address, PAGE_SIZE, DMA_BIDIRECTIONAL);
+       #endif
+
+       unmap_page = pfn_to_page( physical_address >> PAGE_SHIFT );
+       MALI_DEBUG_ASSERT_POINTER( unmap_page );
+/* MALI_SEC */
+#ifdef CONFIG_SLP
+       /* SLP: uncharging 3D allocated page */
+       mem_cgroup_uncharge_page(unmap_page);
+
+#ifdef CONFIG_SLP_LOWMEM_NOTIFY
+       if (current && current->mm)
+               dec_mm_counter(current->mm, MM_ANONPAGES);
+#endif
+#endif
+       __free_page( unmap_page );
+}
+
+/* Take a page record from the pre-allocated free list, or allocate a fresh
+ * record + page if the list is empty. Returns NULL on out-of-memory
+ * (non-fatal; callers handle it). */
+static AllocationList * _allocation_list_item_get(void)
+{
+       AllocationList *item = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&allocation_list_spinlock,flags);
+       if ( pre_allocated_memory )
+       {
+               item = pre_allocated_memory;
+               pre_allocated_memory = pre_allocated_memory->next;
+               pre_allocated_memory_size_current -= PAGE_SIZE;
+
+               spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+               return item;
+       }
+       spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+
+       item = _mali_osk_malloc( sizeof(AllocationList) );
+       if ( NULL == item)
+       {
+               return NULL;
+       }
+
+       item->physaddr = _kernel_page_allocate();
+       if ( INVALID_PAGE == item->physaddr )
+       {
+               /* Non-fatal error condition, out of memory. Upper levels will handle this. */
+               _mali_osk_free( item );
+               return NULL;
+       }
+       return item;
+}
+
+/* Return a page record to the pre-allocated free list, or free it outright
+ * when the list is already at pre_allocated_memory_size_max. */
+static void _allocation_list_item_release(AllocationList * item)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&allocation_list_spinlock,flags);
+       if ( pre_allocated_memory_size_current < pre_allocated_memory_size_max)
+       {
+               item->next = pre_allocated_memory;
+               pre_allocated_memory = item;
+               pre_allocated_memory_size_current += PAGE_SIZE;
+               spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+               return;
+       }
+       spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+
+       _kernel_page_release(item->physaddr);
+       _mali_osk_free( item );
+}
+
+/* CPU page-fault handler for Mali VMAs.  The prototype differs by kernel
+ * version: >= 2.6.26 uses the .fault/vm_fault interface, older kernels use
+ * the .nopfn-style (vma, address) interface. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
+#else
+static unsigned long mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
+#endif
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+       void __user * address;
+       address = vmf->virtual_address;
+#endif
+       /*
+        * We always fail the call since all memory is pre-faulted when assigned to the process.
+        * Only the Mali cores can use page faults to extend buffers.
+       */
+
+       MALI_DEBUG_PRINT(1, ("Page-fault in Mali memory region caused by the CPU.\n"));
+       MALI_DEBUG_PRINT(1, ("Tried to access %p (process local virtual address) which is not currently mapped to any Mali memory.\n", (void*)address));
+
+       /* Report SIGBUS in whichever form this kernel's fault API expects */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+       return VM_FAULT_SIGBUS;
+#else
+       return NOPFN_SIGBUS;
+#endif
+}
+
+/* VMA open callback: bump the reference count on the usage tracker stored in
+ * vm_private_data (called by the kernel when the mapping is duplicated). */
+static void mali_kernel_memory_vma_open(struct vm_area_struct * vma)
+{
+       mali_vma_usage_tracker * vma_usage_tracker;
+       MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));
+
+       vma_usage_tracker = (mali_vma_usage_tracker*)vma->vm_private_data;
+       vma_usage_tracker->references++;
+
+       return;
+}
+
+/* VMA close callback: drop a reference on the usage tracker; when the last
+ * reference goes away, tear down the Mali-side mapping via
+ * _mali_ukk_mem_munmap() using the descriptor stashed in the tracker cookie. */
+static void mali_kernel_memory_vma_close(struct vm_area_struct * vma)
+{
+       _mali_uk_mem_munmap_s args = {0, };
+       mali_memory_allocation * descriptor;
+       mali_vma_usage_tracker * vma_usage_tracker;
+       MALI_DEBUG_PRINT(3, ("Close called on vma %p\n", vma));
+
+       vma_usage_tracker = (mali_vma_usage_tracker*)vma->vm_private_data;
+
+       BUG_ON(!vma_usage_tracker);
+       BUG_ON(0 == vma_usage_tracker->references);
+
+       vma_usage_tracker->references--;
+
+       if (0 != vma_usage_tracker->references)
+       {
+               MALI_DEBUG_PRINT(3, ("Ignoring this close, %d references still exists\n", vma_usage_tracker->references));
+               return;
+       }
+
+       /** @note args->context unused, initialized to 0.
+        * Instead, we use the memory_session from the cookie */
+
+       descriptor = (mali_memory_allocation *)vma_usage_tracker->cookie;
+
+       args.cookie = (u32)descriptor;
+       args.mapping = descriptor->mapping;
+       args.size = descriptor->size;
+
+       _mali_ukk_mem_munmap( &args );
+
+       /* vma_usage_tracker is free()d by _mali_osk_mem_mapregion_term().
+        * In the case of the memory engine, it is called as the release function that has been registered with the engine*/
+}
+
+/* Full read/write memory barrier (wraps the kernel's mb()). */
+void _mali_osk_mem_barrier( void )
+{
+       mb();
+}
+
+/* Write-only memory barrier (wraps the kernel's wmb()). */
+void _mali_osk_write_mem_barrier( void )
+{
+       wmb();
+}
+
+/* Map a physical IO region uncached into kernel space.  The description is
+ * unused here; returns NULL-equivalent on ioremap failure. */
+mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description )
+{
+       return (mali_io_address)ioremap_nocache(phys, size);
+}
+
+/* Undo _mali_osk_mem_mapioregion(); phys and size are unused by iounmap. */
+void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address virt )
+{
+       iounmap((void*)virt);
+}
+
+/* Allocate a page-aligned, DMA-coherent region for Mali page tables.
+ * On success *phys receives the physical (DMA) address and the CPU virtual
+ * address is returned; returns 0 on failure. */
+mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size )
+{
+       void * virt;
+       MALI_DEBUG_ASSERT_POINTER( phys );
+       MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+       MALI_DEBUG_ASSERT( 0 != size );
+
+       /* dma_alloc_* uses a limited region of address space. On most arch/marchs
+        * 2 to 14 MiB is available. This should be enough for the page tables, which
+        * currently is the only user of this function. */
+       virt = dma_alloc_coherent(NULL, size, phys, GFP_KERNEL | GFP_DMA );
+
+       if ( NULL == virt )
+       {
+               MALI_DEBUG_PRINT(5, ("allocioregion: Failed to allocate Pagetable memory, size=0x%.8X\n", size ));
+               return 0;
+       }
+
+       /* Fixed format arguments: print the CPU pointer with %p and the actual
+        * physical address (*phys) — the previous code printed the pointer
+        * value of the out-parameter with %x.  Moved after the NULL check so
+        * *phys is only read once the allocation has succeeded. */
+       MALI_DEBUG_PRINT(3, ("Page table virt: %p = dma_alloc_coherent(size:%d, phys:0x%x, )\n", virt, size, *phys));
+
+       MALI_DEBUG_ASSERT( 0 == (*phys & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+       return (mali_io_address)virt;
+}
+
+/* Free a region obtained from _mali_osk_mem_allocioregion(). */
+void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address virt )
+{
+       MALI_DEBUG_ASSERT_POINTER( (void*)virt );
+       MALI_DEBUG_ASSERT( 0 != size );
+       MALI_DEBUG_ASSERT( 0 == (phys & ( (1 << PAGE_SHIFT) - 1 )) );
+
+       dma_free_coherent(NULL, size, virt, phys);
+}
+
+/* Claim a physical memory region.  Under the GPL license the platform device
+ * framework already requested the resources, so this is a no-op. */
+_mali_osk_errcode_t inline _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description )
+{
+#if MALI_LICENSE_IS_GPL
+       return _MALI_OSK_ERR_OK; /* GPL driver gets the mem region for the resources registered automatically */
+#else
+       return ((NULL == request_mem_region(phys, size, description)) ? _MALI_OSK_ERR_NOMEM : _MALI_OSK_ERR_OK);
+#endif
+}
+
+/* Release a region claimed by _mali_osk_mem_reqregion() (no-op under GPL). */
+void inline _mali_osk_mem_unreqregion( u32 phys, u32 size )
+{
+#if !MALI_LICENSE_IS_GPL
+       release_mem_region(phys, size);
+#endif
+}
+
+/* 32-bit register write without a memory barrier (raw write, LE byte order). */
+void inline _mali_osk_mem_iowrite32_relaxed( volatile mali_io_address addr, u32 offset, u32 val )
+{
+       __raw_writel(cpu_to_le32(val),((u8*)addr) + offset);
+}
+
+#ifdef CONFIG_SLP_MALI_DBG
+/* SLP debug variant: identical to _mali_osk_mem_iowrite32_relaxed(). */
+void inline _mali_osk_mem_iowrite32_relaxed_cpu( volatile mali_io_address addr,
+                                                       u32 offset, u32 val )
+{
+       __raw_writel(cpu_to_le32(val),((u8*)addr) + offset);
+}
+#endif
+
+/* 32-bit register read (with the ordering guarantees of ioread32). */
+u32 inline _mali_osk_mem_ioread32( volatile mali_io_address addr, u32 offset )
+{
+       return ioread32(((u8*)addr) + offset);
+}
+
+#ifdef CONFIG_SLP_MALI_DBG
+/* SLP debug variant: identical to _mali_osk_mem_ioread32(). */
+u32 inline _mali_osk_mem_ioread32_cpu(volatile mali_io_address addr, u32 offset)
+{
+       return ioread32(((u8*)addr) + offset);
+}
+#endif
+
+/* 32-bit register write (with the ordering guarantees of iowrite32). */
+void inline _mali_osk_mem_iowrite32( volatile mali_io_address addr, u32 offset, u32 val )
+{
+       iowrite32(val, ((u8*)addr) + offset);
+}
+
+/* Flush all CPU caches — intentionally a no-op here. */
+void _mali_osk_cache_flushall( void )
+{
+       /** @note Cached memory is not currently supported in this implementation */
+}
+
+/* Ensure writes to an uncached mapping are visible: a write barrier suffices
+ * because the mapping is write-combined/uncached. */
+void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size )
+{
+       _mali_osk_write_mem_barrier();
+}
+
+/* Prepare a user-space mapping region for a descriptor whose
+ * process_addr_mapping_info initially holds the VMA from mmap().  Replaces it
+ * with a MappingInfo, installs a reference-counted usage tracker in the VMA,
+ * and marks the VMA as IO / non-expandable / non-copied, write-combined.
+ * Returns _MALI_OSK_ERR_FAULT on bad input or allocation failure. */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_init( mali_memory_allocation * descriptor )
+{
+       struct vm_area_struct *vma;
+       mali_vma_usage_tracker * vma_usage_tracker;
+       MappingInfo *mappingInfo;
+
+       if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+       MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+       vma = (struct vm_area_struct*)descriptor->process_addr_mapping_info;
+
+       if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+       /* Re-write the process_addr_mapping_info */
+       mappingInfo = _mali_osk_calloc( 1, sizeof(MappingInfo) );
+
+       if ( NULL == mappingInfo ) return _MALI_OSK_ERR_FAULT;
+
+       vma_usage_tracker = _mali_osk_calloc( 1, sizeof(mali_vma_usage_tracker) );
+
+       if (NULL == vma_usage_tracker)
+       {
+               MALI_DEBUG_PRINT(2, ("Failed to allocate memory to track memory usage\n"));
+               _mali_osk_free( mappingInfo );
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       mappingInfo->vma = vma;
+       descriptor->process_addr_mapping_info = mappingInfo;
+
+       /* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
+       descriptor->mapping = (void __user*)vma->vm_start;
+       /* list member is already NULL */
+
+       /*
+         set some bits which indicate that:
+         The memory is IO memory, meaning that no paging is to be performed and the memory should not be included in crash dumps
+         The memory is reserved, meaning that it's present and can never be paged out (see also previous entry)
+       */
+       vma->vm_flags |= VM_IO;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vma->vm_flags |= VM_DONTCOPY;
+
+       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+       vma->vm_ops = &mali_kernel_vm_ops; /* Operations used on any memory system */
+
+       vma_usage_tracker->references = 1; /* set initial reference count to be 1 as vma_open won't be called for the first mmap call */
+       vma_usage_tracker->cookie = (u32)descriptor; /* cookie for munmap */
+
+       vma->vm_private_data = vma_usage_tracker;
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Tear down the per-mapping bookkeeping created by
+ * _mali_osk_mem_mapregion_init().  The kernel removes the actual page-table
+ * mapping as part of munmap; here we only free the usage tracker and
+ * MappingInfo.  All page allocations must already have been unmapped. */
+void _mali_osk_mem_mapregion_term( mali_memory_allocation * descriptor )
+{
+       struct vm_area_struct* vma;
+       mali_vma_usage_tracker * vma_usage_tracker;
+       MappingInfo *mappingInfo;
+
+       if (NULL == descriptor) return;
+
+       MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+       mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+       MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+       /* Linux does the right thing as part of munmap to remove the mapping
+        * All that remains is that we remove the vma_usage_tracker setup in init() */
+       vma = mappingInfo->vma;
+
+       MALI_DEBUG_ASSERT_POINTER( vma );
+
+       /* ASSERT that there are no allocations on the list. Unmap should've been
+        * called on all OS allocations. */
+       MALI_DEBUG_ASSERT( NULL == mappingInfo->list );
+
+       vma_usage_tracker = vma->vm_private_data;
+
+       /* We only get called if mem_mapregion_init succeeded */
+       _mali_osk_free(vma_usage_tracker);
+
+       _mali_osk_free( mappingInfo );
+       return;
+}
+
+/* Map physical memory into the user-space VMA at the given offset.
+ * Two modes:
+ *  - *phys_addr == MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC:
+ *    allocate a page here, remap it into the VMA, record it on the
+ *    MappingInfo list and write the real physical address back to
+ *    *phys_addr.  (NOTE(review): only one page is allocated per call, so
+ *    this path presumably expects size == one CPU page — confirm callers.)
+ *  - otherwise: remap the caller-supplied physical range directly. */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_map( mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size )
+{
+       struct vm_area_struct *vma;
+       MappingInfo *mappingInfo;
+
+       if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+       MALI_DEBUG_ASSERT_POINTER( phys_addr );
+
+       MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+       MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+       MALI_DEBUG_ASSERT( 0 == (offset & ~_MALI_OSK_CPU_PAGE_MASK));
+
+       if (NULL == descriptor->mapping) return _MALI_OSK_ERR_INVALID_ARGS;
+
+       if (size > (descriptor->size - offset))
+       {
+               MALI_DEBUG_PRINT(1,("_mali_osk_mem_mapregion_map: virtual memory area not large enough to map physical 0x%x size %x into area 0x%x at offset 0x%xr\n",
+                                   *phys_addr, size, descriptor->mapping, offset));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+       MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+       vma = mappingInfo->vma;
+
+       if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+       MALI_DEBUG_PRINT(7, ("Process map: mapping 0x%08X to process address 0x%08lX length 0x%08X\n", *phys_addr, (long unsigned int)(descriptor->mapping + offset), size));
+
+       if ( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == *phys_addr )
+       {
+               _mali_osk_errcode_t ret;
+               AllocationList *alloc_item;
+               u32 linux_phys_frame_num;
+
+               alloc_item = _allocation_list_item_get();
+               if (NULL == alloc_item)
+               {
+                       MALI_DEBUG_PRINT(1, ("Failed to allocate list item\n"));
+                       return _MALI_OSK_ERR_NOMEM;
+               }
+
+               linux_phys_frame_num = alloc_item->physaddr >> PAGE_SHIFT;
+
+               ret = ( remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, linux_phys_frame_num, size, vma->vm_page_prot) ) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+               if ( ret != _MALI_OSK_ERR_OK)
+               {
+                       /* Mapping failed: return the page to the pool/allocator */
+                       MALI_PRINT_ERROR(("%s %d could not remap_pfn_range()\n", __FUNCTION__, __LINE__));
+                       _allocation_list_item_release(alloc_item);
+                       return ret;
+               }
+
+               /* Put our alloc_item into the list of allocations on success */
+               if (NULL == mappingInfo->list)
+               {
+                       mappingInfo->list = alloc_item;
+               }
+               else
+               {
+                       mappingInfo->tail->next = alloc_item;
+               }
+
+               mappingInfo->tail = alloc_item;
+               alloc_item->next = NULL;
+               alloc_item->offset = offset;
+
+               /* Write out new physical address on success */
+               *phys_addr = alloc_item->physaddr;
+
+               return ret;
+       }
+
+       /* Otherwise, Use the supplied physical address */
+
+       /* ASSERT that supplied phys_addr is page aligned */
+       MALI_DEBUG_ASSERT( 0 == ((*phys_addr) & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+       return ( remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, *phys_addr >> PAGE_SHIFT, size, vma->vm_page_prot) ) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+}
+
+/* Undo _mali_osk_mem_mapregion_map() for [offset, offset+size).  For
+ * OS-allocated physical pages, walks the MappingInfo allocation list one CPU
+ * page at a time, unlinks each matching entry and releases its page.  The
+ * kernel itself removes the user-space page-table entries during munmap. */
+void _mali_osk_mem_mapregion_unmap( mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags )
+{
+       MappingInfo *mappingInfo;
+
+   if (NULL == descriptor) return;
+
+       MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+       MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+       MALI_DEBUG_ASSERT( 0 == (offset & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+       if (NULL == descriptor->mapping) return;
+
+       if (size > (descriptor->size - offset))
+       {
+               MALI_DEBUG_PRINT(1,("_mali_osk_mem_mapregion_unmap: virtual memory area not large enough to unmap size %x from area 0x%x at offset 0x%x\n",
+                                                       size, descriptor->mapping, offset));
+               return;
+       }
+       mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+       MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+       if ( 0 != (flags & _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR) )
+       {
+               /* This physical RAM was allocated in _mali_osk_mem_mapregion_map and
+                * so needs to be unmapped
+                */
+               while (size)
+               {
+                       /* First find the allocation in the list of allocations */
+                       AllocationList *alloc = mappingInfo->list;
+                       AllocationList **prev = &(mappingInfo->list);
+
+                       /* Linear scan for the entry recorded at this offset */
+                       while (NULL != alloc && alloc->offset != offset)
+                       {
+                               prev = &(alloc->next);
+                               alloc = alloc->next;
+                       }
+                       if (alloc == NULL) {
+                               MALI_DEBUG_PRINT(1, ("Unmapping memory that isn't mapped\n"));
+                               size -= _MALI_OSK_CPU_PAGE_SIZE;
+                               offset += _MALI_OSK_CPU_PAGE_SIZE;
+                               continue;
+                       }
+
+                       /* Unlink the entry and release its backing page */
+                       *prev = alloc->next;
+                       _allocation_list_item_release(alloc);
+
+                       /* Move onto the next allocation */
+                       size -= _MALI_OSK_CPU_PAGE_SIZE;
+                       offset += _MALI_OSK_CPU_PAGE_SIZE;
+               }
+       }
+
+       /* Linux does the right thing as part of munmap to remove the mapping */
+
+       return;
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_osk_mali.c b/drivers/gpu/arm/mali400/mali/linux/mali_osk_mali.c
new file mode 100644 (file)
index 0000000..aef3212
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.c
+ * Implementation of the OS abstraction layer which is specific for the Mali kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/mali/mali_utgard.h>
+
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h" /* MALI_xxx macros */
+#include "mali_osk.h"           /* kernel side OS functions */
+#include "mali_uk_types.h"
+#include "mali_kernel_linux.h"
+
+/* Look up the platform-device MEM resource starting at the given physical
+ * address.  If res is non-NULL, fills in base/description and, when the next
+ * resource slot is an IRQ, its number (otherwise -1).  Returns
+ * _MALI_OSK_ERR_ITEM_NOT_FOUND when no device or no matching resource. */
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res)
+{
+       int i;
+
+       if (NULL == mali_platform_device)
+       {
+               /* Not connected to a device */
+               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       }
+
+       for (i = 0; i < mali_platform_device->num_resources; i++)
+       {
+               if (IORESOURCE_MEM == resource_type(&(mali_platform_device->resource[i])) &&
+                   mali_platform_device->resource[i].start == addr)
+               {
+                       if (NULL != res)
+                       {
+                               res->base = addr;
+                               res->description = mali_platform_device->resource[i].name;
+
+                               /* Any (optional) IRQ resource belonging to this resource will follow */
+                               if ((i + 1) < mali_platform_device->num_resources &&
+                                   IORESOURCE_IRQ == resource_type(&(mali_platform_device->resource[i+1])))
+                               {
+                                       res->irq = mali_platform_device->resource[i+1].start;
+                               }
+                               else
+                               {
+                                       res->irq = -1;
+                               }
+                       }
+                       return _MALI_OSK_ERR_OK;
+               }
+       }
+
+       return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
+/* Return the lowest start address among the platform device's MEM resources,
+ * or 0 when there is no device or no MEM resource. */
+u32 _mali_osk_resource_base_address(void)
+{
+       u32 lowest_addr = 0xFFFFFFFF;
+       u32 ret = 0;
+
+       if (NULL != mali_platform_device)
+       {
+               int i;
+               for (i = 0; i < mali_platform_device->num_resources; i++)
+               {
+                       if (mali_platform_device->resource[i].flags & IORESOURCE_MEM &&
+                           mali_platform_device->resource[i].start < lowest_addr)
+                       {
+                               lowest_addr = mali_platform_device->resource[i].start;
+                               ret = lowest_addr;
+                       }
+               }
+       }
+
+       return ret;
+}
+
+/* Copy the platform_data attached to the Mali platform device into the
+ * OS-neutral _mali_osk_device_data struct.  Returns ITEM_NOT_FOUND when no
+ * device or no platform_data is present. */
+_mali_osk_errcode_t _mali_osk_device_data_get(struct _mali_osk_device_data *data)
+{
+       MALI_DEBUG_ASSERT_POINTER(data);
+
+       if (NULL != mali_platform_device)
+       {
+               struct mali_gpu_device_data* os_data = NULL;
+
+               os_data = (struct mali_gpu_device_data*)mali_platform_device->dev.platform_data;
+               if (NULL != os_data)
+               {
+                       /* Copy data from OS dependant struct to Mali neutral struct (identical!) */
+                       data->dedicated_mem_start = os_data->dedicated_mem_start;
+                       data->dedicated_mem_size = os_data->dedicated_mem_size;
+                       data->shared_mem_size = os_data->shared_mem_size;
+                       data->fb_start = os_data->fb_start;
+                       data->fb_size = os_data->fb_size;
+                       data->utilization_interval = os_data->utilization_interval;
+                       data->utilization_handler = os_data->utilization_handler;
+                       return _MALI_OSK_ERR_OK;
+               }
+       }
+
+       return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_osk_math.c b/drivers/gpu/arm/mali400/mali/linux/mali_osk_math.c
new file mode 100644 (file)
index 0000000..d6e3786
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_math.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/bitops.h>
+
+/* Count leading zeros of a 32-bit value.  fls() returns the 1-based index of
+ * the most significant set bit (0 for input 0), so this yields 32 for 0. */
+u32 inline _mali_osk_clz( u32 input )
+{
+       return 32-fls(input);
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_osk_memory.c b/drivers/gpu/arm/mali400/mali/linux/mali_osk_memory.c
new file mode 100644 (file)
index 0000000..5d18d39
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_memory.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/** Zero-initialised allocation of n elements of the given size (GFP_KERNEL). */
+inline void *_mali_osk_calloc( u32 n, u32 size )
+{
+	return kcalloc(n, size, GFP_KERNEL);
+}
+
+/** Uninitialised kernel allocation (GFP_KERNEL). */
+inline void *_mali_osk_malloc( u32 size )
+{
+	return kmalloc(size, GFP_KERNEL);
+}
+
+/** Free memory from _mali_osk_malloc()/_mali_osk_calloc(); NULL is a no-op. */
+inline void _mali_osk_free( void *ptr )
+{
+	kfree(ptr);
+}
+
+/** Virtually-contiguous allocation for larger buffers (vmalloc). */
+inline void *_mali_osk_valloc( u32 size )
+{
+	return vmalloc(size);
+}
+
+/** Free memory from _mali_osk_valloc(). */
+inline void _mali_osk_vfree( void *ptr )
+{
+	vfree(ptr);
+}
+
+/** Copy len bytes from src to dst (regions must not overlap); returns dst. */
+inline void *_mali_osk_memcpy( void *dst, const void *src, u32 len )
+{
+	return memcpy(dst, src, len);
+}
+
+/** Fill n bytes at s with byte value c; returns s. */
+inline void *_mali_osk_memset( void *s, u32 c, u32 n )
+{
+	return memset(s, c, n);
+}
+
+/* Check whether further allocation is allowed — always permitted on Linux. */
+mali_bool _mali_osk_mem_check_allocated( u32 max_allocated )
+{
+       /* No need to prevent an out-of-memory dialogue appearing on Linux,
+        * so we always return MALI_TRUE.
+        */
+       return MALI_TRUE;
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_osk_misc.c b/drivers/gpu/arm/mali400/mali/linux/mali_osk_misc.c
new file mode 100644 (file)
index 0000000..942e062
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_misc.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+
+/** printf-style debug output, routed to the kernel log via vprintk(). */
+void _mali_osk_dbgmsg( const char *fmt, ... )
+{
+	va_list args;
+
+	va_start(args, fmt);
+	vprintk(fmt, args);
+	va_end(args);
+}
+
+/* Bounded formatted print into buf.  Uses vscnprintf, which returns the
+ * number of characters actually written (excluding the NUL), so the result
+ * never exceeds size-1 even on truncation. */
+u32 _mali_osk_snprintf( char *buf, u32 size, const char *fmt, ... )
+{
+       int res;
+       va_list args;
+       va_start(args, fmt);
+
+       res = vscnprintf(buf, (size_t)size, fmt, args);
+
+       va_end(args);
+       return res;
+}
+
+void _mali_osk_abort(void)
+{
+       /* make a simple fault by dereferencing a NULL pointer */
+       dump_stack();
+       *(int *)0 = 0;
+}
+
+/* Breakpoint hook — implemented as a fatal abort on Linux. */
+void _mali_osk_break(void)
+{
+       _mali_osk_abort();
+}
+
+/* Return the calling process ID (tgid of current). */
+u32 _mali_osk_get_pid(void)
+{
+       /* Thread group ID is the process ID on Linux */
+       return (u32)current->tgid;
+}
+
+/* Return the calling thread ID (pid of current). */
+u32 _mali_osk_get_tid(void)
+{
+       /* pid is actually identifying the thread on Linux */
+       return (u32)current->pid;
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_osk_notification.c b/drivers/gpu/arm/mali400/mali/linux/mali_osk_notification.c
new file mode 100644 (file)
index 0000000..d631427
--- /dev/null
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_notification.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/**
+ * Declaration of the notification queue object type
+ * Contains a linked list of notification pending delivery to user space.
+ * It also contains a wait queue of exclusive waiters blocked in the ioctl
+ * When a new notification is posted a single thread is resumed.
+ */
+struct _mali_osk_notification_queue_t_struct
+{
+       spinlock_t mutex; /**< Mutex protecting the list */
+       wait_queue_head_t receive_queue; /**< Threads waiting for new entries to the queue */
+       struct list_head head; /**< List of notifications waiting to be picked up */
+};
+
+/* Wrapper pairing the public notification payload with its internal list
+ * linkage; recovered from the payload pointer via container_of(). */
+typedef struct _mali_osk_notification_wrapper_t_struct
+{
+       struct list_head list;           /**< Internal linked list variable */
+       _mali_osk_notification_t data;   /**< Notification data */
+} _mali_osk_notification_wrapper_t;
+
+/* Allocate and initialise an empty notification queue (spinlock, wait queue,
+ * list head).  Returns NULL on allocation failure. */
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void )
+{
+       _mali_osk_notification_queue_t *        result;
+
+       result = (_mali_osk_notification_queue_t *)kmalloc(sizeof(_mali_osk_notification_queue_t), GFP_KERNEL);
+       if (NULL == result) return NULL;
+
+       spin_lock_init(&result->mutex);
+       init_waitqueue_head(&result->receive_queue);
+       INIT_LIST_HEAD(&result->head);
+
+       return result;
+}
+
+/* Create a notification of the given type with a size-byte result buffer.
+ * Wrapper and buffer are allocated in one kmalloc; result_buffer points just
+ * past the wrapper (NULL when size is 0).  Returns NULL on failure. */
+_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size )
+{
+       /* OPT Recycling of notification objects */
+       _mali_osk_notification_wrapper_t *notification;
+
+       notification = (_mali_osk_notification_wrapper_t *)kmalloc( sizeof(_mali_osk_notification_wrapper_t) + size,
+                                                                   GFP_KERNEL | __GFP_HIGH | __GFP_REPEAT);
+       if (NULL == notification)
+       {
+               MALI_DEBUG_PRINT(1, ("Failed to create a notification object\n"));
+               return NULL;
+       }
+
+       /* Init the list */
+       INIT_LIST_HEAD(&notification->list);
+
+       if (0 != size)
+       {
+               notification->data.result_buffer = ((u8*)notification) + sizeof(_mali_osk_notification_wrapper_t);
+       }
+       else
+       {
+               notification->data.result_buffer = NULL;
+       }
+
+       /* set up the non-allocating fields */
+       notification->data.notification_type = type;
+       notification->data.result_buffer_size = size;
+
+       /* all ok */
+       return &(notification->data);
+}
+
+/* Free a notification created by _mali_osk_notification_create().  The
+ * wrapper (and inline result buffer) is recovered via container_of. */
+void _mali_osk_notification_delete( _mali_osk_notification_t *object )
+{
+       _mali_osk_notification_wrapper_t *notification;
+       MALI_DEBUG_ASSERT_POINTER( object );
+
+       notification = container_of( object, _mali_osk_notification_wrapper_t, data );
+
+       /* Free the container */
+       kfree(notification);
+}
+
+/* Destroy a notification queue.  Caller must ensure it is already empty:
+ * queued notifications are NOT freed here. */
+void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue )
+{
+       MALI_DEBUG_ASSERT_POINTER( queue );
+
+       /* not much to do, just free the memory */
+       kfree(queue);
+}
+
+/* Append a notification to the queue and wake one waiter.  With upper-half
+ * scheduling this may run in IRQ context, hence the irqsave lock variant. */
+void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object )
+{
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       unsigned long irq_flags;
+#endif
+
+       _mali_osk_notification_wrapper_t *notification;
+       MALI_DEBUG_ASSERT_POINTER( queue );
+       MALI_DEBUG_ASSERT_POINTER( object );
+
+       notification = container_of( object, _mali_osk_notification_wrapper_t, data );
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       spin_lock_irqsave(&queue->mutex, irq_flags);
+#else
+       spin_lock(&queue->mutex);
+#endif
+
+       list_add_tail(&notification->list, &queue->head);
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       spin_unlock_irqrestore(&queue->mutex, irq_flags);
+#else
+       spin_unlock(&queue->mutex);
+#endif
+
+       /* and wake up one possible exclusive waiter */
+       wake_up(&queue->receive_queue);
+}
+
+/* Non-blocking dequeue: pop the oldest notification into *result.  Returns
+ * _MALI_OSK_ERR_ITEM_NOT_FOUND when the queue is empty (*result untouched). */
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result )
+{
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       unsigned long irq_flags;
+#endif
+
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       _mali_osk_notification_wrapper_t *wrapper_object;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       spin_lock_irqsave(&queue->mutex, irq_flags);
+#else
+       spin_lock(&queue->mutex);
+#endif
+
+       if (!list_empty(&queue->head))
+       {
+               /* Oldest entry is at the list head (send appends at the tail) */
+               wrapper_object = list_entry(queue->head.next, _mali_osk_notification_wrapper_t, list);
+               *result = &(wrapper_object->data);
+               list_del_init(&wrapper_object->list);
+               ret = _MALI_OSK_ERR_OK;
+       }
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       spin_unlock_irqrestore(&queue->mutex, irq_flags);
+#else
+       spin_unlock(&queue->mutex);
+#endif
+
+       return ret;
+}
+
+/* Blocking receive: sleep interruptibly until a notification can be dequeued
+ * into *result.  If the wait is interrupted by a signal, returns
+ * _MALI_OSK_ERR_RESTARTSYSCALL so the ioctl can be restarted. */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result )
+{
+    /* check input */
+       MALI_DEBUG_ASSERT_POINTER( queue );
+       MALI_DEBUG_ASSERT_POINTER( result );
+
+       /* default result */
+       *result = NULL;
+
+       if (wait_event_interruptible(queue->receive_queue,
+                                    _MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(queue, result)))
+       {
+               return _MALI_OSK_ERR_RESTARTSYSCALL;
+       }
+
+       return _MALI_OSK_ERR_OK; /* all ok */
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_osk_pm.c b/drivers/gpu/arm/mali400/mali/linux/mali_osk_pm.c
new file mode 100644 (file)
index 0000000..f2351bd
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_pm.c
+ * Implementation of the callback functions from common power management
+ */
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_linux.h"
+
+/* Counts outstanding "real" power references taken via
+ * _mali_osk_pm_dev_ref_add(); read by _mali_osk_pm_dev_ref_add_no_power_on()
+ * to report whether the GPU currently holds a power reference. The
+ * authoritative power state lives in the runtime-PM usage counter of
+ * mali_platform_device. */
+static _mali_osk_atomic_t mali_pm_ref_count;
+
+/* Initialise PM bookkeeping. (Original TODO: this is really an _init and
+ * only sets up the atomic counter.) */
+void _mali_osk_pm_dev_enable(void)
+{
+	_mali_osk_atomic_init(&mali_pm_ref_count, 0);
+}
+
+/* Tear down PM bookkeeping. (Original TODO: this is really a _term.) */
+void _mali_osk_pm_dev_disable(void)
+{
+	_mali_osk_atomic_term(&mali_pm_ref_count);
+}
+
+/* Can NOT run in atomic context */
+/**
+ * Take a power reference; pm_runtime_get_sync() may synchronously resume
+ * (power up) the device, hence the no-atomic-context restriction.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT on failure.
+ * NOTE(review): on failure the runtime-PM usage count raised by
+ * pm_runtime_get_sync() is not dropped with a pm_runtime_put() — confirm
+ * this is intended.
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_add(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+	int err;
+	MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+	err = pm_runtime_get_sync(&(mali_platform_device->dev));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+	pm_runtime_mark_last_busy(&(mali_platform_device->dev));
+#endif
+	if (0 > err)
+	{
+		MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get_sync() returned error code %d\n", err));
+		return _MALI_OSK_ERR_FAULT;
+	}
+	_mali_osk_atomic_inc(&mali_pm_ref_count);
+	MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref taken (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
+#endif
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Can run in atomic context */
+/* Drop a power reference taken with _mali_osk_pm_dev_ref_add(); uses the
+ * autosuspend variant where available so the GPU powers down lazily. */
+void _mali_osk_pm_dev_ref_dec(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+	MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+	_mali_osk_atomic_dec(&mali_pm_ref_count);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+	pm_runtime_mark_last_busy(&(mali_platform_device->dev));
+	pm_runtime_put_autosuspend(&(mali_platform_device->dev));
+#else
+	pm_runtime_put(&(mali_platform_device->dev));
+#endif
+	MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
+#endif
+}
+
+/* Can run in atomic context */
+/* Take a reference WITHOUT powering the device up (pm_runtime_get_noresume).
+ * Returns MALI_TRUE when at least one "real" power reference was already
+ * held at the time of the call, i.e. the GPU should be powered. */
+mali_bool _mali_osk_pm_dev_ref_add_no_power_on(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+	u32 ref;
+	MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+	pm_runtime_get_noresume(&(mali_platform_device->dev));
+	ref = _mali_osk_atomic_read(&mali_pm_ref_count);
+	MALI_DEBUG_PRINT(4, ("Mali OSK PM: No-power ref taken (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
+	return ref > 0 ? MALI_TRUE : MALI_FALSE;
+#else
+	return MALI_TRUE;
+#endif
+}
+
+/* Can run in atomic context */
+/* Drop a reference taken with _mali_osk_pm_dev_ref_add_no_power_on(). */
+void _mali_osk_pm_dev_ref_dec_no_power_on(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+	MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+	pm_runtime_put_autosuspend(&(mali_platform_device->dev));
+#else
+	pm_runtime_put(&(mali_platform_device->dev));
+#endif
+	MALI_DEBUG_PRINT(4, ("Mali OSK PM: No-power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
+#endif
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_osk_profiling.c b/drivers/gpu/arm/mali400/mali/linux/mali_osk_profiling.c
new file mode 100644 (file)
index 0000000..9deb433
--- /dev/null
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/module.h>
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_uk_types.h"
+#include "mali_osk_profiling.h"
+#include "mali_linux_trace.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_l2_cache.h"
+#include "mali_user_settings_db.h"
+
+/**
+ * Initialise profiling. In this backend there appears to be no in-kernel
+ * event buffering (events go out via tracepoints — see
+ * _mali_osk_profiling_report_sw_counters), so most hooks below are no-ops.
+ *
+ * @param auto_start When MALI_TRUE, enable the SW events user setting now.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start)
+{
+	if (MALI_TRUE == auto_start)
+	{
+		mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+void _mali_osk_profiling_term(void)
+{
+	/* Nothing to do */
+}
+
+/* No-op in this backend; always reports success. */
+_mali_osk_errcode_t _mali_osk_profiling_start(u32 * limit)
+{
+	/* Nothing to do */
+	return _MALI_OSK_ERR_OK;
+}
+
+/* No-op in this backend; always reports success. */
+_mali_osk_errcode_t _mali_osk_profiling_stop(u32 *count)
+{
+	/* Nothing to do */
+	return _MALI_OSK_ERR_OK;
+}
+
+/* No events are buffered in the kernel, so the count is always 0. */
+u32 _mali_osk_profiling_get_count(void)
+{
+	return 0;
+}
+
+/* No-op: there is no kernel-side event buffer to read from. */
+_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5])
+{
+	/* Nothing to do */
+	return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_osk_profiling_clear(void)
+{
+	/* Nothing to do */
+	return _MALI_OSK_ERR_OK;
+}
+
+mali_bool _mali_osk_profiling_is_recording(void)
+{
+	return MALI_FALSE;
+}
+
+mali_bool _mali_osk_profiling_have_recording(void)
+{
+	return MALI_FALSE;
+}
+
+/* Emit the software counter array directly onto the mali_sw_counters
+ * tracepoint, tagged with the calling process/thread ids. */
+void _mali_osk_profiling_report_sw_counters(u32 *counters)
+{
+	trace_mali_sw_counters(_mali_osk_get_pid(), _mali_osk_get_tid(), NULL, counters);
+}
+
+
+/* --- user/kernel (UKK) entry points: thin wrappers over the hooks above --- */
+
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args)
+{
+	return _mali_osk_profiling_start(&args->limit);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
+{
+	/* Always add process and thread identificator in the first two data elements for events from user space */
+	_mali_osk_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(), args->data[2], args->data[3], args->data[4]);
+
+	return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args)
+{
+	return _mali_osk_profiling_stop(&args->count);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args)
+{
+	return _mali_osk_profiling_get_event(args->index, &args->timestamp, &args->event_id, args->data);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args)
+{
+	return _mali_osk_profiling_clear();
+}
+
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args)
+{
+	_mali_osk_profiling_report_sw_counters(args->counters);
+	return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Called by gator.ko to set HW counters
+ *
+ * @param counter_id The counter ID.
+ * @param event_id Event ID that the counter should count (HW counter value from TRM).
+ * 
+ * @return 1 on success, 0 on failure.
+ */
+int _mali_profiling_set_event(u32 counter_id, s32 event_id)
+{
+	/* Vertex processor (GP) counter sources — programmed globally. */
+	if (COUNTER_VP_C0 == counter_id)
+	{
+		if (MALI_TRUE == mali_gp_job_set_gp_counter_src0(event_id))
+		{
+			return 1;
+		}
+	}
+	if (COUNTER_VP_C1 == counter_id)
+	{
+		if (MALI_TRUE == mali_gp_job_set_gp_counter_src1(event_id))
+		{
+			return 1;
+		}
+	}
+	/* Fragment processor (PP) counters: two counters per core, so the
+	 * core index is (counter_id - COUNTER_FP0_C0) / 2. */
+	if (COUNTER_FP0_C0 <= counter_id && COUNTER_FP3_C1 >= counter_id)
+	{
+		u32 core_id = (counter_id - COUNTER_FP0_C0) >> 1;
+		struct mali_pp_core* pp_core = mali_pp_get_global_pp_core(core_id);
+
+		if (NULL != pp_core)
+		{
+			/* NOTE(review): only core 0's counters (COUNTER_FP0_xx) are
+			 * ever programmed. Requests for FP1-FP3 pass the range check
+			 * and fetch pp_core, but then fall through and return 0.
+			 * Confirm this restriction is intentional — the setters below
+			 * look global rather than per-core. */
+			if ((COUNTER_FP0_C0 == counter_id) || (COUNTER_FP0_C1 == counter_id))
+			{
+				/* Even counter ids select source 0, odd ids source 1. */
+				u32 counter_src = (counter_id - COUNTER_FP0_C0) & 1;
+				if (0 == counter_src)
+				{
+					if (MALI_TRUE == mali_pp_job_set_pp_counter_src0(event_id))
+					{
+						return 1;
+					}
+				}
+				else
+				{
+					if (MALI_TRUE == mali_pp_job_set_pp_counter_src1(event_id))
+					{
+					MALI_DEBUG_PRINT(5, ("MALI PROFILING SET EVENT core 0 counter_id = %d\n",counter_id));
+					return 1;
+					}
+				}
+			}
+		}
+	}
+	/* L2 cache counters: same even/odd source selection scheme. */
+	if (COUNTER_L2_C0 <= counter_id && COUNTER_L2_C1 >= counter_id)
+	{
+		u32 core_id = (counter_id - COUNTER_L2_C0) >> 1;
+		struct mali_l2_cache_core* l2_cache_core = mali_l2_cache_core_get_glob_l2_core(core_id);
+
+		if (NULL != l2_cache_core)
+		{
+			u32 counter_src = (counter_id - COUNTER_L2_C0) & 1;
+			if (0 == counter_src)
+			{
+				MALI_DEBUG_PRINT(5, ("SET EVENT L2 0 COUNTER\n"));
+				if (MALI_TRUE == mali_l2_cache_core_set_counter_src0(l2_cache_core, event_id))
+				{
+					return 1;
+				}
+			}
+			else
+			{
+				MALI_DEBUG_PRINT(5, ("SET EVENT L2 1 COUNTER\n"));
+				if (MALI_TRUE == mali_l2_cache_core_set_counter_src1(l2_cache_core, event_id))
+				{
+					return 1;
+				}
+			}
+		}
+	}
+
+	/* Unknown counter id, missing core, or the setter rejected the event. */
+	return 0;
+}
+
+/**
+ * Called by gator.ko to retrieve the L2 cache counter values for the first L2 cache. 
+ * The L2 cache counters are unique in that they are polled by gator, rather than being
+ * transmitted via the tracepoint mechanism. 
+ *
+ * @param src0 First L2 cache counter ID.
+ * @param val0 First L2 cache counter value.
+ * @param src1 Second L2 cache counter ID.
+ * @param val1 Second L2 cache counter value.
+ */
+void _mali_profiling_get_counters(u32 *src0, u32 *val0, u32 *src1, u32 *val1)
+{
+        struct mali_l2_cache_core *l2_cache = mali_l2_cache_core_get_glob_l2_core(0);
+        if (NULL != l2_cache)
+        {
+		/* Only touch the HW registers while the power state is locked
+		 * and the core is reported as powered. */
+		if (MALI_TRUE == mali_l2_cache_lock_power_state(l2_cache))
+		{
+			/* It is now safe to access the L2 cache core in order to retrieve the counters */
+			mali_l2_cache_core_get_counter_values(l2_cache, src0, val0, src1, val1);
+		}
+		/* NOTE(review): unlock is called even when lock_power_state()
+		 * returned MALI_FALSE. This is only correct if the lock is taken
+		 * unconditionally and the return value merely reports the power
+		 * state — confirm against mali_l2_cache_lock_power_state(). */
+		mali_l2_cache_unlock_power_state(l2_cache);
+        }
+}
+
+/*
+ * List of possible actions to be controlled by Streamline.
+ * The following numbers are used by gator to control the frame buffer dumping and s/w counter reporting.
+ * We cannot use the enums in mali_uk_types.h because they are unknown inside gator.
+ */
+#define FBDUMP_CONTROL_ENABLE (1)
+#define FBDUMP_CONTROL_RATE (2)
+#define SW_COUNTER_ENABLE (3)
+#define FBDUMP_CONTROL_RESIZE_FACTOR (4)
+
+/**
+ * Runtime control hook invoked by gator.ko.
+ *
+ * @param action One of the FBDUMP_CONTROL_xxx / SW_COUNTER_ENABLE ids above.
+ * @param value  Action-specific argument (enable flag, rate, factor, ...).
+ */
+void _mali_profiling_control(u32 action, u32 value)
+{
+	if (FBDUMP_CONTROL_ENABLE == action)
+	{
+		mali_set_user_setting(_MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED, (value == 0 ? MALI_FALSE : MALI_TRUE));
+	}
+	else if (FBDUMP_CONTROL_RATE == action)
+	{
+		mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES, value);
+	}
+	else if (SW_COUNTER_ENABLE == action)
+	{
+		mali_set_user_setting(_MALI_UK_USER_SETTING_SW_COUNTER_ENABLED, value);
+	}
+	else if (FBDUMP_CONTROL_RESIZE_FACTOR == action)
+	{
+		mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR, value);
+	}
+	/* Unimplemented actions are deliberately ignored. */
+}
+
+EXPORT_SYMBOL(_mali_profiling_set_event);
+EXPORT_SYMBOL(_mali_profiling_get_counters);
+EXPORT_SYMBOL(_mali_profiling_control);
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_osk_specific.h b/drivers/gpu/arm/mali400/mali/linux/mali_osk_specific.h
new file mode 100644 (file)
index 0000000..87c7a3c
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_specific.h
+ * Defines per-OS Kernel level specifics, such as unusual workarounds for
+ * certain OSs.
+ */
+
+#ifndef __MALI_OSK_SPECIFIC_H__
+#define __MALI_OSK_SPECIFIC_H__
+
+#include <asm/uaccess.h>
+
+#include "mali_sync.h"
+
+#define MALI_STATIC_INLINE static inline
+#define MALI_NON_STATIC_INLINE inline
+
+#ifdef CONFIG_SYNC
+/* MALI_SEC */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+typedef struct sync_timeline mali_sync_tl;
+typedef struct sync_pt mali_sync_pt;
+
+/* Allocate a new sync point on timeline @parent (thin cast wrapper). */
+MALI_STATIC_INLINE mali_sync_pt *_mali_osk_sync_pt_create(mali_sync_tl *parent)
+{
+	return (mali_sync_pt*)mali_sync_pt_alloc(parent);
+}
+
+/* Signal @pt with status 0. NOTE(review): 0 presumably encodes "success" —
+ * confirm against mali_sync_signal_pt(). */
+MALI_STATIC_INLINE void _mali_osk_sync_pt_signal(mali_sync_pt *pt)
+{
+	mali_sync_signal_pt(pt, 0);
+}
+#endif
+#endif /* CONFIG_SYNC */
+
+/* copy_from_user() wrapper; per the kernel API it returns the number of
+ * bytes that could NOT be copied (0 means complete success). */
+MALI_STATIC_INLINE u32 _mali_osk_copy_from_user(void *to, void *from, u32 n)
+{
+	return (u32)copy_from_user(to, from, (unsigned long)n);
+}
+
+/** The list of events supported by the Mali DDK. */
+/* NOTE: the declaration order below is load-bearing — the FIRST_xxx and
+ * LAST_xxx range macros at the end of this file assume each group of
+ * enumerators is contiguous and in this order. */
+typedef enum
+{
+    /* Vertex processor activity */
+    ACTIVITY_VP = 0,
+
+    /* Fragment processor activity */
+    ACTIVITY_FP0,
+    ACTIVITY_FP1,
+    ACTIVITY_FP2,
+    ACTIVITY_FP3,
+
+    /* L2 cache counters */
+    COUNTER_L2_C0,
+    COUNTER_L2_C1,
+
+    /* Vertex processor counters */
+    COUNTER_VP_C0,
+    COUNTER_VP_C1,
+
+    /* Fragment processor counters (two per core, cores 0-3) */
+    COUNTER_FP0_C0,
+    COUNTER_FP0_C1,
+    COUNTER_FP1_C0,
+    COUNTER_FP1_C1,
+    COUNTER_FP2_C0,
+    COUNTER_FP2_C1,
+    COUNTER_FP3_C0,
+    COUNTER_FP3_C1,
+
+    /*
+     * If more hardware counters are added, the _mali_osk_hw_counter_table
+     * below should also be updated.
+     */
+
+    /* EGL software counters */
+    COUNTER_EGL_BLIT_TIME,
+
+    /* GLES software counters */
+    COUNTER_GLES_DRAW_ELEMENTS_CALLS,
+    COUNTER_GLES_DRAW_ELEMENTS_NUM_INDICES,
+    COUNTER_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED,
+    COUNTER_GLES_DRAW_ARRAYS_CALLS,
+    COUNTER_GLES_DRAW_ARRAYS_NUM_TRANSFORMED,
+    COUNTER_GLES_DRAW_POINTS,
+    COUNTER_GLES_DRAW_LINES,
+    COUNTER_GLES_DRAW_LINE_LOOP,
+    COUNTER_GLES_DRAW_LINE_STRIP,
+    COUNTER_GLES_DRAW_TRIANGLES,
+    COUNTER_GLES_DRAW_TRIANGLE_STRIP,
+    COUNTER_GLES_DRAW_TRIANGLE_FAN,
+    COUNTER_GLES_NON_VBO_DATA_COPY_TIME,
+    COUNTER_GLES_UNIFORM_BYTES_COPIED_TO_MALI,
+    COUNTER_GLES_UPLOAD_TEXTURE_TIME,
+    COUNTER_GLES_UPLOAD_VBO_TIME,
+    COUNTER_GLES_NUM_FLUSHES,
+    COUNTER_GLES_NUM_VSHADERS_GENERATED,
+    COUNTER_GLES_NUM_FSHADERS_GENERATED,
+    COUNTER_GLES_VSHADER_GEN_TIME,
+    COUNTER_GLES_FSHADER_GEN_TIME,
+    COUNTER_GLES_INPUT_TRIANGLES,
+    COUNTER_GLES_VXCACHE_HIT,
+    COUNTER_GLES_VXCACHE_MISS,
+    COUNTER_GLES_VXCACHE_COLLISION,
+    COUNTER_GLES_CULLED_TRIANGLES,
+    COUNTER_GLES_CULLED_LINES,
+    COUNTER_GLES_BACKFACE_TRIANGLES,
+    COUNTER_GLES_GBCLIP_TRIANGLES,
+    COUNTER_GLES_GBCLIP_LINES,
+    COUNTER_GLES_TRIANGLES_DRAWN,
+    COUNTER_GLES_DRAWCALL_TIME,
+    COUNTER_GLES_TRIANGLES_COUNT,
+    COUNTER_GLES_INDEPENDENT_TRIANGLES_COUNT,
+    COUNTER_GLES_STRIP_TRIANGLES_COUNT,
+    COUNTER_GLES_FAN_TRIANGLES_COUNT,
+    COUNTER_GLES_LINES_COUNT,
+    COUNTER_GLES_INDEPENDENT_LINES_COUNT,
+    COUNTER_GLES_STRIP_LINES_COUNT,
+    COUNTER_GLES_LOOP_LINES_COUNT,
+
+    /* Framebuffer capture pseudo-counter */
+    COUNTER_FILMSTRIP,
+
+    NUMBER_OF_EVENTS
+} _mali_osk_counter_id;
+
+/* Inclusive ranges over the enum above; keep in sync with its ordering. */
+#define FIRST_ACTIVITY_EVENT    ACTIVITY_VP
+#define LAST_ACTIVITY_EVENT     ACTIVITY_FP3
+
+#define FIRST_HW_COUNTER        COUNTER_L2_C0
+#define LAST_HW_COUNTER         COUNTER_FP3_C1
+
+#define FIRST_SW_COUNTER        COUNTER_EGL_BLIT_TIME
+#define LAST_SW_COUNTER         COUNTER_GLES_LOOP_LINES_COUNT
+
+#define FIRST_SPECIAL_COUNTER   COUNTER_FILMSTRIP
+#define LAST_SPECIAL_COUNTER    COUNTER_FILMSTRIP
+
+#endif /* __MALI_OSK_SPECIFIC_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_osk_time.c b/drivers/gpu/arm/mali400/mali/linux/mali_osk_time.c
new file mode 100644 (file)
index 0000000..2aa6588
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_time.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <asm/delay.h>
+
+/* Non-zero iff tick value @ticka is after @tickb (jiffies wrap-safe). */
+int _mali_osk_time_after( u32 ticka, u32 tickb )
+{
+	return time_after((unsigned long)ticka, (unsigned long)tickb);
+}
+
+/* Convert milliseconds to kernel ticks (jiffies). */
+u32 _mali_osk_time_mstoticks( u32 ms )
+{
+	return msecs_to_jiffies(ms);
+}
+
+/* Convert kernel ticks (jiffies) to milliseconds. */
+u32 _mali_osk_time_tickstoms( u32 ticks )
+{
+	return jiffies_to_msecs(ticks);
+}
+
+/* Current tick counter (jiffies), truncated to 32 bits. */
+u32 _mali_osk_time_tickcount( void )
+{
+	return jiffies;
+}
+
+/* Busy-wait for @usecs microseconds; does not sleep. */
+void _mali_osk_time_ubusydelay( u32 usecs )
+{
+	udelay(usecs);
+}
+
+/* Wall-clock time in nanoseconds. */
+u64 _mali_osk_time_get_ns( void )
+{
+	struct timespec now;
+
+	getnstimeofday(&now);
+	return (u64)timespec_to_ns(&now);
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_osk_timers.c b/drivers/gpu/arm/mali400/mali/linux/mali_osk_timers.c
new file mode 100644 (file)
index 0000000..f235c7e
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_timers.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* Container for a kernel timer; the opaque _mali_osk_timer_t handle. */
+struct _mali_osk_timer_t_struct
+{
+    struct timer_list timer;
+};
+
+/* Kernel timer callback signature ("unsigned long data" style); used to
+ * cast the OSK callback in _mali_osk_timer_setcallback(). */
+typedef void (*timer_timeout_function_t)(unsigned long);
+
+/**
+ * Allocate and initialise a timer object.
+ *
+ * The caller must attach a callback before arming the timer.
+ * @return The new timer, or NULL on allocation failure.
+ */
+_mali_osk_timer_t *_mali_osk_timer_init(void)
+{
+    /* Kernel idiom: no cast of kmalloc()'s void * return, and sizeof(*t)
+     * ties the allocation size to the pointee type. */
+    _mali_osk_timer_t *t = kmalloc(sizeof(*t), GFP_KERNEL);
+    if (NULL != t) init_timer(&t->timer);
+    return t;
+}
+
+/* Arm @tim to fire @ticks_to_expire jiffies from now. The timer must not
+ * already be pending (uses add_timer, not mod_timer). */
+void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire )
+{
+	MALI_DEBUG_ASSERT_POINTER(tim);
+    tim->timer.expires = jiffies + ticks_to_expire;
+    add_timer(&(tim->timer));
+}
+
+/* (Re)arm @tim to fire @ticks_to_expire jiffies from now, whether or not
+ * it is already pending. */
+void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 ticks_to_expire)
+{
+    MALI_DEBUG_ASSERT_POINTER(tim);
+    mod_timer(&(tim->timer), jiffies + ticks_to_expire);
+}
+
+/* Cancel @tim and wait for any running callback to finish. May sleep;
+ * must not be called from the timer's own callback. */
+void _mali_osk_timer_del( _mali_osk_timer_t *tim )
+{
+    MALI_DEBUG_ASSERT_POINTER(tim);
+    del_timer_sync(&(tim->timer));
+}
+
+/* Cancel @tim without waiting for a concurrently running callback. */
+void _mali_osk_timer_del_async( _mali_osk_timer_t *tim )
+{
+	MALI_DEBUG_ASSERT_POINTER(tim);
+	del_timer(&(tim->timer));
+}
+
+/* MALI_TRUE when @tim is currently armed/pending. */
+mali_bool _mali_osk_timer_pending( _mali_osk_timer_t *tim )
+{
+	MALI_DEBUG_ASSERT_POINTER(tim);
+	return 1 == timer_pending(&(tim->timer));
+}
+
+/* Attach @callback/@data to @tim; must be done before arming the timer. */
+void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data )
+{
+    MALI_DEBUG_ASSERT_POINTER(tim);
+    tim->timer.data = (unsigned long)data;
+    tim->timer.function = (timer_timeout_function_t)callback;
+}
+
+/* Free @tim. NOTE(review): does not cancel the timer first — callers are
+ * expected to delete it before calling term; confirm all call sites do. */
+void _mali_osk_timer_term( _mali_osk_timer_t *tim )
+{
+    MALI_DEBUG_ASSERT_POINTER(tim);
+    kfree(tim);
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_osk_wait_queue.c b/drivers/gpu/arm/mali400/mali/linux/mali_osk_wait_queue.c
new file mode 100644 (file)
index 0000000..a42fa03
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_wait_queue.c
+ * Implemenation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* Opaque wrapper around a kernel wait queue head. */
+struct _mali_osk_wait_queue_t_struct
+{
+    wait_queue_head_t wait_queue;
+};
+
+/* Allocate and initialise a wait queue object; NULL on allocation failure. */
+_mali_osk_wait_queue_t* _mali_osk_wait_queue_init( void )
+{
+    _mali_osk_wait_queue_t* ret = NULL;
+
+    ret = kmalloc(sizeof(_mali_osk_wait_queue_t), GFP_KERNEL);
+
+    if (NULL == ret)
+    {
+        return ret;
+    }
+
+    init_waitqueue_head(&ret->wait_queue);
+    MALI_DEBUG_ASSERT(!waitqueue_active(&ret->wait_queue));
+
+    return ret;
+}
+
+/* Sleep (uninterruptibly) on @queue until @condition() returns true. */
+void _mali_osk_wait_queue_wait_event( _mali_osk_wait_queue_t *queue, mali_bool (*condition)(void) )
+{
+    MALI_DEBUG_ASSERT_POINTER( queue );
+    MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
+    wait_event(queue->wait_queue, condition());
+}
+
+/* Wake every waiter currently sleeping on @queue (no-op when empty). */
+void _mali_osk_wait_queue_wake_up( _mali_osk_wait_queue_t *queue )
+{
+    MALI_DEBUG_ASSERT_POINTER( queue );
+
+    /* if queue is empty, don't attempt to wake up its elements */
+    if (!waitqueue_active(&queue->wait_queue)) return;
+
+    MALI_DEBUG_PRINT(6, ("Waking up elements in wait queue %p ....\n", queue));
+
+    wake_up_all(&queue->wait_queue);
+
+    MALI_DEBUG_PRINT(6, ("... elements in wait queue %p woken up\n", queue));
+}
+
+/* Free @queue. Callers must guarantee no threads are still waiting on it. */
+void _mali_osk_wait_queue_term( _mali_osk_wait_queue_t *queue )
+{
+	/* Parameter validation  */
+	MALI_DEBUG_ASSERT_POINTER( queue );
+
+	/* Linux requires no explicit termination of wait queues */
+    kfree(queue);
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_osk_wq.c b/drivers/gpu/arm/mali400/mali/linux/mali_osk_wq.c
new file mode 100644 (file)
index 0000000..02685fa
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_wq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/slab.h>        /* For memory allocation */
+#include <linux/workqueue.h>
+#include <linux/version.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "mali_kernel_linux.h"
+
+/* One unit of deferred work: user handler + argument, embedded in a kernel
+ * work_struct so _MALI_OSK_CONTAINER_OF can recover the wrapper in the
+ * dispatcher below. */
+typedef struct _mali_osk_wq_work_t_struct
+{
+	_mali_osk_wq_work_handler_t handler;
+	void *data;
+	struct work_struct work_handle;
+} mali_osk_wq_work_object_t;
+
+#if MALI_LICENSE_IS_GPL
+/* Dedicated Mali work queue (GPL builds only; non-GPL builds fall back to
+ * the shared kernel-global work queue via schedule_work()). */
+struct workqueue_struct *mali_wq = NULL;
+#endif
+
+static void _mali_osk_wq_work_func ( struct work_struct *work );
+
+/* Create the Mali work queue; must succeed before any work is scheduled. */
+_mali_osk_errcode_t _mali_osk_wq_init(void)
+{
+#if MALI_LICENSE_IS_GPL
+	MALI_DEBUG_ASSERT(NULL == mali_wq);
+
+	/* WQ_UNBOUND: work items are not tied to a particular CPU. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
+	mali_wq = alloc_workqueue("mali", WQ_UNBOUND, 0);
+#else
+	mali_wq = create_workqueue("mali");
+#endif
+	if(NULL == mali_wq)
+	{
+		MALI_PRINT_ERROR(("Unable to create Mali workqueue\n"));
+		return _MALI_OSK_ERR_FAULT;
+	}
+#endif
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Wait until all currently queued Mali work has finished executing. */
+void _mali_osk_wq_flush(void)
+{
+#if MALI_LICENSE_IS_GPL
+	flush_workqueue(mali_wq);
+#else
+	flush_scheduled_work();
+#endif
+}
+
+/* Flush and destroy the Mali work queue. */
+void _mali_osk_wq_term(void)
+{
+#if MALI_LICENSE_IS_GPL
+	MALI_DEBUG_ASSERT(NULL != mali_wq);
+
+	flush_workqueue(mali_wq);
+	destroy_workqueue(mali_wq);
+	mali_wq = NULL;
+#else
+	flush_scheduled_work();
+#endif
+}
+
+/* Allocate a work item bound to @handler/@data; NULL on allocation failure. */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work( _mali_osk_wq_work_handler_t handler, void *data )
+{
+	mali_osk_wq_work_object_t *work = kmalloc(sizeof(mali_osk_wq_work_object_t), GFP_KERNEL);
+
+	if (NULL == work) return NULL;
+
+	work->handler = handler;
+	work->data = data;
+
+	INIT_WORK( &work->work_handle, _mali_osk_wq_work_func );
+
+	return work;
+}
+
+/* Free a work item. The whole queue is flushed first so the handler cannot
+ * still be running (or about to run) when the memory is released. */
+void _mali_osk_wq_delete_work( _mali_osk_wq_work_t *work )
+{
+	mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+	_mali_osk_wq_flush();
+	kfree(work_object);
+}
+
+/* Queue @work for asynchronous execution. */
+void _mali_osk_wq_schedule_work( _mali_osk_wq_work_t *work )
+{
+	mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+#if MALI_LICENSE_IS_GPL
+	queue_work(mali_wq, &work_object->work_handle);
+#else
+	schedule_work(&work_object->work_handle);
+#endif
+}
+
+/* Work queue trampoline: recover the wrapper object from the embedded
+ * work_struct and invoke the user-supplied handler with its argument. */
+static void _mali_osk_wq_work_func ( struct work_struct *work )
+{
+	mali_osk_wq_work_object_t *work_object;
+
+	work_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_wq_work_object_t, work_handle);
+	work_object->handler(work_object->data);
+}
+
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_pmu_power_up_down.c b/drivers/gpu/arm/mali400/mali/linux/mali_pmu_power_up_down.c
new file mode 100644 (file)
index 0000000..ee6aa2f
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu_power_up_down.c
+ */
+
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_pmu.h"
+#include "linux/mali/mali_utgard.h"
+
+/* Mali PMU power up/down APIs */
+
+/**
+ * Power up all Mali domains through the PMU.
+ *
+ * @return 0 on success, -ENXIO when no PMU is present, -EFAULT when the
+ *         PMU rejected the request.
+ */
+int mali_pmu_powerup(void)
+{
+	struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+	MALI_DEBUG_PRINT(5, ("Mali PMU: Power up\n"));
+
+	if (NULL == pmu)
+	{
+		return -ENXIO;
+	}
+
+	return (_MALI_OSK_ERR_OK == mali_pmu_powerup_all(pmu)) ? 0 : -EFAULT;
+}
+
+EXPORT_SYMBOL(mali_pmu_powerup);
+
+/**
+ * Power down all Mali domains through the PMU.
+ *
+ * @return 0 on success, -ENXIO when no PMU is present, -EFAULT when the
+ *         PMU rejected the request.
+ */
+int mali_pmu_powerdown(void)
+{
+	struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+	MALI_DEBUG_PRINT(5, ("Mali PMU: Power down\n"));
+
+	if (NULL == pmu)
+	{
+		return -ENXIO;
+	}
+
+	return (_MALI_OSK_ERR_OK == mali_pmu_powerdown_all(pmu)) ? 0 : -EFAULT;
+}
+
+EXPORT_SYMBOL(mali_pmu_powerdown);
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_profiling_events.h b/drivers/gpu/arm/mali400/mali/linux/mali_profiling_events.h
new file mode 100644 (file)
index 0000000..e1e7afa
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_EVENTS_H__
+#define __MALI_PROFILING_EVENTS_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_profiling_events.h>
+
+#endif /* __MALI_PROFILING_EVENTS_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_profiling_internal.c b/drivers/gpu/arm/mali400/mali/linux/mali_profiling_internal.c
new file mode 100644 (file)
index 0000000..2928ce4
--- /dev/null
@@ -0,0 +1,300 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_timestamp.h"
+#include "mali_osk_profiling.h"
+#include "mali_user_settings_db.h"
+#include "mali_profiling_internal.h"
+
+/* One recorded profiling event.  Filled in by add_event() from the
+ * tracepoint probe and read back via _mali_internal_profiling_get_event(). */
+typedef struct mali_profiling_entry
+{
+	u64 timestamp;
+	u32 event_id;
+	u32 data[5];
+} mali_profiling_entry;
+
+
+/* Profiler life-cycle: UNINITIALIZED -> IDLE -> RUNNING -> RETURN (-> IDLE). */
+typedef enum mali_profiling_state
+{
+	MALI_PROFILING_STATE_UNINITIALIZED,
+	MALI_PROFILING_STATE_IDLE,
+	MALI_PROFILING_STATE_RUNNING,
+	MALI_PROFILING_STATE_RETURN,
+} mali_profiling_state;
+
+static _mali_osk_lock_t *lock = NULL;           /* serialises state transitions below */
+static mali_profiling_state prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+static mali_profiling_entry* profile_entries = NULL;    /* power-of-two sized ring buffer */
+static _mali_osk_atomic_t profile_insert_index;         /* total number of events added */
+static u32 profile_mask = 0;                            /* buffer size - 1, used to wrap indices */
+static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
+
+/* Tracepoint probe: forwards each mali_timeline_event into the ring buffer. */
+void probe_mali_timeline_event(void *data, TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1, unsigned
+			int d2, unsigned int d3, unsigned int d4))
+{
+	add_event(event_id, d0, d1, d2, d3, d4);
+}
+
+/* Initialise the internal profiler: create the state lock, reset the insert
+ * index and enter the IDLE state.  When @auto_start is MALI_TRUE, software
+ * events are enabled and capture starts immediately with the maximum buffer.
+ * Returns _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT otherwise.
+ * NOTE(review): if the auto-start path fails, the lock remains allocated;
+ * presumably the caller invokes _mali_internal_profiling_term() — confirm. */
+_mali_osk_errcode_t _mali_internal_profiling_init(mali_bool auto_start)
+{
+	profile_entries = NULL;
+	profile_mask = 0;
+	_mali_osk_atomic_init(&profile_insert_index, 0);
+
+	lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_PROFILING);
+	if (NULL == lock)
+	{
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	prof_state = MALI_PROFILING_STATE_IDLE;
+
+	if (MALI_TRUE == auto_start)
+	{
+		u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* Use maximum buffer size */
+
+		mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
+		if (_MALI_OSK_ERR_OK != _mali_internal_profiling_start(&limit))
+		{
+			return _MALI_OSK_ERR_FAULT;
+		}
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Tear down the internal profiler: stop any running capture, drop back to
+ * the UNINITIALIZED state and release the ring buffer and the state lock.
+ * Safe to call regardless of the current profiling state. */
+void _mali_internal_profiling_term(void)
+{
+	u32 count;
+
+	/* Ensure profiling is stopped */
+	_mali_internal_profiling_stop(&count);
+
+	prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+
+	if (NULL != profile_entries)
+	{
+		_mali_osk_vfree(profile_entries);
+		profile_entries = NULL;
+	}
+
+	if (NULL != lock)
+	{
+		_mali_osk_lock_term(lock);
+		lock = NULL;
+	}
+}
+
+/* Start capturing profiling events.
+ * @limit: in - requested buffer size in entries; out - actual size used
+ *         (clamped to MALI_PROFILING_MAX_BUFFER_ENTRIES and rounded down to
+ *         a power of two so indices can wrap with a simple AND mask).
+ * Returns _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_BUSY if already
+ * running, _MALI_OSK_ERR_INVALID_ARGS if not IDLE, _MALI_OSK_ERR_NOMEM if
+ * the ring buffer cannot be allocated.
+ * Fixes relative to the previous version: the state lock is now released on
+ * the NOMEM path (it was leaked, deadlocking later calls); the buffer is
+ * allocated only after clamping/rounding *limit (it was sized from the
+ * unclamped request); vfree() is no longer called on a NULL allocation; and
+ * the tracepoint is only registered on success (it was registered even after
+ * _mali_timestamp_reset() failed and profile_entries had been freed, so the
+ * probe would have dereferenced NULL). */
+_mali_osk_errcode_t _mali_internal_profiling_start(u32 * limit)
+{
+	_mali_osk_errcode_t ret;
+	mali_profiling_entry *new_profile_entries;
+
+	_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+	if (MALI_PROFILING_STATE_RUNNING == prof_state)
+	{
+		_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+		return _MALI_OSK_ERR_BUSY;
+	}
+
+	if (MALI_PROFILING_STATE_IDLE != prof_state)
+	{
+		_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+		return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+	}
+
+	/* Clamp before sizing the buffer so we never allocate more than the
+	 * supported maximum. */
+	if (MALI_PROFILING_MAX_BUFFER_ENTRIES < *limit)
+	{
+		*limit = MALI_PROFILING_MAX_BUFFER_ENTRIES;
+	}
+
+	/* Round down to a power of two so a mask can wrap insert indices. */
+	profile_mask = 1;
+	while (profile_mask <= *limit)
+	{
+		profile_mask <<= 1;
+	}
+	profile_mask >>= 1;
+
+	*limit = profile_mask;
+
+	new_profile_entries = _mali_osk_valloc(profile_mask * sizeof(mali_profiling_entry));
+	if (NULL == new_profile_entries)
+	{
+		_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+		return _MALI_OSK_ERR_NOMEM;
+	}
+
+	profile_mask--; /* turns the power of two into a mask of one less */
+
+	profile_entries = new_profile_entries;
+
+	ret = _mali_timestamp_reset();
+
+	if (_MALI_OSK_ERR_OK == ret)
+	{
+		prof_state = MALI_PROFILING_STATE_RUNNING;
+		/* Only hook up the tracepoint once capture can actually succeed. */
+		register_trace_mali_timeline_event(probe_mali_timeline_event, NULL);
+	}
+	else
+	{
+		_mali_osk_vfree(profile_entries);
+		profile_entries = NULL;
+	}
+
+	_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+	return ret;
+}
+
+/* Append one event to the ring buffer.  Called from the tracepoint probe.
+ * The atomically incremented insert index is wrapped with profile_mask
+ * (buffer size is a power of two), so old entries are overwritten once the
+ * buffer is full. */
+static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
+{
+	u32 cur_index = (_mali_osk_atomic_inc_return(&profile_insert_index) - 1) & profile_mask;
+
+	profile_entries[cur_index].timestamp = _mali_timestamp_get();
+	profile_entries[cur_index].event_id = event_id;
+	profile_entries[cur_index].data[0] = data0;
+	profile_entries[cur_index].data[1] = data1;
+	profile_entries[cur_index].data[2] = data2;
+	profile_entries[cur_index].data[3] = data3;
+	profile_entries[cur_index].data[4] = data4;
+
+	/* If event is "leave API function", add current memory usage to the event
+	 * as data point 4.  This is used in timeline profiling to indicate how
+	 * much memory was used when leaving a function. */
+	if (event_id == (MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC))
+	{
+		profile_entries[cur_index].data[4] = _mali_ukk_report_memory_usage();
+	}
+}
+
+/* Stop capturing and enter the RETURN state so recorded events can be read.
+ * @count: out - number of valid entries (capped at the buffer size).
+ * Returns _MALI_OSK_ERR_INVALID_ARGS unless the profiler is RUNNING.
+ * The tracepoint is unregistered and tracepoint_synchronize_unregister()
+ * waits for in-flight probes to finish before the count is sampled. */
+_mali_osk_errcode_t _mali_internal_profiling_stop(u32 * count)
+{
+	_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+	if (MALI_PROFILING_STATE_RUNNING != prof_state)
+	{
+		_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+		return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+	}
+
+	/* go into return state (user to retreive events), no more events will be added after this */
+	prof_state = MALI_PROFILING_STATE_RETURN;
+
+	unregister_trace_mali_timeline_event(probe_mali_timeline_event, NULL);
+
+	_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+
+	tracepoint_synchronize_unregister();
+
+	*count = _mali_osk_atomic_read(&profile_insert_index);
+	if (*count > profile_mask) *count = profile_mask;
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Number of recorded events available for readout, capped at the buffer
+ * size.  Returns 0 unless the profiler is in the RETURN state. */
+u32 _mali_internal_profiling_get_count(void)
+{
+	u32 count = 0;
+
+	_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+	if (MALI_PROFILING_STATE_RETURN == prof_state)
+	{
+		count = _mali_osk_atomic_read(&profile_insert_index);
+		if (count > profile_mask)
+		{
+			count = profile_mask;
+		}
+	}
+	_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+
+	return count;
+}
+
+/* Read back one recorded event.
+ * @index: logical event index; when the buffer has wrapped (total inserts
+ *         exceed the buffer size) it is rebased onto the physical ring
+ *         position of the oldest surviving entry.
+ * Returns _MALI_OSK_ERR_FAULT for out-of-range indices and
+ * _MALI_OSK_ERR_INVALID_ARGS unless the profiler is in the RETURN state.
+ * NOTE(review): raw_index is sampled before the lock is taken; in the RETURN
+ * state no more events are added, so this appears benign — confirm. */
+_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5])
+{
+	u32 raw_index = _mali_osk_atomic_read(&profile_insert_index);
+
+	_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+	if (index < profile_mask)
+	{
+		/* Buffer has wrapped: rebase the logical index onto the ring. */
+		if ((raw_index & ~profile_mask) != 0)
+		{
+			index += raw_index;
+			index &= profile_mask;
+		}
+
+		if (prof_state != MALI_PROFILING_STATE_RETURN)
+		{
+			_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+			return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+		}
+
+		if(index >= raw_index)
+		{
+			_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+			return _MALI_OSK_ERR_FAULT;
+		}
+
+		*timestamp = profile_entries[index].timestamp;
+		*event_id = profile_entries[index].event_id;
+		data[0] = profile_entries[index].data[0];
+		data[1] = profile_entries[index].data[1];
+		data[2] = profile_entries[index].data[2];
+		data[3] = profile_entries[index].data[3];
+		data[4] = profile_entries[index].data[4];
+	}
+	else
+	{
+		_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Discard the recorded events and return to the IDLE state so a new capture
+ * can be started.  Only valid in the RETURN state; otherwise returns
+ * _MALI_OSK_ERR_INVALID_ARGS. */
+_mali_osk_errcode_t _mali_internal_profiling_clear(void)
+{
+	_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+	if (MALI_PROFILING_STATE_RETURN != prof_state)
+	{
+		_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+		return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+	}
+
+	prof_state = MALI_PROFILING_STATE_IDLE;
+	profile_mask = 0;
+	_mali_osk_atomic_init(&profile_insert_index, 0);
+
+	if (NULL != profile_entries)
+	{
+		_mali_osk_vfree(profile_entries);
+		profile_entries = NULL;
+	}
+
+	_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+	return _MALI_OSK_ERR_OK;
+}
+
+/* MALI_TRUE while a capture is in progress. */
+mali_bool _mali_internal_profiling_is_recording(void)
+{
+	if (MALI_PROFILING_STATE_RUNNING == prof_state)
+	{
+		return MALI_TRUE;
+	}
+	return MALI_FALSE;
+}
+
+/* MALI_TRUE when a completed capture is waiting to be read out. */
+mali_bool _mali_internal_profiling_have_recording(void)
+{
+	if (MALI_PROFILING_STATE_RETURN == prof_state)
+	{
+		return MALI_TRUE;
+	}
+	return MALI_FALSE;
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_profiling_internal.h b/drivers/gpu/arm/mali400/mali/linux/mali_profiling_internal.h
new file mode 100644 (file)
index 0000000..4646d25
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_INTERNAL_H__
+#define __MALI_PROFILING_INTERNAL_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include "mali_osk.h"
+
+/* Return types below now match the definitions in
+ * mali_profiling_internal.c, which use _mali_osk_errcode_t; the previous
+ * 'int' declarations conflicted with those definitions. */
+_mali_osk_errcode_t _mali_internal_profiling_init(mali_bool auto_start);
+void _mali_internal_profiling_term(void);
+
+mali_bool _mali_internal_profiling_is_recording(void);
+mali_bool _mali_internal_profiling_have_recording(void);
+_mali_osk_errcode_t _mali_internal_profiling_clear(void);
+_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5]);
+u32 _mali_internal_profiling_get_count(void);
+_mali_osk_errcode_t _mali_internal_profiling_stop(u32 * count);
+_mali_osk_errcode_t _mali_internal_profiling_start(u32 * limit);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PROFILING_INTERNAL_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_sync.c b/drivers/gpu/arm/mali400/mali/linux/mali_sync.c
new file mode 100644 (file)
index 0000000..9b84896
--- /dev/null
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_sync.c
+ *
+ */
+
+#include <linux/seq_file.h>
+#include <linux/sync.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* Mali wrapper around the Android sync framework timeline.
+ * counter: order number handed to the most recently allocated sync point.
+ * signalled: order number of the most recently signalled sync point. */
+struct mali_sync_timeline
+{
+	struct sync_timeline timeline;
+	atomic_t counter;
+	atomic_t signalled;
+};
+
+/* Mali sync point: order is its position on the timeline, error is a
+ * negative errno recorded when the point is signalled with failure. */
+struct mali_sync_pt
+{
+	struct sync_pt pt;
+	u32 order;
+	s32 error;
+};
+
+/* Downcast from the embedded framework timeline to our wrapper. */
+static inline struct mali_sync_timeline *to_mali_sync_timeline(struct sync_timeline *timeline)
+{
+	return container_of(timeline, struct mali_sync_timeline, timeline);
+}
+
+/* Downcast from the embedded framework sync point to our wrapper. */
+static inline struct mali_sync_pt *to_mali_sync_pt(struct sync_pt *pt)
+{
+	return container_of(pt, struct mali_sync_pt, pt);
+}
+
+/* sync_timeline_ops.dup: clone a sync point on the same timeline.
+ * Returns NULL on allocation failure. */
+static struct sync_pt *timeline_dup(struct sync_pt *pt)
+{
+	struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+	struct mali_sync_pt *new_mpt;
+	struct sync_pt *new_pt = sync_pt_create(pt->parent, sizeof(struct mali_sync_pt));
+
+	if (!new_pt)
+	{
+		return NULL;
+	}
+
+	new_mpt = to_mali_sync_pt(new_pt);
+	new_mpt->order = mpt->order;
+	/* Also carry over the error state; previously it was left at whatever
+	 * sync_pt_create() produced, so a duplicate of a failed point could
+	 * report success from timeline_has_signaled(). */
+	new_mpt->error = mpt->error;
+
+	return new_pt;
+}
+
+/* sync_timeline_ops.has_signaled: non-zero once the timeline has advanced to
+ * (or past) this point; a negative errno if the point was signalled with an
+ * error. */
+static int timeline_has_signaled(struct sync_pt *pt)
+{
+	struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+	struct mali_sync_timeline *mtl = to_mali_sync_timeline(pt->parent);
+	s32 diff;
+
+	if (0 != mpt->error)
+	{
+		return mpt->error;
+	}
+
+	/* Signed distance with wrap-around semantics.  The previous
+	 * 'long diff = atomic_read(...) - mpt->order' performed the subtraction
+	 * in unsigned arithmetic (int - u32), so diff could never be negative
+	 * and every point appeared signalled immediately. */
+	diff = (s32)((u32)atomic_read(&mtl->signalled) - mpt->order);
+
+	return diff >= 0;
+}
+
+/* sync_timeline_ops.compare: order two points on the same timeline.
+ * Returns -1/0/1 as a comes before/equals/after b. */
+static int timeline_compare(struct sync_pt *a, struct sync_pt *b)
+{
+	struct mali_sync_pt *ma = container_of(a, struct mali_sync_pt, pt);
+	struct mali_sync_pt *mb = container_of(b, struct mali_sync_pt, pt);
+
+	/* Signed wrap-around comparison of the u32 order numbers.  The previous
+	 * 'long diff = ma->order - mb->order' subtracted in unsigned arithmetic,
+	 * so on 32-bit (long == 32 bits) the result was never negative and the
+	 * ordering was wrong whenever a preceded b. */
+	s32 diff = (s32)(ma->order - mb->order);
+
+	if (diff < 0)
+	{
+		return -1;
+	}
+	else if (diff == 0)
+	{
+		return 0;
+	}
+	else
+	{
+		return 1;
+	}
+}
+
+/* sync_timeline_ops.print_obj: debugfs dump of "<signalled>, <counter>". */
+static void timeline_print_tl(struct seq_file *s, struct sync_timeline *sync_timeline)
+{
+	struct mali_sync_timeline *mtl = to_mali_sync_timeline(sync_timeline);
+	int signalled = atomic_read(&mtl->signalled);
+	int counter = atomic_read(&mtl->counter);
+
+	seq_printf(s, "%u, %u", signalled, counter);
+}
+
+/* sync_timeline_ops.print_pt: debugfs dump of a point's order number. */
+static void timeline_print_pt(struct seq_file *s, struct sync_pt *sync_pt)
+{
+	seq_printf(s, "%u", to_mali_sync_pt(sync_pt)->order);
+}
+
+/* Callback table handed to sync_timeline_create(); also used by
+ * mali_sync_timeline_is_ours() to recognise Mali-owned timelines. */
+static struct sync_timeline_ops mali_timeline_ops = {
+	.driver_name    = "Mali",
+	.dup            = timeline_dup,
+	.has_signaled   = timeline_has_signaled,
+	.compare        = timeline_compare,
+	.print_obj      = timeline_print_tl,
+	.print_pt       = timeline_print_pt
+};
+
+/* Non-zero if @timeline was allocated by this driver (identified by its ops
+ * table). */
+int mali_sync_timeline_is_ours(struct sync_timeline *timeline)
+{
+	return timeline->ops == &mali_timeline_ops;
+}
+
+/* Allocate a Mali timeline with both sequence counters reset to zero.
+ * Returns NULL on allocation failure. */
+struct sync_timeline *mali_sync_timeline_alloc(const char * name)
+{
+	struct mali_sync_timeline *mtl;
+	struct sync_timeline *tl = sync_timeline_create(&mali_timeline_ops,
+	                                                sizeof(struct mali_sync_timeline), name);
+
+	if (!tl)
+	{
+		return NULL;
+	}
+
+	/* Initialise the counters in our private part of the object. */
+	mtl = to_mali_sync_timeline(tl);
+	atomic_set(&mtl->counter, 0);
+	atomic_set(&mtl->signalled, 0);
+
+	return tl;
+}
+
+/* Allocate the next sync point on @parent.  Each point receives the next
+ * order number from the timeline's counter; points must later be signalled
+ * in exactly this order.  Returns NULL on allocation failure. */
+struct sync_pt *mali_sync_pt_alloc(struct sync_timeline *parent)
+{
+	struct mali_sync_timeline *mtl = to_mali_sync_timeline(parent);
+	struct mali_sync_pt *mpt;
+	struct sync_pt *pt = sync_pt_create(parent, sizeof(struct mali_sync_pt));
+
+	if (!pt)
+	{
+		return NULL;
+	}
+
+	mpt = to_mali_sync_pt(pt);
+	mpt->order = atomic_inc_return(&mtl->counter);
+	mpt->error = 0;
+
+	return pt;
+}
+
+/* Signal @pt, advancing the timeline's 'signalled' counter to its order
+ * number.  @error, if non-zero, must be a negative errno and is recorded on
+ * the point so timeline_has_signaled() reports it.  The cmpxchg loop
+ * advances 'signalled' without a lock; if another signaller already moved it
+ * to or past this point, the out-of-order signalling is reported (debug
+ * builds only) and the update is skipped. */
+void mali_sync_signal_pt(struct sync_pt *pt, int error)
+{
+	struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+	struct mali_sync_timeline *mtl = to_mali_sync_timeline(pt->parent);
+	int signalled;
+	long diff;
+
+	if (0 != error)
+	{
+		MALI_DEBUG_ASSERT(0 > error);
+		mpt->error = error;
+	}
+
+	do {
+
+		signalled = atomic_read(&mtl->signalled);
+
+		diff = signalled - mpt->order;
+
+		if (diff > 0)
+		{
+			/* The timeline is already at or ahead of this point. This should not happen unless userspace
+			 * has been signalling fences out of order, so warn but don't violate the sync_pt API.
+			 * The warning is only in debug builds to prevent a malicious user being able to spam dmesg.
+			 */
+			MALI_DEBUG_PRINT_ERROR(("Sync points were triggerd in a different order to allocation!\n"));
+			return;
+		}
+	} while (atomic_cmpxchg(&mtl->signalled, signalled, mpt->order) != signalled);
+
+	sync_timeline_signal(pt->parent);
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_sync.h b/drivers/gpu/arm/mali400/mali/linux/mali_sync.h
new file mode 100644 (file)
index 0000000..efd8179
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_sync.h
+ *
+ */
+
+#ifndef _MALI_SYNC_H_
+#define _MALI_SYNC_H_
+
+#include <linux/version.h>
+#ifdef CONFIG_SYNC
+/* MALI_SEC */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+
+#include <linux/seq_file.h>
+#include <linux/sync.h>
+
+/*
+ * Create a stream object.
+ * Built on top of timeline object.
+ * Exposed as a file descriptor.
+ * Life-time controlled via the file descriptor:
+ * - dup to add a ref
+ * - close to remove a ref
+ */
+_mali_osk_errcode_t mali_stream_create(const char * name, int * out_fd);
+
+/*
+ * Create a fence in a stream object
+ */
+struct sync_pt *mali_stream_create_point(int tl_fd);
+int mali_stream_create_fence(struct sync_pt *pt);
+
+/*
+ * Validate a fd to be a valid fence
+ * No reference is taken.
+ *
+ * This function is only usable to catch unintentional user errors early,
+ * it does not stop malicious code changing the fd after this function returns.
+ */
+_mali_osk_errcode_t mali_fence_validate(int fd);
+
+
+/* Returns true if the specified timeline is allocated by Mali */
+int mali_sync_timeline_is_ours(struct sync_timeline *timeline);
+
+/* Allocates a timeline for Mali
+ *
+ * One timeline should be allocated per API context.
+ */
+struct sync_timeline *mali_sync_timeline_alloc(const char *name);
+
+/* Allocates a sync point within the timeline.
+ *
+ * The timeline must be the one allocated by mali_sync_timeline_alloc
+ *
+ * Sync points must be triggered in *exactly* the same order as they are allocated.
+ */
+struct sync_pt *mali_sync_pt_alloc(struct sync_timeline *parent);
+
+/* Signals a particular sync point
+ *
+ * Sync points must be triggered in *exactly* the same order as they are allocated.
+ *
+ * If they are signalled in the wrong order then a message will be printed in debug
+ * builds and otherwise attempts to signal order sync_pts will be ignored.
+ */
+void mali_sync_signal_pt(struct sync_pt *pt, int error);
+
+#endif
+#endif /* CONFIG_SYNC */
+#endif /* _MALI_SYNC_H_ */
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_sync_user.c b/drivers/gpu/arm/mali400/mali/linux/mali_sync_user.c
new file mode 100644 (file)
index 0000000..c346668
--- /dev/null
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_sync_user.c
+ *
+ */
+
+#ifdef CONFIG_SYNC
+
+#include <linux/sched.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/anon_inodes.h>
+#include <linux/version.h>
+#include <asm/uaccess.h>
+
+/* MALI_SEC */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_sync.h"
+
+/* release() handler for a stream fd: destroys the backing timeline when the
+ * last reference to the file goes away. */
+static int mali_stream_close(struct inode * inode, struct file * file)
+{
+	struct sync_timeline *tl = file->private_data;
+
+	BUG_ON(!tl);
+	sync_timeline_destroy(tl);
+	return 0;
+}
+
+/* File operations for the anonymous stream fd; also used as the identity
+ * check in mali_stream_create_point() (f_op == &stream_fops). */
+static struct file_operations stream_fops =
+{
+	.owner = THIS_MODULE,
+	.release = mali_stream_close,
+};
+
+/* Create a stream: a Mali timeline exposed as an anonymous read-only,
+ * close-on-exec file descriptor.  The timeline's lifetime follows the fd
+ * (mali_stream_close() destroys it on last release).
+ * On success *out_fd holds the new fd and _MALI_OSK_ERR_OK is returned;
+ * _MALI_OSK_ERR_FAULT on allocation or fd-installation failure. */
+_mali_osk_errcode_t mali_stream_create(const char * name, int *out_fd)
+{
+	struct sync_timeline * tl;
+	BUG_ON(!out_fd);
+
+	tl = mali_sync_timeline_alloc(name);
+	if (!tl)
+	{
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	*out_fd = anon_inode_getfd(name, &stream_fops, tl, O_RDONLY | O_CLOEXEC);
+
+	if (*out_fd < 0)
+	{
+		/* fd creation failed: tear the timeline down ourselves, since no
+		 * file now owns it. */
+		sync_timeline_destroy(tl);
+		return _MALI_OSK_ERR_FAULT;
+	}
+	else
+	{
+		return _MALI_OSK_ERR_OK;
+	}
+}
+
+/* Allocate a new sync point on the timeline behind stream fd @tl_fd.
+ * Returns NULL if the fd is invalid, is not one of our stream fds, or the
+ * allocation fails.
+ * The return type now matches the declaration in mali_sync.h
+ * (struct sync_pt *); the previous 'mali_sync_pt' spelling has no visible
+ * typedef and conflicted with the header. */
+struct sync_pt *mali_stream_create_point(int tl_fd)
+{
+	struct sync_timeline *tl;
+	struct sync_pt *pt;
+	struct file *tl_file;
+
+	tl_file = fget(tl_fd);
+	if (tl_file == NULL)
+		return NULL;
+
+	if (tl_file->f_op != &stream_fops)
+	{
+		/* Not a Mali stream fd. */
+		pt = NULL;
+		goto out;
+	}
+
+	tl = tl_file->private_data;
+	pt = mali_sync_pt_alloc(tl); /* NULL on allocation failure */
+
+out:
+	fput(tl_file);
+
+	return pt;
+}
+
+int mali_stream_create_fence(mali_sync_pt *pt)
+{
+       struct sync_fence *fence;
+       struct fdtable * fdt;
+       struct files_struct * files;
+       int fd = -1;
+
+       fence = sync_fence_create("mali_fence", pt);
+       if (!fence)
+       {
+               sync_pt_free(pt);
+               fd = -EFAULT;
+               goto out;
+       }
+
+       /* create a fd representing the fence */
+       fd = get_unused_fd();
+       if (fd < 0)
+       {
+               sync_fence_put(fence);
+               goto out;
+       }
+
+       files = current->files;
+       spin_lock(&files->file_lock);
+       fdt = files_fdtable(files);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+       __set_close_on_exec(fd, fdt);
+#else
+       FD_SET(fd, fdt->close_on_exec);
+#endif
+       spin_unlock(&files->file_lock);
+
+       /* bind fence to the new fd */
+       sync_fence_install(fence, fd);
+
+out:
+       return fd;
+}
+
+/* Check that @fd currently refers to a sync fence.  No reference is kept;
+ * this only catches accidental misuse early — a malicious caller can still
+ * swap the fd afterwards. */
+_mali_osk_errcode_t mali_fence_validate(int fd)
+{
+	struct sync_fence *fence = sync_fence_fdget(fd);
+
+	if (NULL == fence)
+	{
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	sync_fence_put(fence);
+	return _MALI_OSK_ERR_OK;
+}
+
+#endif
+#endif /* CONFIG_SYNC */
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_uk_types.h b/drivers/gpu/arm/mali400/mali/linux/mali_uk_types.h
new file mode 100644 (file)
index 0000000..d6efb8e
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_UK_TYPES_H__
+#define __MALI_UK_TYPES_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_uk_types.h>
+
+#endif /* __MALI_UK_TYPES_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_ukk_core.c b/drivers/gpu/arm/mali400/mali/linux/mali_ukk_core.c
new file mode 100644 (file)
index 0000000..ccf3692
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <linux/slab.h>     /* memort allocation functions */
+#include <asm/uaccess.h>    /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+#include "mali_sync.h"
+
+/* UK wrapper: negotiate the user/kernel API version for this session.
+ * Copies the requested version from user space, queries the core, and
+ * writes back the resolved version and compatibility flag. */
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs)
+{
+	_mali_uk_get_api_version_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	if (0 != get_user(kargs.version, &uargs->version))
+	{
+		return -EFAULT;
+	}
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_get_api_version(&kargs);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		return map_errcode(err);
+	}
+
+	if (0 != put_user(kargs.version, &uargs->version))
+	{
+		return -EFAULT;
+	}
+	if (0 != put_user(kargs.compatible, &uargs->compatible))
+	{
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* UK wrapper: block until a notification arrives for this session, then
+ * copy it out.  During core shutdown only the notification type is written
+ * back; otherwise the whole structure is copied (with the kernel context
+ * pointer scrubbed first). */
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs)
+{
+	_mali_uk_wait_for_notification_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_wait_for_notification(&kargs);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		return map_errcode(err);
+	}
+
+	if (_MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS == kargs.type)
+	{
+		if (0 != put_user(kargs.type, &uargs->type))
+		{
+			return -EFAULT;
+		}
+	}
+	else
+	{
+		kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+		if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_wait_for_notification_s)))
+		{
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+/* UK wrapper: post a notification of the user-supplied type to this
+ * session's queue.  Nothing is copied back to user space. */
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs)
+{
+	_mali_uk_post_notification_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	if (0 != get_user(kargs.type, &uargs->type))
+	{
+		return -EFAULT;
+	}
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_post_notification(&kargs);
+
+	return (_MALI_OSK_ERR_OK == err) ? 0 : map_errcode(err);
+}
+
+/* UK wrapper: fetch all per-session user settings and copy them to user
+ * space, scrubbing the kernel context pointer first. */
+int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs)
+{
+	_mali_uk_get_user_settings_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_get_user_settings(&kargs);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		return map_errcode(err);
+	}
+
+	kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+	if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_user_settings_s)))
+	{
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_SYNC
+/* MALI_SEC */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+/* UK wrapper: create a per-session sync stream (timeline fd) named
+ * "mali-<pid>" and copy the resulting fd back to user space. */
+int stream_create_wrapper(struct mali_session_data *session_data, _mali_uk_stream_create_s __user *uargs)
+{
+	_mali_uk_stream_create_s kargs;
+	_mali_osk_errcode_t err;
+	char name[32];
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	/* snprintf always NUL-terminates within the 32-byte buffer */
+	snprintf(name, 32, "mali-%u", _mali_osk_get_pid());
+
+	kargs.ctx = session_data;
+	err = mali_stream_create(name, &kargs.fd);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		return map_errcode(err);
+	}
+
+	kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+	if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_stream_create_s))) return -EFAULT;
+
+	return 0;
+}
+
+/* UK wrapper: verify that the user-supplied fd refers to a sync fence.
+ * Returns 0 when valid, -EINVAL when not, -EFAULT on copy failure. */
+int sync_fence_validate_wrapper(struct mali_session_data *session, _mali_uk_fence_validate_s __user *uargs)
+{
+	int fd;
+
+	if (0 != get_user(fd, &uargs->fd))
+	{
+		return -EFAULT;
+	}
+
+	if (_MALI_OSK_ERR_OK != mali_fence_validate(fd))
+	{
+		return -EINVAL;
+	}
+
+	return 0;
+}
+#endif
+#endif
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_ukk_gp.c b/drivers/gpu/arm/mali400/mali/linux/mali_ukk_gp.c
new file mode 100644 (file)
index 0000000..598e299
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <asm/uaccess.h>    /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+/* UK wrapper: submit a GP (geometry processor) job.  The user-space pointer
+ * is handed straight to the core, which performs its own copy-in. */
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs)
+{
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+	MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+	err = _mali_ukk_gp_start_job(session_data, uargs);
+
+	return (_MALI_OSK_ERR_OK == err) ? 0 : map_errcode(err);
+}
+
+/* UK wrapper: report the GP core hardware version to user space. */
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs)
+{
+	_mali_uk_get_gp_core_version_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+	MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_get_gp_core_version(&kargs);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		return map_errcode(err);
+	}
+
+	/* no known transactions to roll-back */
+
+	if (0 != put_user(kargs.version, &uargs->version))
+	{
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* UK wrapper: deliver user space's response to a GP job suspension and
+ * return the resulting cookie. */
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs)
+{
+	_mali_uk_gp_suspend_response_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+	MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+	if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_suspend_response_s)))
+	{
+		return -EFAULT;
+	}
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_gp_suspend_response(&kargs);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		return map_errcode(err);
+	}
+
+	if (0 != put_user(kargs.cookie, &uargs->cookie))
+	{
+		return -EFAULT;
+	}
+
+	/* no known transactions to roll-back */
+	return 0;
+}
+
+/* UK wrapper: report the number of GP cores to user space. */
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs)
+{
+	_mali_uk_get_gp_number_of_cores_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+	MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_get_gp_number_of_cores(&kargs);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		return map_errcode(err);
+	}
+
+	/* no known transactions to roll-back */
+
+	if (0 != put_user(kargs.number_of_cores, &uargs->number_of_cores))
+	{
+		return -EFAULT;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_ukk_mem.c b/drivers/gpu/arm/mali400/mali/linux/mali_ukk_mem.c
new file mode 100644 (file)
index 0000000..6ce45c1
--- /dev/null
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <asm/uaccess.h>    /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+/* UK wrapper: initialise the memory system for a session and report the
+ * Mali address base and memory size back to user space.  If writing the
+ * results back fails, the initialisation is rolled back with
+ * _mali_ukk_term_mem() so the session is not left half-initialised.
+ */
+int mem_init_wrapper(struct mali_session_data *session_data, _mali_uk_init_mem_s __user *uargs)
+{
+	_mali_uk_init_mem_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_init_mem(&kargs);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		return map_errcode(err);
+	}
+
+	if (0 != put_user(kargs.mali_address_base, &uargs->mali_address_base)) goto mem_init_rollback;
+	if (0 != put_user(kargs.memory_size, &uargs->memory_size)) goto mem_init_rollback;
+
+	return 0;
+
+mem_init_rollback:
+	{
+		/* Distinct name: the original redeclared "kargs" here,
+		 * shadowing the outer variable of a different type. */
+		_mali_uk_term_mem_s term_kargs;
+		term_kargs.ctx = session_data;
+		err = _mali_ukk_term_mem(&term_kargs);
+		if (_MALI_OSK_ERR_OK != err)
+		{
+			MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_init_mem, as a result of failing put_user(), failed\n"));
+		}
+	}
+	return -EFAULT;
+}
+
+/* UK wrapper: tear down the memory system for a session. */
+int mem_term_wrapper(struct mali_session_data *session_data, _mali_uk_term_mem_s __user *uargs)
+{
+	_mali_uk_term_mem_s args;
+	_mali_osk_errcode_t status;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	args.ctx = session_data;
+	status = _mali_ukk_term_mem(&args);
+
+	return (_MALI_OSK_ERR_OK != status) ? map_errcode(status) : 0;
+}
+
+/* UK wrapper: map external memory into the session's Mali address space.
+ * The cookie identifying the mapping is written back to user space; if
+ * that write fails after a successful map, the mapping is rolled back so
+ * it is not leaked.
+ */
+int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user * argument)
+{
+	_mali_uk_map_external_mem_s args;
+	_mali_osk_errcode_t status;
+
+	/* the session_data pointer was validated by the caller */
+	MALI_CHECK_NON_NULL(argument, -EINVAL);
+
+	/* fetch the call arguments from user space; copy_from_user returns
+	 * the number of bytes that were NOT copied */
+	if (0 != copy_from_user(&args, (void __user *)argument, sizeof(_mali_uk_map_external_mem_s)))
+	{
+		return -EFAULT;
+	}
+
+	args.ctx = session_data;
+	status = _mali_ukk_map_external_mem(&args);
+
+	if (0 != put_user(args.cookie, &argument->cookie))
+	{
+		if (_MALI_OSK_ERR_OK == status)
+		{
+			/* The map succeeded but the cookie could not be
+			 * delivered - undo the mapping. */
+			_mali_uk_unmap_external_mem_s unmap_args;
+
+			unmap_args.ctx = session_data;
+			unmap_args.cookie = args.cookie;
+			status = _mali_ukk_unmap_external_mem(&unmap_args);
+			if (_MALI_OSK_ERR_OK != status)
+			{
+				MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_unmap_external_mem, as a result of failing put_user(), failed\n"));
+			}
+		}
+		return -EFAULT;
+	}
+
+	/* report the result of _mali_ukk_map_external_mem() */
+	return map_errcode(status);
+}
+
+/* UK wrapper: unmap a previously mapped external memory region. */
+int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user * argument)
+{
+	_mali_uk_unmap_external_mem_s args;
+	_mali_osk_errcode_t status;
+
+	/* the session_data pointer was validated by the caller */
+	MALI_CHECK_NON_NULL(argument, -EINVAL);
+
+	/* fetch the call arguments from user space */
+	if (0 != copy_from_user(&args, (void __user *)argument, sizeof(_mali_uk_unmap_external_mem_s)))
+	{
+		return -EFAULT;
+	}
+
+	args.ctx = session_data;
+	status = _mali_ukk_unmap_external_mem(&args);
+
+	/* report the result of _mali_ukk_unmap_external_mem() */
+	return map_errcode(status);
+}
+
+#if defined(CONFIG_MALI400_UMP)
+/* UK wrapper: release a UMP memory attachment identified by its cookie. */
+int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user * argument)
+{
+	_mali_uk_release_ump_mem_s args;
+	_mali_osk_errcode_t status;
+
+	/* the session_data pointer was validated by the caller */
+	MALI_CHECK_NON_NULL(argument, -EINVAL);
+
+	/* fetch the call arguments from user space */
+	if (0 != copy_from_user(&args, (void __user *)argument, sizeof(_mali_uk_release_ump_mem_s)))
+	{
+		return -EFAULT;
+	}
+
+	args.ctx = session_data;
+	status = _mali_ukk_release_ump_mem(&args);
+
+	/* report the result of _mali_ukk_release_ump_mem() */
+	return map_errcode(status);
+}
+
+/* UK wrapper: attach a UMP memory handle to the session.  The cookie for
+ * the attachment is written back to user space; if that write fails after
+ * a successful attach, the attachment is released again.
+ */
+int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user * argument)
+{
+	_mali_uk_attach_ump_mem_s args;
+	_mali_osk_errcode_t status;
+
+	/* the session_data pointer was validated by the caller */
+	MALI_CHECK_NON_NULL(argument, -EINVAL);
+
+	/* fetch the call arguments from user space */
+	if (0 != copy_from_user(&args, (void __user *)argument, sizeof(_mali_uk_attach_ump_mem_s)))
+	{
+		return -EFAULT;
+	}
+
+	args.ctx = session_data;
+	status = _mali_ukk_attach_ump_mem(&args);
+
+	if (0 != put_user(args.cookie, &argument->cookie))
+	{
+		if (_MALI_OSK_ERR_OK == status)
+		{
+			/* The attach succeeded but the cookie could not be
+			 * delivered - release the attachment again. */
+			_mali_uk_release_ump_mem_s release_args;
+
+			release_args.ctx = session_data;
+			release_args.cookie = args.cookie;
+			status = _mali_ukk_release_ump_mem(&release_args);
+			if (_MALI_OSK_ERR_OK != status)
+			{
+				MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_attach_mem, as a result of failing put_user(), failed\n"));
+			}
+		}
+		return -EFAULT;
+	}
+
+	/* report the result of _mali_ukk_attach_ump_mem() */
+	return map_errcode(status);
+}
+#endif /* CONFIG_MALI400_UMP */
+
+/* UK wrapper: report the buffer size a full MMU page table dump needs. */
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user * uargs)
+{
+	_mali_uk_query_mmu_page_table_dump_size_s args;
+	_mali_osk_errcode_t status;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+	MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+	args.ctx = session_data;
+
+	status = _mali_ukk_query_mmu_page_table_dump_size(&args);
+	if (_MALI_OSK_ERR_OK != status)
+	{
+		return map_errcode(status);
+	}
+
+	if (0 != put_user(args.size, &uargs->size))
+	{
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* UK wrapper: dump the MMU page table into a user-supplied buffer.
+ *
+ * The dump is produced into a temporary kernel buffer and then copied to
+ * the user buffer; the internal pointers (register_writes,
+ * page_table_dump) are rebased from kernel addresses to offsets within
+ * the user buffer before being written back.
+ */
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs)
+{
+    _mali_uk_dump_mmu_page_table_s kargs;
+    _mali_osk_errcode_t err;
+    void *buffer;
+    int rc = -EFAULT;
+
+	/* validate input */
+    MALI_CHECK_NON_NULL(uargs, -EINVAL);
+	/* the session_data pointer was validated by caller */
+
+    kargs.buffer = NULL;
+
+    /* get location of user buffer */
+	if (0 != get_user(buffer, &uargs->buffer)) goto err_exit;
+	/* get size of mmu page table info buffer from user space */
+	if ( 0 != get_user(kargs.size, &uargs->size) ) goto err_exit;
+    /* verify we can access the whole of the user buffer */
+    if (!access_ok(VERIFY_WRITE, buffer, kargs.size)) goto err_exit;
+
+    /* allocate temporary buffer (kernel side) to store mmu page table info */
+    kargs.buffer = _mali_osk_valloc(kargs.size);
+    if (NULL == kargs.buffer)
+    {
+        rc = -ENOMEM;
+        goto err_exit;
+    }
+
+    kargs.ctx = session_data;
+    err = _mali_ukk_dump_mmu_page_table(&kargs);
+    if (_MALI_OSK_ERR_OK != err)
+    {
+        rc = map_errcode(err);
+        goto err_exit;
+    }
+
+    /* Copy the dump back and rebase the pointers.  Use the value already
+     * fetched with get_user() rather than dereferencing the __user struct
+     * directly (uargs->buffer), which is not a valid kernel-side access. */
+	if (0 != copy_to_user(buffer, kargs.buffer, kargs.size) ) goto err_exit;
+    if (0 != put_user((kargs.register_writes - (u32 *)kargs.buffer) + (u32 *)buffer, &uargs->register_writes)) goto err_exit;
+    if (0 != put_user((kargs.page_table_dump - (u32 *)kargs.buffer) + (u32 *)buffer, &uargs->page_table_dump)) goto err_exit;
+    if (0 != put_user(kargs.register_writes_size, &uargs->register_writes_size)) goto err_exit;
+    if (0 != put_user(kargs.page_table_dump_size, &uargs->page_table_dump_size)) goto err_exit;
+    rc = 0;
+
+err_exit:
+    if (kargs.buffer) _mali_osk_vfree(kargs.buffer);
+    return rc;
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_ukk_pp.c b/drivers/gpu/arm/mali400/mali/linux/mali_ukk_pp.c
new file mode 100644 (file)
index 0000000..0bbd5a5
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <asm/uaccess.h>    /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+/* UK wrapper: submit a PP (fragment) job.  If the submission produced a
+ * sync fence file descriptor, hand it back to user space. */
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs)
+{
+	_mali_osk_errcode_t status;
+	int fence = -1;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+	MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+	status = _mali_ukk_pp_start_job(session_data, uargs, &fence);
+	if (_MALI_OSK_ERR_OK != status)
+	{
+		return map_errcode(status);
+	}
+
+	if (-1 != fence && 0 != put_user(fence, &uargs->fence))
+	{
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* UK wrapper: report the number of PP cores to user space. */
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs)
+{
+	_mali_uk_get_pp_number_of_cores_s args;
+	_mali_osk_errcode_t status;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+	MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+	args.ctx = session_data;
+	status = _mali_ukk_get_pp_number_of_cores(&args);
+	if (_MALI_OSK_ERR_OK != status)
+	{
+		return map_errcode(status);
+	}
+
+	if (0 != put_user(args.number_of_cores, &uargs->number_of_cores))
+	{
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* UK wrapper: query the PP core version and copy it back to user space. */
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs)
+{
+	_mali_uk_get_pp_core_version_s args;
+	_mali_osk_errcode_t status;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+	MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+	args.ctx = session_data;
+	status = _mali_ukk_get_pp_core_version(&args);
+	if (_MALI_OSK_ERR_OK != status)
+	{
+		return map_errcode(status);
+	}
+
+	if (0 != put_user(args.version, &uargs->version))
+	{
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* UK wrapper: disable write-back units for a queued PP job.  The
+ * underlying call has no failure mode to report. */
+int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_disable_wb_s __user *uargs)
+{
+	_mali_uk_pp_disable_wb_s args;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+	MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+	if (0 != copy_from_user(&args, uargs, sizeof(_mali_uk_pp_disable_wb_s)))
+	{
+		return -EFAULT;
+	}
+
+	args.ctx = session_data;
+	_mali_ukk_pp_job_disable_wb(&args);
+
+	return 0;
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_ukk_profiling.c b/drivers/gpu/arm/mali400/mali/linux/mali_ukk_profiling.c
new file mode 100644 (file)
index 0000000..236f8e5
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <asm/uaccess.h>    /* user space access */
+#include <linux/slab.h>
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+/* UK wrapper: start profiling and report the event-count limit actually
+ * granted back to user space. */
+int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs)
+{
+	_mali_uk_profiling_start_s args;
+	_mali_osk_errcode_t status;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	if (0 != copy_from_user(&args, uargs, sizeof(_mali_uk_profiling_start_s)))
+	{
+		return -EFAULT;
+	}
+
+	args.ctx = session_data;
+	status = _mali_ukk_profiling_start(&args);
+	if (_MALI_OSK_ERR_OK != status)
+	{
+		return map_errcode(status);
+	}
+
+	if (0 != put_user(args.limit, &uargs->limit))
+	{
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* UK wrapper: record one user-supplied profiling event. */
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs)
+{
+	_mali_uk_profiling_add_event_s args;
+	_mali_osk_errcode_t status;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	if (0 != copy_from_user(&args, uargs, sizeof(_mali_uk_profiling_add_event_s)))
+	{
+		return -EFAULT;
+	}
+
+	args.ctx = session_data;
+	status = _mali_ukk_profiling_add_event(&args);
+
+	return (_MALI_OSK_ERR_OK != status) ? map_errcode(status) : 0;
+}
+
+/* UK wrapper: stop profiling and report the number of recorded events. */
+int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs)
+{
+	_mali_uk_profiling_stop_s args;
+	_mali_osk_errcode_t status;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	args.ctx = session_data;
+	status = _mali_ukk_profiling_stop(&args);
+	if (_MALI_OSK_ERR_OK != status)
+	{
+		return map_errcode(status);
+	}
+
+	if (0 != put_user(args.count, &uargs->count))
+	{
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* UK wrapper: fetch one recorded profiling event by index and copy the
+ * whole result structure back to user space. */
+int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs)
+{
+	_mali_uk_profiling_get_event_s args;
+	_mali_osk_errcode_t status;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	if (0 != get_user(args.index, &uargs->index))
+	{
+		return -EFAULT;
+	}
+
+	args.ctx = session_data;
+
+	status = _mali_ukk_profiling_get_event(&args);
+	if (_MALI_OSK_ERR_OK != status)
+	{
+		return map_errcode(status);
+	}
+
+	/* scrub the kernel context pointer before the struct reaches user
+	 * space */
+	args.ctx = NULL;
+	if (0 != copy_to_user(uargs, &args, sizeof(_mali_uk_profiling_get_event_s)))
+	{
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* UK wrapper: discard all recorded profiling events. */
+int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs)
+{
+	_mali_uk_profiling_clear_s args;
+	_mali_osk_errcode_t status;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	args.ctx = session_data;
+	status = _mali_ukk_profiling_clear(&args);
+
+	return (_MALI_OSK_ERR_OK != status) ? map_errcode(status) : 0;
+}
+
+/* UK wrapper: copy a bounded array of software counters from user space
+ * into a temporary kernel buffer and hand it to the profiling core. */
+int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs)
+{
+	_mali_uk_sw_counters_report_s args;
+	_mali_osk_errcode_t status;
+	u32 *counters;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	if (0 != copy_from_user(&args, uargs, sizeof(_mali_uk_sw_counters_report_s)))
+	{
+		return -EFAULT;
+	}
+
+	/* make sure that args.num_counters is [at least somewhat] sane */
+	if (args.num_counters > 10000)
+	{
+		MALI_DEBUG_PRINT(1, ("User space attempted to allocate too many counters.\n"));
+		return -EINVAL;
+	}
+
+	/* no cast needed on kmalloc() in C */
+	counters = kmalloc(sizeof(u32) * args.num_counters, GFP_KERNEL);
+	if (NULL == counters)
+	{
+		return -ENOMEM;
+	}
+
+	if (0 != copy_from_user(counters, args.counters, sizeof(u32) * args.num_counters))
+	{
+		kfree(counters);
+		return -EFAULT;
+	}
+
+	args.ctx = session_data;
+	args.counters = counters;
+
+	status = _mali_ukk_sw_counters_report(&args);
+
+	kfree(counters);
+
+	return (_MALI_OSK_ERR_OK != status) ? map_errcode(status) : 0;
+}
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_ukk_vsync.c b/drivers/gpu/arm/mali400/mali/linux/mali_ukk_vsync.c
new file mode 100644 (file)
index 0000000..184aa08
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <asm/uaccess.h>    /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+
+/* UK wrapper: forward a vsync event notification from user space to the
+ * driver core. */
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs)
+{
+	_mali_uk_vsync_event_report_s args;
+	_mali_osk_errcode_t status;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	if (0 != copy_from_user(&args, uargs, sizeof(_mali_uk_vsync_event_report_s)))
+	{
+		return -EFAULT;
+	}
+
+	args.ctx = session_data;
+	status = _mali_ukk_vsync_event_report(&args);
+
+	return (_MALI_OSK_ERR_OK != status) ? map_errcode(status) : 0;
+}
+
diff --git a/drivers/gpu/arm/mali400/mali/linux/mali_ukk_wrappers.h b/drivers/gpu/arm/mali400/mali/linux/mali_ukk_wrappers.h
new file mode 100644 (file)
index 0000000..0ad27fa
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk_wrappers.h
+ * Defines the wrapper functions for each user-kernel function
+ */
+
+#ifndef __MALI_UKK_WRAPPERS_H__
+#define __MALI_UKK_WRAPPERS_H__
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* Core / session wrappers */
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs);
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs);
+int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs);
+#if defined(CONFIG_SYNC)
+/* MALI_SEC */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+int stream_create_wrapper(struct mali_session_data *session_data, _mali_uk_stream_create_s __user *uargs);
+int sync_fence_validate_wrapper(struct mali_session_data *session, _mali_uk_fence_validate_s __user *uargs);
+#endif
+#endif
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs);
+/* Memory subsystem wrappers (mali_ukk_mem.c) */
+int mem_init_wrapper(struct mali_session_data *session_data, _mali_uk_init_mem_s __user *uargs);
+int mem_term_wrapper(struct mali_session_data *session_data, _mali_uk_term_mem_s __user *uargs);
+int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user * argument);
+int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user * argument);
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user * uargs);
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs);
+
+#if defined(CONFIG_MALI400_UMP)
+int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user * argument);
+int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user * argument);
+#endif
+
+/* PP (fragment processor) wrappers (mali_ukk_pp.c) */
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs);
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs);
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs);
+int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_disable_wb_s __user *uargs);
+/* GP (vertex processor) wrappers (mali_ukk_gp.c) */
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs);
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs);
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs);
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs);
+
+/* Profiling wrappers (mali_ukk_profiling.c) */
+int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs);
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs);
+int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs);
+int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs);
+int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs);
+int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs);
+
+/* VSync wrapper (mali_ukk_vsync.c) */
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs);
+
+
+/* Translate a _mali_osk_errcode_t into a negative errno value. */
+int map_errcode( _mali_osk_errcode_t err );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_WRAPPERS_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/platform/exynos4/exynos4.c b/drivers/gpu/arm/mali400/mali/platform/exynos4/exynos4.c
new file mode 100644 (file)
index 0000000..f13116b
--- /dev/null
@@ -0,0 +1,371 @@
+/*
+ * Mali400 platform glue for Samsung Exynos 4 SoCs
+ *
+ * Copyright 2013 by Samsung Electronics Co., Ltd.
+ * Author: Tomasz Figa <t.figa@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software FoundatIon.
+ */
+
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/mali/mali_utgard.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+#ifdef CONFIG_MALI400_PROFILING
+#include "mali_osk_profiling.h"
+#endif
+
+#include "exynos4.h"
+
+/* Per-SoC configuration: DVFS table and whether a separate SMMU clock
+ * must be managed. */
+struct mali_exynos_variant {
+       const struct mali_exynos_dvfs_step *steps;
+       unsigned int nr_steps;
+       unsigned int has_smmuclk;
+};
+
+/* One DVFS operating point.  Thresholds are on the 0-255 load scale
+ * (see the MALI_DVFS_STEP() macro and exynos_update_dvfs()). */
+struct mali_exynos_dvfs_step {
+       unsigned int rate;
+       unsigned int voltage;
+       unsigned int downthreshold;
+       unsigned int upthreshold;
+};
+
+/* Driver state for the Exynos4 Mali platform glue. */
+struct mali_exynos_drvdata {
+       struct device *dev;
+
+       const struct mali_exynos_dvfs_step *steps;
+       unsigned int nr_steps;
+       unsigned int has_smmuclk;
+
+       struct clk *pll;
+       struct clk *mux1;
+       struct clk *mux2;
+       struct clk *sclk;
+       struct clk *smmu;
+       struct clk *g3d;
+
+       struct regulator *vdd_g3d;
+
+       mali_power_mode power_mode;
+       unsigned int dvfs_step;
+       unsigned int load;
+
+       struct workqueue_struct *dvfs_workqueue;
+       struct work_struct dvfs_work;
+};
+
+extern struct platform_device *mali_platform_device;
+
+/* Single global instance; this glue supports one Mali device only. */
+static struct mali_exynos_drvdata *mali;
+
+/*
+ * DVFS tables
+ */
+
+/* freq in MHz, voltage in uV; down/up thresholds are given in percent and
+ * converted here to the 0-255 load scale used by exynos_dvfs_work(). */
+#define MALI_DVFS_STEP(freq, voltage, down, up) \
+       {freq, voltage, (256 * down) / 100, (256 * up) / 100}
+
+/* voltage 0: Exynos3250 leaves vdd_g3d under PMIC/bootloader control */
+static const struct mali_exynos_dvfs_step mali_exynos_dvfs_step_3250[] = {
+       MALI_DVFS_STEP(134,       0,  0, 100)
+};
+
+static const struct mali_exynos_dvfs_step mali_exynos_dvfs_step_4210[] = {
+       MALI_DVFS_STEP(160,  950000,  0,  90),
+       MALI_DVFS_STEP(266, 1050000, 85, 100)
+};
+
+static const struct mali_exynos_dvfs_step mali_exynos_dvfs_step_4x12[] = {
+       MALI_DVFS_STEP(160,  875000,  0,  70),
+       MALI_DVFS_STEP(266,  900000, 62,  90),
+       MALI_DVFS_STEP(350,  950000, 85,  90),
+       MALI_DVFS_STEP(440, 1025000, 85, 100)
+};
+
+static const struct mali_exynos_dvfs_step mali_exynos_dvfs_step_4x12_prime[] = {
+       MALI_DVFS_STEP(160,  875000,  0,  70),
+       MALI_DVFS_STEP(266,  900000, 62,  90),
+       MALI_DVFS_STEP(350,  950000, 85,  90),
+       MALI_DVFS_STEP(440, 1025000, 85,  90),
+       MALI_DVFS_STEP(533, 1075000, 95, 100)
+};
+
+/*
+ * Variants
+ */
+
+static const struct mali_exynos_variant mali_variant_3250= {
+       .steps = mali_exynos_dvfs_step_3250,
+       .nr_steps = ARRAY_SIZE(mali_exynos_dvfs_step_3250),
+       .has_smmuclk = true,
+};
+
+static const struct mali_exynos_variant mali_variant_4210 = {
+       .steps = mali_exynos_dvfs_step_4210,
+       .nr_steps = ARRAY_SIZE(mali_exynos_dvfs_step_4210),
+};
+
+static const struct mali_exynos_variant mali_variant_4x12 = {
+       .steps = mali_exynos_dvfs_step_4x12,
+       .nr_steps = ARRAY_SIZE(mali_exynos_dvfs_step_4x12),
+};
+
+static const struct mali_exynos_variant mali_variant_4x12_prime = {
+       .steps = mali_exynos_dvfs_step_4x12_prime,
+       .nr_steps = ARRAY_SIZE(mali_exynos_dvfs_step_4x12_prime),
+};
+
+/* Device-tree match table; .data selects the per-SoC variant above. */
+const struct of_device_id mali_of_matches[] = {
+       { .compatible = "samsung,exynos3250-g3d",
+                                       .data = &mali_variant_3250, },
+       { .compatible = "samsung,exynos4210-g3d",
+                                       .data = &mali_variant_4210, },
+       { .compatible = "samsung,exynos4x12-g3d",
+                                       .data = &mali_variant_4x12, },
+       { .compatible = "samsung,exynos4x12-prime-g3d",
+                                       .data = &mali_variant_4x12_prime, },
+       { /* Sentinel */ }
+};
+
+#ifdef CONFIG_MALI400_PROFILING
+/* Emit a GPU frequency/voltage change event into the profiling stream. */
+static inline void _mali_osk_profiling_add_gpufreq_event(int rate, int vol)
+{
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                MALI_PROFILING_EVENT_CHANNEL_GPU |
+                MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                rate, vol, 0, 0, 0);
+}
+#else
+/* Profiling disabled: compile to a no-op. */
+static inline void _mali_osk_profiling_add_gpufreq_event(int rate, int vol)
+{
+}
+#endif
+
+/*
+ * DVFS control
+ */
+
+/* Transition the GPU to DVFS step "step".
+ *
+ * Ordering is deliberate: when stepping down (or re-applying the current
+ * step) the clock is lowered before the voltage is dropped; when stepping
+ * up the voltage is raised first and the clock only afterwards.  Either
+ * way the GPU is never clocked faster than its current supply allows.
+ */
+static void mali_exynos_set_dvfs_step(struct mali_exynos_drvdata *mali,
+                                                       unsigned int step)
+{
+       const struct mali_exynos_dvfs_step *next = &mali->steps[step];
+
+       if (step <= mali->dvfs_step)
+               clk_set_rate(mali->sclk, next->rate * 1000000);
+
+       regulator_set_voltage(mali->vdd_g3d,
+                                       next->voltage, next->voltage);
+
+       if (step > mali->dvfs_step)
+               clk_set_rate(mali->sclk, next->rate * 1000000);
+
+       /* report the new operating point to the profiling stream */
+       _mali_osk_profiling_add_gpufreq_event(next->rate * 1000000,
+                regulator_get_voltage(mali->vdd_g3d) / 1000);
+       mali->dvfs_step = step;
+}
+
+/* Deferred DVFS decision: compare the last reported load against the
+ * current step's thresholds and move one step up or down if warranted. */
+static void exynos_dvfs_work(struct work_struct *work)
+{
+	struct mali_exynos_drvdata *mali = container_of(work,
+					struct mali_exynos_drvdata, dvfs_work);
+	unsigned int step = mali->dvfs_step;
+	const struct mali_exynos_dvfs_step *cur = &mali->steps[step];
+
+	/* Clamp at the table edges instead of relying on the thresholds to
+	 * make out-of-range steps unreachable: an inconsistent DVFS table
+	 * must not be able to panic the kernel (the old BUG_ON), and
+	 * "--step" on an unsigned zero must never be evaluated. */
+	if (mali->load > cur->upthreshold && step + 1 < mali->nr_steps)
+		++step;
+	else if (mali->load < cur->downthreshold && step > 0)
+		--step;
+
+	if (step != mali->dvfs_step)
+		mali_exynos_set_dvfs_step(mali, step);
+}
+
+/* Utilization callback from the Mali core: record the load (clamped to
+ * the 0-255 scale) and defer the DVFS decision to the workqueue, since
+ * clk/regulator calls may sleep. */
+static void exynos_update_dvfs(unsigned int load)
+{
+	mali->load = (load > 255) ? 255 : load;
+
+	queue_work(mali->dvfs_workqueue, &mali->dvfs_work);
+}
+
+/*
+ * Power management
+ */
+
+/* Power up or power down the GPU clocks for the requested power mode.
+ * Enable order (g3d bus clock, then sclk, then SMMU) is the reverse of
+ * the disable order.  A transition to the already-active mode is a
+ * warned no-op. */
+_mali_osk_errcode_t mali_platform_power_mode_change(mali_power_mode power_mode)
+{
+       if (WARN_ON(mali->power_mode == power_mode))
+               MALI_SUCCESS;
+
+       switch (power_mode) {
+       case MALI_POWER_MODE_ON:
+               /* restore the lowest operating point before ungating */
+               mali_exynos_set_dvfs_step(mali, 0);
+               clk_prepare_enable(mali->g3d);
+               clk_prepare_enable(mali->sclk);
+               if (mali->has_smmuclk)
+                       clk_prepare_enable(mali->smmu);
+               break;
+
+       case MALI_POWER_MODE_LIGHT_SLEEP:
+       case MALI_POWER_MODE_DEEP_SLEEP:
+               if (mali->has_smmuclk)
+                       clk_disable_unprepare(mali->smmu);
+               clk_disable_unprepare(mali->sclk);
+               clk_disable_unprepare(mali->g3d);
+               /* report frequency 0 so profiling sees the gate */
+               _mali_osk_profiling_add_gpufreq_event(0, 0);
+               break;
+       }
+
+       mali->power_mode = power_mode;
+
+       MALI_SUCCESS;
+}
+
+/*
+ * Platform-specific initialization/cleanup
+ */
+
+/* Platform data handed to the Mali core driver.
+ * NOTE(review): fb_start/fb_size presumably describe the address window
+ * the GPU may map for framebuffers on these SoCs - confirm against the
+ * memory map before changing. */
+static struct mali_gpu_device_data mali_exynos_gpu_data = {
+       .shared_mem_size = SZ_256M,
+       .fb_start = 0x40000000,
+       .fb_size = 0xb1000000,
+       .utilization_interval = 100, /* 100ms in Tizen */
+       .utilization_handler = exynos_update_dvfs,
+};
+
+/* Probe-time platform setup: match the SoC variant from the device tree,
+ * rearrange the platform device resources into the interleaved
+ * (MEM, IRQ, MEM, IRQ, ...) order the Mali core expects, acquire clocks
+ * and the vdd_g3d regulator, and bring up DVFS and runtime PM.
+ *
+ * Returns _MALI_OSK_ERR_OK on success or a negative errno on failure
+ * (matching the file's existing convention).
+ */
+_mali_osk_errcode_t mali_platform_init(void)
+{
+	struct platform_device *pdev = mali_platform_device;
+	const struct mali_exynos_variant *variant;
+	const struct of_device_id *match;
+	struct resource *old_res, *new_res;
+	unsigned int i, irq_res, mem_res;
+	struct device_node *np;
+	int ret;
+
+	if (WARN_ON(!pdev))
+		return -ENODEV;
+
+	MALI_DEBUG_PRINT(4, ("mali_platform_device_register() called\n"));
+
+	pdev->dev.platform_data = &mali_exynos_gpu_data;
+
+	np = pdev->dev.of_node;
+	if (WARN_ON(!np))
+		return -ENODEV;
+
+	match = of_match_node(mali_of_matches, np);
+	if (WARN_ON(!match))
+		return -ENODEV;
+
+	variant = match->data;
+
+	old_res = pdev->resource;
+	/* kcalloc checks the n * size multiplication for overflow */
+	new_res = kcalloc(pdev->num_resources, sizeof(*new_res), GFP_KERNEL);
+	if (WARN_ON(!new_res))
+		return -ENOMEM;
+
+	/* Copy first resource */
+	memcpy(new_res, old_res++, sizeof(*new_res));
+
+	/* Rearrange next resources: interleave as MEM, IRQ, MEM, IRQ, ... */
+	irq_res = 0;
+	mem_res = 0;
+	for (i = 1; i < pdev->num_resources; ++i, ++old_res) {
+		if (resource_type(old_res) == IORESOURCE_MEM)
+			memcpy(&new_res[1 + 2 * mem_res++],
+						old_res, sizeof(*old_res));
+		else if (resource_type(old_res) == IORESOURCE_IRQ)
+			memcpy(&new_res[2 + 2 * irq_res++],
+						old_res, sizeof(*old_res));
+	}
+
+	kfree(pdev->resource);
+	pdev->resource = new_res;
+
+	mali = devm_kzalloc(&pdev->dev, sizeof(*mali), GFP_KERNEL);
+	if (WARN_ON(!mali))
+		return -ENOMEM;
+
+	mali->dev = &pdev->dev;
+	mali->steps = variant->steps;
+	mali->nr_steps = variant->nr_steps;
+	mali->has_smmuclk = variant->has_smmuclk;
+
+	mali->pll = devm_clk_get(mali->dev, "pll");
+	if (WARN_ON(IS_ERR(mali->pll)))
+		return PTR_ERR(mali->pll);
+
+	mali->mux1 = devm_clk_get(mali->dev, "mux1");
+	if (WARN_ON(IS_ERR(mali->mux1)))
+		return PTR_ERR(mali->mux1);
+
+	mali->mux2 = devm_clk_get(mali->dev, "mux2");
+	if (WARN_ON(IS_ERR(mali->mux2)))
+		return PTR_ERR(mali->mux2);
+
+	mali->sclk = devm_clk_get(mali->dev, "sclk");
+	if (WARN_ON(IS_ERR(mali->sclk)))
+		return PTR_ERR(mali->sclk);
+
+	if (mali->has_smmuclk) {
+		mali->smmu = devm_clk_get(mali->dev, "smmu");
+		if (WARN_ON(IS_ERR(mali->smmu)))
+			return PTR_ERR(mali->smmu);
+	}
+
+	mali->g3d = devm_clk_get(mali->dev, "g3d");
+	if (WARN_ON(IS_ERR(mali->g3d)))
+		return PTR_ERR(mali->g3d);
+
+	mali->vdd_g3d = devm_regulator_get(mali->dev, "vdd_g3d");
+	if (WARN_ON(IS_ERR(mali->vdd_g3d)))
+		return PTR_ERR(mali->vdd_g3d);
+
+	mali->dvfs_workqueue = create_singlethread_workqueue("mali_dvfs");
+	if (WARN_ON(!mali->dvfs_workqueue))
+		return -ENOMEM; /* was -EFAULT; this is an allocation failure */
+
+	mali->power_mode = MALI_POWER_MODE_LIGHT_SLEEP;
+
+	INIT_WORK(&mali->dvfs_work, exynos_dvfs_work);
+
+	/* regulator_enable() is __must_check; propagate its failure and do
+	 * not leave the workqueue behind */
+	ret = regulator_enable(mali->vdd_g3d);
+	if (WARN_ON(ret < 0)) {
+		destroy_workqueue(mali->dvfs_workqueue);
+		return ret;
+	}
+
+	clk_set_parent(mali->mux1, mali->pll);
+	clk_set_parent(mali->mux2, mali->mux1);
+	mali_exynos_set_dvfs_step(mali, 0);
+
+	pm_runtime_set_autosuspend_delay(&pdev->dev, 300);
+	pm_runtime_use_autosuspend(&pdev->dev);
+
+	pm_runtime_enable(&pdev->dev);
+
+	MALI_SUCCESS;
+}
+
+/* Driver-exit teardown: stop runtime PM, quiesce and release the DVFS
+ * workqueue, and cut the GPU supply. */
+_mali_osk_errcode_t mali_platform_deinit(void)
+{
+	struct platform_device *pdev = mali_platform_device;
+
+	pm_runtime_disable(&pdev->dev);
+
+	/* Make sure no DVFS work is queued or running once the driver is
+	 * gone, and free the workqueue (previously leaked). */
+	cancel_work_sync(&mali->dvfs_work);
+	destroy_workqueue(mali->dvfs_workqueue);
+
+	regulator_disable(mali->vdd_g3d);
+
+	/* report frequency 0 so profiling sees the shutdown */
+	_mali_osk_profiling_add_gpufreq_event(0, 0);
+
+	MALI_SUCCESS;
+}
diff --git a/drivers/gpu/arm/mali400/mali/platform/exynos4/exynos4.h b/drivers/gpu/arm/mali400/mali/platform/exynos4/exynos4.h
new file mode 100644 (file)
index 0000000..d4f5640
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Mali400 platform glue for Samsung Exynos 4 SoCs
+ *
+ * Copyright 2013 by Samsung Electronics Co., Ltd.
+ * Author: Tomasz Figa <t.figa@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __EXYNOS4_H__
+#define __EXYNOS4_H__
+
+#include "mali_osk.h"
+
+/** @brief description of power change reasons
+ */
+typedef enum mali_power_mode_tag
+{
+       MALI_POWER_MODE_ON,
+       MALI_POWER_MODE_LIGHT_SLEEP,
+       MALI_POWER_MODE_DEEP_SLEEP,
+} mali_power_mode;
+
+/** @brief Platform specific setup and initialisation of MALI
+ *
+ * This is called from the entrypoint of the driver to initialize the platform
+ *
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_init(void);
+
+/** @brief Platform specific deinitialisation of MALI
+ *
+ * This is called on the exit of the driver to terminate the platform
+ *
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_deinit(void);
+
+/** @brief Platform specific powerdown sequence of MALI
+ *
+ * Call as part of platform init if there is no PMM support, else the
+ * PMM will call it.
+ * There are three power modes defined:
+ *  1) MALI_POWER_MODE_ON
+ *  2) MALI_POWER_MODE_LIGHT_SLEEP
+ *  3) MALI_POWER_MODE_DEEP_SLEEP
+ * MALI power management module transitions to MALI_POWER_MODE_LIGHT_SLEEP mode when MALI is idle
+ * for idle timer (software timer defined in mali_pmm_policy_jobcontrol.h) duration, MALI transitions
+ * to MALI_POWER_MODE_LIGHT_SLEEP mode during timeout if there are no more jobs queued.
+ * MALI power management module transitions to MALI_POWER_MODE_DEEP_SLEEP mode when OS does system power
+ * off.
+ * Customer has to add power down code when MALI transitions to MALI_POWER_MODE_LIGHT_SLEEP or MALI_POWER_MODE_DEEP_SLEEP
+ * mode.
+ * MALI_POWER_MODE_ON mode is entered when the MALI is to powered up. Some customers want to control voltage regulators during
+ * the whole system powers on/off. Customer can track in this function whether the MALI is powered up from
+ * MALI_POWER_MODE_LIGHT_SLEEP or MALI_POWER_MODE_DEEP_SLEEP mode and manage the voltage regulators as well.
+ * @param power_mode defines the power modes
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_power_mode_change(mali_power_mode power_mode);
+
+/** @brief Platform specific handling of GPU utilization data
+ *
+ * When GPU utilization data is enabled, this function will be
+ * periodically called.
+ *
+ * @param utilization The workload utilization of the Mali GPU. 0 = no utilization, 256 = full utilization.
+ */
+void mali_gpu_utilization_handler(unsigned int utilization);
+
+/** @brief Platform specific power management initialization
+ *
+ * Initializes platform-specific part of power management.
+ */
+_mali_osk_errcode_t exynos_pmm_init(void);
+
+/** @brief Platform specific power management deinitialization
+ *
+ * Deinitializes platform-specific part of power management.
+ */
+_mali_osk_errcode_t exynos_pmm_deinit(void);
+
+extern const struct of_device_id mali_of_matches[];
+
+#endif
diff --git a/drivers/gpu/arm/mali400/mali/platform/mali_platform.h b/drivers/gpu/arm/mali400/mali/platform/mali_platform.h
new file mode 100644 (file)
index 0000000..9ce814f
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.h
+ * Platform specific Mali driver functions
+ */
+
+#ifndef __MALI_PLATFORM_H__
+#define __MALI_PLATFORM_H__
+
+#include "mali_osk.h"
+
+#ifdef CONFIG_CPU_EXYNOS4210
+#define MALI_DVFS_STEPS 2
+#else
+#define MALI_DVFS_STEPS 5
+#endif
+
+/* @Enable or Disable Mali GPU Bottom Lock feature */
+#define MALI_GPU_BOTTOM_LOCK 1
+
+#define MALI_VOLTAGE_LOCK 1
+
+/* @Enable or Disable the CPU frequency lock when the GPU clock is 440 Mhz */
+#define CPUFREQ_LOCK_DURING_440 0
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @brief description of power change reasons
+ */
+typedef enum mali_power_mode_tag
+{
+       MALI_POWER_MODE_ON,           /**< Power Mali on */
+       MALI_POWER_MODE_LIGHT_SLEEP,  /**< Mali has been idle for a short time, or runtime PM suspend */
+       MALI_POWER_MODE_DEEP_SLEEP,   /**< Mali has been idle for a long time, or OS suspend */
+} mali_power_mode;
+
+/** @brief Platform specific setup and initialisation of MALI
+ *
+ * This is called from the entrypoint of the driver to initialize the platform
+ *
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_init(void);
+
+/** @brief Platform specific deinitialisation of MALI
+ *
+ * This is called on the exit of the driver to terminate the platform
+ *
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_deinit(void);
+
+/** @brief Platform specific powerdown sequence of MALI
+ *
+ * Notification from the Mali device driver stating the new desired power mode.
+ * MALI_POWER_MODE_ON must be obeyed, while the other modes are optional.
+ * @param power_mode defines the power modes
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_power_mode_change(mali_power_mode power_mode);
+
+
+/** @brief Platform specific handling of GPU utilization data
+ *
+ * When GPU utilization data is enabled, this function will be
+ * periodically called.
+ *
+ * @param utilization The workload utilization of the Mali GPU. 0 = no utilization, 256 = full utilization.
+ */
+void mali_gpu_utilization_handler(u32 utilization);
+
+/** @brief Setting the power domain of MALI
+ *
+ * This function sets the power domain of MALI if Linux run time power management is enabled
+ *
+ * @param dev Reference to struct platform_device (defined in linux) used by MALI GPU
+ */
+//void set_mali_parent_power_domain(void* dev);
+void mali_utilization_suspend(void);
+
+#ifdef CONFIG_REGULATOR
+int mali_regulator_get_usecount(void);
+void mali_regulator_disable(void);
+void mali_regulator_enable(void);
+void mali_regulator_set_voltage(int min_uV, int max_uV);
+#endif
+mali_bool mali_clk_set_rate(unsigned int clk, unsigned int mhz);
+unsigned long mali_clk_get_rate(void);
+void mali_clk_put(mali_bool binc_mali_clk);
+
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+_mali_osk_errcode_t mali_platform_powerdown(u32 cores);
+_mali_osk_errcode_t mali_platform_powerup(u32 cores);
+#endif
+
+
+#if USING_MALI_PMM
+#if MALI_POWER_MGMT_TEST_SUITE
+/** @brief function to get status of individual cores
+ *
+ * This function is used by power management test suite to get the status of powered up/down the number
+ * of cores
+ * @param utilization The workload utilization of the Mali GPU. 0 = no utilization, 256 = full utilization.
+ */
+u32 pmu_get_power_up_down_info(void);
+#endif
+#endif
+
+#if MALI_DVFS_ENABLED
+mali_bool init_mali_dvfs_status(int step);
+void deinit_mali_dvfs_status(void);
+mali_bool mali_dvfs_handler(u32 utilization);
+int mali_dvfs_is_running(void);
+void mali_dvfs_late_resume(void);
+int get_mali_dvfs_control_status(void);
+mali_bool set_mali_dvfs_current_step(unsigned int step);
+void mali_default_step_set(int step, mali_bool boostup);
+int change_dvfs_tableset(int change_clk, int change_step);
+#ifdef CONFIG_CPU_EXYNOS4210
+#if MALI_GPU_BOTTOM_LOCK
+int mali_dvfs_bottom_lock_push(void);
+int mali_dvfs_bottom_lock_pop(void);
+#endif
+#else
+int mali_dvfs_bottom_lock_push(int lock_step);
+int mali_dvfs_bottom_lock_pop(void);
+#endif
+#endif
+
+int mali_dvfs_get_vol(int step);
+
+#if MALI_VOLTAGE_LOCK
+int mali_voltage_lock_push(int lock_vol);
+int mali_voltage_lock_pop(void);
+int mali_voltage_lock_init(void);
+int mali_vol_get_from_table(int vol);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/drivers/gpu/arm/mali400/mali/platform/redwood/exynos4.c b/drivers/gpu/arm/mali400/mali/platform/redwood/exynos4.c
new file mode 100644 (file)
index 0000000..348bc7e
--- /dev/null
@@ -0,0 +1,272 @@
+/* drivers/gpu/mali400/mali/platform/pegasus-m400/exynos4.c
+ *
+ * Copyright 2011 by S.LSI. Samsung Electronics Inc.
+ * San#24, Nongseo-Dong, Giheung-Gu, Yongin, Korea
+ *
+ * Samsung SoC Mali400 DVFS driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**
+ * @file exynos4.c
+ * Platform specific Mali driver functions for the exynos 4XXX based platforms
+ */
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <linux/pm.h>
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+
+#include <linux/irq.h>
+#include <plat/devs.h>
+
+#include "exynos4_pmm.h"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) && LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)
+extern struct platform_device exynos4_device_pd[];
+#else
+extern struct platform_device s5pv310_device_pd[];
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) */
+
+static void mali_platform_device_release(struct device *device);
+static int mali_os_suspend(struct device *device);
+static int mali_os_resume(struct device *device);
+static int mali_os_freeze(struct device *device);
+static int mali_os_thaw(struct device *device);
+#ifdef CONFIG_PM_RUNTIME
+static int mali_runtime_suspend(struct device *device);
+static int mali_runtime_resume(struct device *device);
+static int mali_runtime_idle(struct device *device);
+#endif
+
+#define MALI_GP_IRQ       EXYNOS4_IRQ_GP_3D
+#define MALI_PP0_IRQ      EXYNOS4_IRQ_PP0_3D
+#define MALI_PP1_IRQ      EXYNOS4_IRQ_PP1_3D
+#define MALI_PP2_IRQ      EXYNOS4_IRQ_PP2_3D
+#define MALI_PP3_IRQ      EXYNOS4_IRQ_PP3_3D
+#define MALI_GP_MMU_IRQ   EXYNOS4_IRQ_GPMMU_3D
+#define MALI_PP0_MMU_IRQ  EXYNOS4_IRQ_PPMMU0_3D
+#define MALI_PP1_MMU_IRQ  EXYNOS4_IRQ_PPMMU1_3D
+#define MALI_PP2_MMU_IRQ  EXYNOS4_IRQ_PPMMU2_3D
+#define MALI_PP3_MMU_IRQ  EXYNOS4_IRQ_PPMMU3_3D
+
+static struct resource mali_gpu_resources[] =
+{
+       MALI_GPU_RESOURCES_MALI400_MP4(0x13000000,
+                                      MALI_GP_IRQ, MALI_GP_MMU_IRQ,
+                                      MALI_PP0_IRQ, MALI_PP0_MMU_IRQ,
+                                      MALI_PP1_IRQ, MALI_PP1_MMU_IRQ,
+                                      MALI_PP2_IRQ, MALI_PP2_MMU_IRQ,
+                                      MALI_PP3_IRQ, MALI_PP3_MMU_IRQ)
+};
+
+static struct dev_pm_ops mali_gpu_device_type_pm_ops =
+{
+       .suspend = mali_os_suspend,
+       .resume = mali_os_resume,
+       .freeze = mali_os_freeze,
+       .thaw = mali_os_thaw,
+#ifdef CONFIG_PM_RUNTIME
+       .runtime_suspend = mali_runtime_suspend,
+       .runtime_resume = mali_runtime_resume,
+       .runtime_idle = mali_runtime_idle,
+#endif
+};
+
+static struct device_type mali_gpu_device_device_type =
+{
+       .pm = &mali_gpu_device_type_pm_ops,
+};
+
+
+static struct platform_device *mali_gpu_device;
+
+static struct mali_gpu_device_data mali_gpu_data =
+{
+       .shared_mem_size = 256 * 1024 * 1024, /* 256MB */
+       .fb_start = 0x40000000,
+       .fb_size = 0xb1000000,
+/*     .utilization_interval = 1000, *//* 1000ms */
+       .utilization_interval = 100, /* 100ms in Tizen */
+       .utilization_handler = mali_gpu_utilization_handler,
+};
+
+/*
+ * Attach the Mali resources and platform data to the (externally
+ * defined) exynos4_device_g3d platform device, initialise the platform
+ * glue and enable runtime PM with a 300 ms autosuspend delay.
+ *
+ * Returns 0 on success or the error from the first failing
+ * platform_device_add_* call.  NOTE(review): the return value of
+ * mali_platform_init() is ignored here — confirm that is intentional.
+ */
+int mali_platform_device_register(void)
+{
+	int err;
+
+	MALI_DEBUG_PRINT(4, ("mali_platform_device_register() called\n"));
+
+	/* Connect resources to the device */
+	err = platform_device_add_resources(&exynos4_device_g3d, mali_gpu_resources, sizeof(mali_gpu_resources) / sizeof(mali_gpu_resources[0]));
+	if (0 == err)
+	{
+		err = platform_device_add_data(&exynos4_device_g3d, &mali_gpu_data, sizeof(mali_gpu_data));
+		if (0 == err)
+		{
+			mali_platform_init();
+
+#ifdef CONFIG_PM_RUNTIME
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+			/* Idle for 300 ms before runtime-suspending the GPU. */
+			pm_runtime_set_autosuspend_delay(&(exynos4_device_g3d.dev), 300);
+			pm_runtime_use_autosuspend(&(exynos4_device_g3d.dev));
+#endif
+			pm_runtime_enable(&(exynos4_device_g3d.dev));
+#endif
+			return 0;
+		}
+
+	}
+	return err;
+}
+
+/*
+ * Counterpart of mali_platform_device_register(): tear down the
+ * platform glue.  The platform device itself is external
+ * (exynos4_device_g3d) and is not unregistered here.
+ */
+void mali_platform_device_unregister(void)
+{
+	MALI_DEBUG_PRINT(4, ("mali_platform_device_unregister() called\n"));
+
+	mali_platform_deinit();
+}
+
+/*
+ * Device release callback.  Nothing to free — the device is a static
+ * object — so this only logs; a release hook must still exist to keep
+ * the driver core from warning.
+ */
+static void mali_platform_device_release(struct device *device)
+{
+	MALI_DEBUG_PRINT(4, ("mali_platform_device_release() called\n"));
+}
+
+/* System suspend: forward the event to the Mali driver core (if it
+ * installed a suspend hook), then put the GPU into deep sleep. */
+static int mali_os_suspend(struct device *device)
+{
+	const struct dev_pm_ops *pm_ops =
+		device->driver ? device->driver->pm : NULL;
+	int err = 0;
+
+	MALI_DEBUG_PRINT(4, ("mali_os_suspend() called\n"));
+
+	/* Need to notify Mali driver about this event */
+	if (pm_ops != NULL && pm_ops->suspend != NULL)
+		err = pm_ops->suspend(device);
+
+	mali_platform_power_mode_change(MALI_POWER_MODE_DEEP_SLEEP);
+
+	return err;
+}
+
+/* System resume: power the GPU back on first, then forward the event
+ * to the Mali driver core's resume hook (if any). */
+static int mali_os_resume(struct device *device)
+{
+	const struct dev_pm_ops *pm_ops =
+		device->driver ? device->driver->pm : NULL;
+	int err = 0;
+
+	MALI_DEBUG_PRINT(4, ("mali_os_resume() called\n"));
+
+	mali_platform_power_mode_change(MALI_POWER_MODE_ON);
+
+	/* Need to notify Mali driver about this event */
+	if (pm_ops != NULL && pm_ops->resume != NULL)
+		err = pm_ops->resume(device);
+
+	return err;
+}
+
+/* Hibernation freeze: only forward the event to the Mali driver core;
+ * no platform power-mode change is made here. */
+static int mali_os_freeze(struct device *device)
+{
+	const struct dev_pm_ops *pm_ops =
+		device->driver ? device->driver->pm : NULL;
+	int err = 0;
+
+	MALI_DEBUG_PRINT(4, ("mali_os_freeze() called\n"));
+
+	/* Need to notify Mali driver about this event */
+	if (pm_ops != NULL && pm_ops->freeze != NULL)
+		err = pm_ops->freeze(device);
+
+	return err;
+}
+
+/* Hibernation thaw: only forward the event to the Mali driver core;
+ * mirrors mali_os_freeze(). */
+static int mali_os_thaw(struct device *device)
+{
+	const struct dev_pm_ops *pm_ops =
+		device->driver ? device->driver->pm : NULL;
+	int err = 0;
+
+	MALI_DEBUG_PRINT(4, ("mali_os_thaw() called\n"));
+
+	/* Need to notify Mali driver about this event */
+	if (pm_ops != NULL && pm_ops->thaw != NULL)
+		err = pm_ops->thaw(device);
+
+	return err;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+/* Runtime suspend: forward the event to the Mali driver core, then
+ * drop the GPU into light sleep (short idle). */
+static int mali_runtime_suspend(struct device *device)
+{
+	const struct dev_pm_ops *pm_ops =
+		device->driver ? device->driver->pm : NULL;
+	int err = 0;
+
+	MALI_DEBUG_PRINT(4, ("mali_runtime_suspend() called\n"));
+
+	/* Need to notify Mali driver about this event */
+	if (pm_ops != NULL && pm_ops->runtime_suspend != NULL)
+		err = pm_ops->runtime_suspend(device);
+
+	mali_platform_power_mode_change(MALI_POWER_MODE_LIGHT_SLEEP);
+
+	return err;
+}
+
+/* Runtime resume: power the GPU on first, then forward the event to
+ * the Mali driver core's runtime_resume hook (if any). */
+static int mali_runtime_resume(struct device *device)
+{
+	const struct dev_pm_ops *pm_ops =
+		device->driver ? device->driver->pm : NULL;
+	int err = 0;
+
+	MALI_DEBUG_PRINT(4, ("mali_runtime_resume() called\n"));
+
+	mali_platform_power_mode_change(MALI_POWER_MODE_ON);
+
+	/* Need to notify Mali driver about this event */
+	if (pm_ops != NULL && pm_ops->runtime_resume != NULL)
+		err = pm_ops->runtime_resume(device);
+
+	return err;
+}
+
+/* Runtime idle: give the Mali driver core a chance to veto idling; if
+ * it does not object, request a runtime suspend of the device. */
+static int mali_runtime_idle(struct device *device)
+{
+	const struct dev_pm_ops *pm_ops =
+		device->driver ? device->driver->pm : NULL;
+
+	MALI_DEBUG_PRINT(4, ("mali_runtime_idle() called\n"));
+
+	if (pm_ops != NULL && pm_ops->runtime_idle != NULL) {
+		/* Need to notify Mali driver about this event */
+		int err = pm_ops->runtime_idle(device);
+
+		if (err != 0)
+			return err;
+	}
+
+	pm_runtime_suspend(device);
+
+	return 0;
+}
+#endif
diff --git a/drivers/gpu/arm/mali400/mali/platform/redwood/exynos4_pmm.c b/drivers/gpu/arm/mali400/mali/platform/redwood/exynos4_pmm.c
new file mode 100644 (file)
index 0000000..09d0f98
--- /dev/null
@@ -0,0 +1,1010 @@
+/* drivers/gpu/mali400/mali/platform/pegasus-m400/exynos4_pmm.c
+ *
+ * Copyright 2011 by S.LSI. Samsung Electronics Inc.
+ * San#24, Nongseo-Dong, Giheung-Gu, Yongin, Korea
+ *
+ * Samsung SoC Mali400 DVFS driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**
+ * @file exynos4_pmm.c
+ * Platform specific Mali driver functions for the exynos 4XXX based platforms
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "exynos4_pmm.h"
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)
+/* Some defines changed names in later Odroid-A kernels. Make sure it works for both. */
+#ifndef S5P_G3D_CONFIGURATION
+#define S5P_G3D_CONFIGURATION S5P_PMU_G3D_CONF
+#endif
+#ifndef S5P_G3D_STATUS
+#define S5P_G3D_STATUS S5P_PMU_G3D_CONF + 0x4
+#endif
+#if defined(CONFIG_PM_RUNTIME)
+#include <plat/pd.h>
+#endif
+#else
+/* Some defines changed names in later Odroid-A kernels. Make sure it works for both. */
+#ifndef S5P_G3D_CONFIGURATION
+#define S5P_G3D_CONFIGURATION EXYNOS4_G3D_CONFIGURATION
+#endif
+#ifndef S5P_G3D_STATUS
+#define S5P_G3D_STATUS (EXYNOS4_G3D_CONFIGURATION + 0x4)
+#endif
+#ifndef S5P_INT_LOCAL_PWR_EN
+#define S5P_INT_LOCAL_PWR_EN EXYNOS_INT_LOCAL_PWR_EN
+#endif
+#endif
+
+#include <asm/io.h>
+#include <mach/regs-pmu.h>
+#include <linux/pm_qos.h>
+#ifdef CONFIG_EXYNOS_BUSFREQ_OPP
+#include <mach/busfreq_exynos4.h>
+#endif
+
+#include <linux/workqueue.h>
+
+#define MALI_DVFS_STEPS 5
+#define MALI_DVFS_WATING 10 /* msec */
+#define MALI_DVFS_DEFAULT_STEP 1
+
+#ifdef CONFIG_CPU_FREQ
+#include <mach/asv.h>
+#define EXYNOS4_ASV_ENABLED
+#endif
+
+#define MALI_DVFS_CLK_DEBUG 0
+#define SEC_THRESHOLD 1
+
+static int bMaliDvfsRun = 0;
+
+typedef struct mali_dvfs_tableTag{
+       unsigned int clock;
+       unsigned int freq;
+       unsigned int vol;
+#if SEC_THRESHOLD
+       unsigned int downthreshold;
+       unsigned int upthreshold;
+#endif
+}mali_dvfs_table;
+
+typedef struct mali_dvfs_statusTag{
+       unsigned int currentStep;
+       mali_dvfs_table * pCurrentDvfs;
+
+} mali_dvfs_status_t;
+
+/* dvfs status */
+mali_dvfs_status_t maliDvfsStatus;
+int mali_dvfs_control;
+
+typedef struct mali_runtime_resumeTag{
+       int clk;
+       int vol;
+       unsigned int step;
+}mali_runtime_resume_table;
+
+/*mali_runtime_resume_table mali_runtime_resume = {266, 900000, 1};*/
+mali_runtime_resume_table mali_runtime_resume = {160, 875000, 1};
+
+/* dvfs table */
+mali_dvfs_table mali_dvfs[MALI_DVFS_STEPS]={
+                       /*step 0*/{160  ,1000000    ,875000    , 0   , 70},
+                       /*step 1*/{266  ,1000000    ,900000    ,62   , 90},
+                       /*step 2*/{350  ,1000000    ,950000    ,85   , 90},
+                       /*step 3*/{440  ,1000000    ,1025000   ,85   , 90},
+                       /*step 4*/{533  ,1000000    ,1075000   ,85   ,100} };
+
+#ifdef EXYNOS4_ASV_ENABLED
+#define ASV_LEVEL       12     /* ASV0, 1, 11 is reserved */
+#define ASV_LEVEL_PRIME         13  /* ASV0, 1, 12 is reserved */
+
+static unsigned int asv_3d_volt_9_table_for_prime[MALI_DVFS_STEPS][ASV_LEVEL_PRIME] = {
+       {  962500,  937500,  925000,  912500,  900000,  887500,  875000,  862500,  875000,  862500,  850000,  850000,  850000},  /* L4(160Mhz) */
+#if (MALI_DVFS_STEPS > 1)
+       {  987500,  962500,  950000,  937500,  925000,  912500,  900000,  887500,  900000,  887500,  875000,  875000,  875000}, /* L3(266Mhz) */
+#if (MALI_DVFS_STEPS > 2)
+       { 1037500, 1012500, 1000000,  987500,  975000,  962500,  950000,  937500,  950000,  937500,  912500,  900000,  887500}, /* L2(350Mhz) */
+#if (MALI_DVFS_STEPS > 3)
+       { 1100000, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000, 1012500, 1000000,  975000,  962500,  950000}, /* L1(440Mhz) */
+#if (MALI_DVFS_STEPS > 4)
+       { 1162500, 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500, 1075000, 1062500, 1037500, 1025000, 1012500}, /* L0(533Mhz) */
+#endif
+#endif
+#endif
+#endif
+};
+#endif /* ASV_LEVEL */
+
+#define EXTXTALCLK_NAME  "ext_xtal"
+#define VPLLSRCCLK_NAME  "vpll_src"
+#define FOUTVPLLCLK_NAME "fout_vpll"
+#define SCLVPLLCLK_NAME  "sclk_vpll"
+#define GPUMOUT1CLK_NAME "mout_g3d1"
+
+#define MPLLCLK_NAME     "mout_mpll"
+#define GPUMOUT0CLK_NAME "mout_g3d0"
+#define GPUCLK_NAME      "sclk_g3d"
+#define CLK_DIV_STAT_G3D 0x1003C62C
+#define CLK_DESC         "clk-divider-status"
+
+static struct clk *ext_xtal_clock = NULL;
+static struct clk *vpll_src_clock = NULL;
+static struct clk *fout_vpll_clock = NULL;
+static struct clk *sclk_vpll_clock = NULL;
+
+static struct clk *mpll_clock = NULL;
+static struct clk *mali_parent_clock = NULL;
+static struct clk *mali_clock = NULL;
+
+/* Pegasus */
+static const mali_bool bis_vpll = MALI_TRUE;
+int mali_gpu_clk = 440;
+int mali_gpu_vol = 1025000;
+
+static unsigned int GPU_MHZ    =               1000000;
+
+int  gpu_power_state;
+static int bPoweroff;
+static atomic_t clk_active;
+
+#ifdef CONFIG_EXYNOS_BUSFREQ_OPP
+static struct pm_qos_request mali_pm_qos_busfreq;
+#endif
+
+#ifdef CONFIG_ARM_EXYNOS_CPUFREQ
+static struct pm_qos_request mali_pm_qos_cpufreq;
+static atomic_t mali_cur_cpufreq = ATOMIC_INIT(0);
+#endif
+
+#ifdef CONFIG_REGULATOR
+struct regulator *g3d_regulator = NULL;
+#endif
+
+mali_io_address clk_register_map = 0;
+
+/* DVFS */
+unsigned int mali_dvfs_utilization = 255;
+static int mali_gpu_clk_on;
+
+static void mali_dvfs_work_handler(struct work_struct *w);
+
+static struct workqueue_struct *mali_dvfs_wq = 0;
+
+extern mali_io_address clk_register_map;
+
+_mali_osk_lock_t *mali_dvfs_lock = 0;
+
+int mali_runtime_resumed = -1;
+
+static DECLARE_WORK(mali_dvfs_work, mali_dvfs_work_handler);
+
+
+/* export GPU frequency as a read-only parameter so that it can be read in /sys */
+module_param(mali_gpu_clk, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_gpu_clk, "GPU frequency in MHz");
+
+/*
+ * The CPU frequency lock is used to guarantee CPU minimum QoS at maximum GPU
+ * clocks. So when GPU clock is 440MHz, CPU QoS is set to minimum 1.2GHz,
+ * and when GPU clock is 533MHz, CPU QoS is set to minimum 1.4GHz.
+ * The other cases, CPU QoS is set to 0.
+ */
+#ifdef CONFIG_ARM_EXYNOS_CPUFREQ
+/* Pin a minimum CPU frequency (MHz) on behalf of the GPU.  Negative
+ * requests are rejected; repeating the current request is a no-op. */
+int cpufreq_lock_by_mali(int freq)
+{
+	if (freq < 0)
+		return _MALI_OSK_ERR_INVALID_ARGS;
+
+	if (atomic_read(&mali_cur_cpufreq) == freq)
+		return _MALI_OSK_ERR_OK;
+
+	/* The PM QoS request is expressed in kHz. */
+	pm_qos_update_request(&mali_pm_qos_cpufreq, freq * 1000);
+	atomic_set(&mali_cur_cpufreq, freq);
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Drop the GPU's CPU minimum-frequency request, if one is active. */
+void cpufreq_unlock_by_mali(void)
+{
+	if (atomic_read(&mali_cur_cpufreq) <= 0)
+		return;
+
+	pm_qos_update_request(&mali_pm_qos_cpufreq, 0);
+	atomic_set(&mali_cur_cpufreq, 0);
+}
+#endif
+
+#ifdef CONFIG_REGULATOR
+/* Disable the G3D power rail; logs and bails out when the regulator
+ * handle was never successfully acquired. */
+void mali_regulator_disable(void)
+{
+	if (IS_ERR_OR_NULL(g3d_regulator)) {
+		MALI_DEBUG_PRINT(1, ("error on mali_regulator_disable : g3d_regulator is null\n"));
+		return;
+	}
+
+	regulator_disable(g3d_regulator);
+}
+
+/* Enable the G3D power rail; logs and bails out when the regulator
+ * handle was never successfully acquired. */
+void mali_regulator_enable(void)
+{
+	if (IS_ERR_OR_NULL(g3d_regulator)) {
+		MALI_DEBUG_PRINT(1, ("error on mali_regulator_enable : g3d_regulator is null\n"));
+		return;
+	}
+
+	regulator_enable(g3d_regulator);
+}
+
+/*
+ * Set the G3D regulator to the [min_uV, max_uV] window under the DVFS
+ * lock and cache the voltage the regulator actually settled on in
+ * mali_gpu_vol.
+ *
+ * Fix: the original returned on a missing regulator while still
+ * holding mali_dvfs_lock, deadlocking every subsequent DVFS operation.
+ * All paths now release the lock.
+ */
+void mali_regulator_set_voltage(int min_uV, int max_uV)
+{
+	_mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+
+	if (IS_ERR_OR_NULL(g3d_regulator)) {
+		MALI_DEBUG_PRINT(1, ("error on mali_regulator_set_voltage : g3d_regulator is null\n"));
+		goto out_unlock;
+	}
+
+	MALI_DEBUG_PRINT(3, ("= regulator_set_voltage: %d, %d \n",min_uV, max_uV));
+	regulator_set_voltage(g3d_regulator, min_uV, max_uV);
+	/* Read back the voltage actually applied by the regulator core. */
+	mali_gpu_vol = regulator_get_voltage(g3d_regulator);
+	MALI_DEBUG_PRINT(1, ("Mali voltage: %d\n", mali_gpu_vol));
+
+out_unlock:
+	_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+}
+#endif
+
+/* Current Mali core clock rate in Hz, as reported by the clock
+ * framework for the cached mali_clock handle. */
+unsigned long mali_clk_get_rate(void)
+{
+	return clk_get_rate(mali_clock);
+}
+
+
+/* Currently applied DVFS step (index into the mali_dvfs[] table). */
+static unsigned int get_mali_dvfs_status(void)
+{
+	return maliDvfsStatus.currentStep;
+}
+
+/*
+ * Lazily look up every clock the DVFS code needs and cache the handles
+ * in file-scope globals.  With bis_vpll set (the compile-time default
+ * here) the VPLL chain is used; otherwise the MPLL chain.  The Mali
+ * core clock itself is always acquired.
+ *
+ * Returns MALI_TRUE when all required handles are valid, MALI_FALSE on
+ * the first failure.  NOTE(review): on failure the global keeps the
+ * ERR_PTR value, so the "== NULL" guards will not retry the lookup on
+ * a later call — confirm this is acceptable.
+ */
+mali_bool mali_clk_get(void)
+{
+	if (bis_vpll)
+	{
+		if (ext_xtal_clock == NULL)
+		{
+			ext_xtal_clock = clk_get(NULL,EXTXTALCLK_NAME);
+			if (IS_ERR(ext_xtal_clock)) {
+				MALI_PRINT( ("MALI Error : failed to get source ext_xtal_clock\n"));
+				return MALI_FALSE;
+			}
+		}
+
+		if (vpll_src_clock == NULL)
+		{
+			vpll_src_clock = clk_get(NULL,VPLLSRCCLK_NAME);
+			if (IS_ERR(vpll_src_clock)) {
+				MALI_PRINT( ("MALI Error : failed to get source vpll_src_clock\n"));
+				return MALI_FALSE;
+			}
+		}
+
+		if (fout_vpll_clock == NULL)
+		{
+			fout_vpll_clock = clk_get(NULL,FOUTVPLLCLK_NAME);
+			if (IS_ERR(fout_vpll_clock)) {
+				MALI_PRINT( ("MALI Error : failed to get source fout_vpll_clock\n"));
+				return MALI_FALSE;
+			}
+		}
+
+		if (sclk_vpll_clock == NULL)
+		{
+			sclk_vpll_clock = clk_get(NULL,SCLVPLLCLK_NAME);
+			if (IS_ERR(sclk_vpll_clock)) {
+				MALI_PRINT( ("MALI Error : failed to get source sclk_vpll_clock\n"));
+				return MALI_FALSE;
+			}
+		}
+
+		if (mali_parent_clock == NULL)
+		{
+			mali_parent_clock = clk_get(NULL, GPUMOUT1CLK_NAME);
+
+			if (IS_ERR(mali_parent_clock)) {
+				MALI_PRINT( ( "MALI Error : failed to get source mali parent clock\n"));
+				return MALI_FALSE;
+			}
+		}
+	}
+	else /* mpll */
+	{
+		if (mpll_clock == NULL)
+		{
+			mpll_clock = clk_get(NULL,MPLLCLK_NAME);
+
+			if (IS_ERR(mpll_clock)) {
+				MALI_PRINT( ("MALI Error : failed to get source mpll clock\n"));
+				return MALI_FALSE;
+			}
+		}
+
+		if (mali_parent_clock == NULL)
+		{
+			mali_parent_clock = clk_get(NULL, GPUMOUT0CLK_NAME);
+
+			if (IS_ERR(mali_parent_clock)) {
+				MALI_PRINT( ( "MALI Error : failed to get source mali parent clock\n"));
+				return MALI_FALSE;
+			}
+		}
+	}
+
+	/* The Mali core clock is needed regardless of the PLL source. */
+	if (mali_clock == NULL)
+	{
+		mali_clock = clk_get(NULL, GPUCLK_NAME);
+
+		if (IS_ERR(mali_clock)) {
+			MALI_PRINT( ("MALI Error : failed to get source mali clock\n"));
+			return MALI_FALSE;
+		}
+	}
+
+	return MALI_TRUE;
+}
+
+/*
+ * Release the cached clock handles taken by mali_clk_get() and reset
+ * the globals to NULL so a later mali_clk_get() re-acquires them.
+ *
+ * When binc_mali_clock is MALI_FALSE, fout_vpll_clock and mali_clock
+ * are deliberately kept: callers on the DVFS path (mali_clk_set_rate)
+ * still need the core clock after dropping the intermediate handles.
+ */
+void mali_clk_put(mali_bool binc_mali_clock)
+{
+	if (mali_parent_clock)
+	{
+		clk_put(mali_parent_clock);
+		mali_parent_clock = NULL;
+	}
+
+	if (mpll_clock)
+	{
+		clk_put(mpll_clock);
+		mpll_clock = NULL;
+	}
+
+	if (sclk_vpll_clock)
+	{
+		clk_put(sclk_vpll_clock);
+		sclk_vpll_clock = NULL;
+	}
+
+	if (binc_mali_clock && fout_vpll_clock)
+	{
+		clk_put(fout_vpll_clock);
+		fout_vpll_clock = NULL;
+	}
+
+	if (vpll_src_clock)
+	{
+		clk_put(vpll_src_clock);
+		vpll_src_clock = NULL;
+	}
+
+	if (ext_xtal_clock)
+	{
+		clk_put(ext_xtal_clock);
+		ext_xtal_clock = NULL;
+	}
+
+	if (binc_mali_clock && mali_clock)
+	{
+		clk_put(mali_clock);
+		mali_clock = NULL;
+	}
+}
+
+/*
+ * Set the Mali clock to clk (MHz) * mhz (Hz-per-MHz scale) under the
+ * DVFS lock: re-parent the clock tree to the VPLL or MPLL source,
+ * enable the clock on first use, apply the rate and record the rate
+ * actually granted in mali_gpu_clk / GPU_MHZ.
+ *
+ * Fix: the original early-returned on mali_clk_get() and clk_enable()
+ * failure while still holding mali_dvfs_lock (and, in the clk_enable
+ * case, the clock references), deadlocking all later DVFS requests.
+ * Every path now unwinds through common cleanup labels.
+ */
+void mali_clk_set_rate(unsigned int clk, unsigned int mhz)
+{
+	int err;
+	unsigned long rate = (unsigned long)clk * (unsigned long)mhz;
+
+	_mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+	MALI_DEBUG_PRINT(3, ("Mali platform: Setting frequency to %d mhz\n", clk));
+
+	if (mali_clk_get() == MALI_FALSE)
+		goto out_unlock;	/* nothing valid to put; just drop the lock */
+
+	if (bis_vpll)
+	{
+		clk_set_rate(fout_vpll_clock, (unsigned int)clk * GPU_MHZ);
+		clk_set_parent(vpll_src_clock, ext_xtal_clock);
+		clk_set_parent(sclk_vpll_clock, fout_vpll_clock);
+
+		clk_set_parent(mali_parent_clock, sclk_vpll_clock);
+		clk_set_parent(mali_clock, mali_parent_clock);
+	}
+	else
+	{
+		clk_set_parent(mali_parent_clock, mpll_clock);
+		clk_set_parent(mali_clock, mali_parent_clock);
+	}
+
+	if (!atomic_read(&clk_active)) {
+		if (clk_enable(mali_clock) < 0)
+			goto out_put;
+		atomic_set(&clk_active, 1);
+	}
+
+	err = clk_set_rate(mali_clock, rate);
+	if (err) MALI_PRINT_ERROR(("Failed to set Mali clock: %d\n", err));
+
+	/* Read back the rate the clock framework actually granted. */
+	rate = mali_clk_get_rate();
+
+	MALI_DEBUG_PRINT(3, ("Mali frequency %d\n", rate / mhz));
+	GPU_MHZ = mhz;
+	mali_gpu_clk = (int)(rate / mhz);
+
+out_put:
+	mali_clk_put(MALI_FALSE);
+out_unlock:
+	_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/* Current value of the DVFS control flag (set elsewhere via sysfs or
+ * ioctl paths; 0 means automatic DVFS). */
+int get_mali_dvfs_control_status(void)
+{
+	return mali_dvfs_control;
+}
+
+/*
+ * Record 'step' as the current DVFS step, wrapped into table range,
+ * under the DVFS lock.  Always returns MALI_TRUE.
+ *
+ * NOTE(review): a step >= MALI_DVFS_STEPS appears to be an encoded
+ * "set while runtime-resumed" request — the wrapped value is then also
+ * stored in mali_runtime_resumed.  Confirm against the callers.
+ */
+mali_bool set_mali_dvfs_current_step(unsigned int step)
+{
+	_mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+	maliDvfsStatus.currentStep = step % MALI_DVFS_STEPS;
+	if (step >= MALI_DVFS_STEPS)
+		mali_runtime_resumed = maliDvfsStatus.currentStep;
+
+	_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+	return MALI_TRUE;
+}
+
+
+/* Program voltage and clock for DVFS level 'step'.  When boosting up,
+ * the voltage is raised before the clock; when stepping down, the clock
+ * is lowered first -- the GPU must never see a fast clock on a low
+ * voltage.  Always returns MALI_TRUE. */
+static mali_bool set_mali_dvfs_status(u32 step, mali_bool boostup)
+{
+       u32 validatedStep = step;
+#if MALI_DVFS_CLK_DEBUG
+       unsigned int *pRegMaliClkDiv;
+       unsigned int *pRegMaliMpll;
+#endif
+#ifdef CONFIG_ARM_EXYNOS_CPUFREQ
+       /* FIX: err was declared unconditionally and triggered an
+        * unused-variable warning when this config is disabled. */
+       int err;
+#endif
+
+       if (boostup) {
+#ifdef CONFIG_REGULATOR
+               /*change the voltage*/
+               mali_regulator_set_voltage(mali_dvfs[step].vol, mali_dvfs[step].vol);
+#endif
+               /*change the clock*/
+               mali_clk_set_rate(mali_dvfs[step].clock, mali_dvfs[step].freq);
+       } else {
+               /*change the clock*/
+               mali_clk_set_rate(mali_dvfs[step].clock, mali_dvfs[step].freq);
+#ifdef CONFIG_REGULATOR
+               /*change the voltage*/
+               mali_regulator_set_voltage(mali_dvfs[step].vol, mali_dvfs[step].vol);
+#endif
+       }
+
+#if defined(CONFIG_MALI400_PROFILING)
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
+                       MALI_PROFILING_EVENT_CHANNEL_GPU|
+                       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                       mali_gpu_clk, mali_gpu_vol/1000, 0, 0, 0);
+#endif
+       mali_clk_put(MALI_FALSE);
+
+#if MALI_DVFS_CLK_DEBUG
+       pRegMaliClkDiv = ioremap(0x1003c52c, 32);
+       pRegMaliMpll = ioremap(0x1003c22c, 32);
+       MALI_PRINT(("Mali MPLL reg:%d, CLK DIV: %d \n", *pRegMaliMpll, *pRegMaliClkDiv));
+       /* BUG FIX: the original leaked both debug-only mappings on every
+        * DVFS transition. */
+       iounmap(pRegMaliClkDiv);
+       iounmap(pRegMaliMpll);
+#endif
+
+       set_mali_dvfs_current_step(validatedStep);
+       /*for future use*/
+       maliDvfsStatus.pCurrentDvfs = &mali_dvfs[validatedStep];
+
+#ifdef CONFIG_ARM_EXYNOS_CPUFREQ
+       /* lock/unlock CPU freq by Mali */
+       if (mali_dvfs[step].clock >= 530)
+               err = cpufreq_lock_by_mali(1400);
+       else if (mali_dvfs[step].clock >= 440)
+               err = cpufreq_lock_by_mali(1200);
+       else
+               cpufreq_unlock_by_mali();
+#endif
+
+       return MALI_TRUE;
+}
+
+/* Poll the G3D clock-divider status register until the busy bit (0x8000)
+ * clears, i.e. the new clock setting has latched.
+ * BUG FIX: the original loop spun forever if the bit never cleared;
+ * bound it the same way g3d_power_domain_control() bounds its polls.
+ * @param msec unused (kept for the existing call signature). */
+static void mali_platform_wating(u32 msec)
+{
+       unsigned int read_val;
+       u32 retries = 10000; /* 10000 * 100us = 1s worst case */
+
+       while (retries-- > 0)
+       {
+#ifdef CONFIG_SLP_MALI_DBG
+               read_val = _mali_osk_mem_ioread32_cpu(clk_register_map, 0x00);
+#else
+               read_val = _mali_osk_mem_ioread32(clk_register_map, 0x00);
+#endif
+               if ((read_val & 0x8000) == 0x0000)
+                       return;
+
+               _mali_osk_time_ubusydelay(100); /* 1000 -> 100 : 20101218 */
+       }
+
+       MALI_PRINT_ERROR(("mali_platform_wating: clock status busy bit never cleared\n"));
+}
+
+/* Apply DVFS level 'step' and wait for the hardware to settle.
+ * Returns MALI_FALSE if programming the level failed. */
+static mali_bool change_mali_dvfs_status(u32 step, mali_bool boostup)
+{
+       MALI_DEBUG_PRINT(4, ("> change_mali_dvfs_status: %d, %d \n", step, boostup));
+
+       if (!set_mali_dvfs_status(step, boostup)) {
+               MALI_DEBUG_PRINT(1, ("error on set_mali_dvfs_status: %d, %d \n", step, boostup));
+               return MALI_FALSE;
+       }
+
+       /* Give the clock and voltage time to stabilize before use. */
+       mali_platform_wating(MALI_DVFS_WATING); /*msec*/
+
+       return MALI_TRUE;
+}
+
+#ifdef EXYNOS4_ASV_ENABLED
+extern unsigned int exynos_result_of_asv;
+
+/* Replace the default per-step DVFS voltages with the ASV-binned values
+ * selected by exynos_result_of_asv for this particular chip. */
+static mali_bool mali_dvfs_table_update(void)
+{
+       unsigned int idx;
+
+       MALI_PRINT(("::P::exynos_result_of_asv : %d\n", exynos_result_of_asv));
+
+       for (idx = 0; idx < MALI_DVFS_STEPS; idx++) {
+               mali_dvfs[idx].vol = asv_3d_volt_9_table_for_prime[idx][exynos_result_of_asv];
+               MALI_PRINT(("mali_dvfs[%d].vol = %d \n", idx, mali_dvfs[idx].vol));
+       }
+
+       return MALI_TRUE;
+}
+#endif
+
+
+/* Choose the next DVFS level from the GPU utilization sample (0..255).
+ * 'level' is static so the automatic path moves one step per call.
+ * A pending mali_runtime_resumed value overrides the remembered level
+ * once; a nonzero mali_dvfs_control snaps to the highest step whose
+ * clock it covers. */
+static unsigned int decideNextStatus(unsigned int utilization)
+{
+       static unsigned int level = 0;
+       int idx;
+
+       if (mali_runtime_resumed >= 0) {
+               level = mali_runtime_resumed;
+               mali_runtime_resumed = -1;
+       }
+
+       if (mali_dvfs_control == 0 && level == get_mali_dvfs_status()) {
+               unsigned int up = 255 * mali_dvfs[maliDvfsStatus.currentStep].upthreshold / 100;
+               unsigned int down = 255 * mali_dvfs[maliDvfsStatus.currentStep].downthreshold / 100;
+
+               if (utilization > (int)up && level < MALI_DVFS_STEPS - 1)
+                       level++;
+               else if (utilization < (int)down && level > 0)
+                       level--;
+       } else {
+               /* Manual override: pick the highest step not exceeding
+                * the requested control clock. */
+               for (idx = MALI_DVFS_STEPS - 1; idx >= 0; idx--) {
+                       if (mali_dvfs_control >= mali_dvfs[idx].clock) {
+                               maliDvfsStatus.currentStep = idx;
+                               level = idx;
+                               break;
+                       }
+               }
+       }
+
+       return level;
+}
+
+/* One DVFS decision cycle: apply the ASV table once on the first call,
+ * then compute the next level from utilization and switch if needed. */
+static mali_bool mali_dvfs_status(unsigned int utilization)
+{
+       unsigned int next;
+       unsigned int cur;
+       mali_bool boostup = MALI_FALSE;
+#ifdef EXYNOS4_ASV_ENABLED
+       static mali_bool asv_applied = MALI_FALSE;
+
+       /* One-time ASV voltage-table fixup on the very first sample. */
+       if (asv_applied == MALI_FALSE) {
+               mali_dvfs_table_update();
+               change_mali_dvfs_status(1, 0);
+               asv_applied = MALI_TRUE;
+               return MALI_TRUE;
+       }
+#endif
+
+       MALI_DEBUG_PRINT(4, ("> mali_dvfs_status: %d \n",utilization));
+
+       cur = get_mali_dvfs_status();
+       next = decideNextStatus(utilization);
+
+       MALI_DEBUG_PRINT(4, ("= curStatus %d, nextStatus %d, maliDvfsStatus.currentStep %d \n", cur, next, maliDvfsStatus.currentStep));
+
+       /* Nothing to do when the level is unchanged. */
+       if (cur == next)
+               return MALI_TRUE;
+
+       /* Boosting up means voltage must be raised before the clock. */
+       if (next > maliDvfsStatus.currentStep)
+               boostup = 1;
+
+       if (!change_mali_dvfs_status(next, boostup)) {
+               MALI_DEBUG_PRINT(1, ("error on change_mali_dvfs_status \n"));
+               return MALI_FALSE;
+       }
+
+       return MALI_TRUE;
+}
+
+
+/* Nonzero while mali_dvfs_work_handler() is executing (bMaliDvfsRun). */
+int mali_dvfs_is_running(void)
+{
+       return bMaliDvfsRun;
+}
+
+
+/* Workqueue callback: run one DVFS decision cycle on the sampled
+ * utilization.  bMaliDvfsRun flags the handler as busy for
+ * mali_dvfs_is_running(). */
+static void mali_dvfs_work_handler(struct work_struct *w)
+{
+       bMaliDvfsRun = 1;
+
+       MALI_DEBUG_PRINT(3, ("=== mali_dvfs_work_handler\n"));
+
+       /* FIX: braced the if -- the original's unbraced body was indented
+        * at the same depth as the statement above it, inviting misreads. */
+       if (!mali_dvfs_status(mali_dvfs_utilization)) {
+               MALI_DEBUG_PRINT(1,( "error on mali dvfs status in mali_dvfs_work_handler"));
+       }
+
+       bMaliDvfsRun = 0;
+}
+
+
+/* Set up DVFS bookkeeping: create the single-threaded workqueue that
+ * runs mali_dvfs_work_handler, reset the current step, and register the
+ * CPU-frequency QoS request.  Returns MALI_FALSE if the workqueue
+ * cannot be created. */
+mali_bool init_mali_dvfs_status(void)
+{
+       if (!mali_dvfs_wq)
+       {
+               mali_dvfs_wq = create_singlethread_workqueue("mali_dvfs");
+       }
+
+       /* BUG FIX: the original ignored workqueue-creation failure (its own
+        * comment said "add a error handling here"); queueing work on a
+        * NULL workqueue would oops later. */
+       if (!mali_dvfs_wq)
+               return MALI_FALSE;
+
+       maliDvfsStatus.currentStep = MALI_DVFS_DEFAULT_STEP;
+
+#ifdef CONFIG_ARM_EXYNOS_CPUFREQ
+       pm_qos_add_request(&mali_pm_qos_cpufreq, PM_QOS_CPU_FREQ_MIN, 0);
+#endif
+
+       return MALI_TRUE;
+}
+
+/* Tear down DVFS bookkeeping: flush and destroy the DVFS workqueue and
+ * drop the CPU-frequency QoS request registered at init. */
+void deinit_mali_dvfs_status(void)
+{
+       if (mali_dvfs_wq != NULL) {
+               destroy_workqueue(mali_dvfs_wq);
+               mali_dvfs_wq = NULL;
+       }
+
+#ifdef CONFIG_ARM_EXYNOS_CPUFREQ
+       pm_qos_remove_request(&mali_pm_qos_cpufreq);
+#endif
+}
+
+/* Hand a utilization sample to the DVFS worker.  The sample is stashed
+ * in mali_dvfs_utilization and processed asynchronously on CPU 0.
+ * Returns MALI_FALSE if the DVFS workqueue was never created. */
+mali_bool mali_dvfs_handler(unsigned int utilization)
+{
+       mali_dvfs_utilization = utilization;
+
+       /* BUG FIX: guard against a NULL workqueue (init_mali_dvfs_status
+        * may have failed); the original would oops in queue_work_on. */
+       if (!mali_dvfs_wq)
+               return MALI_FALSE;
+
+       queue_work_on(0, mali_dvfs_wq, &mali_dvfs_work);
+
+       return MALI_TRUE;
+}
+
+static mali_bool init_mali_clock(void)
+{
+       mali_bool ret = MALI_TRUE;
+       gpu_power_state = 0;
+       bPoweroff = 1;
+
+       if (mali_clock != 0)
+               return ret; /* already initialized */
+
+       mali_dvfs_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE
+                       | _MALI_OSK_LOCKFLAG_ONELOCK, 0, 0);
+       if (mali_dvfs_lock == NULL)
+               return _MALI_OSK_ERR_FAULT;
+
+
+
+       if (!mali_clk_get())
+       {
+               MALI_PRINT(("Error: Failed to get Mali clock\n"));
+               goto err_clk;
+       }
+
+       mali_clk_set_rate((unsigned int)mali_gpu_clk, GPU_MHZ);
+
+       MALI_PRINT(("init_mali_clock mali_clock %x\n", mali_clock));
+
+#ifdef CONFIG_REGULATOR
+       g3d_regulator = regulator_get(NULL, "vdd_g3d");
+
+       if (IS_ERR(g3d_regulator))
+       {
+               MALI_PRINT( ("MALI Error : failed to get vdd_g3d\n"));
+               ret = MALI_FALSE;
+               goto err_regulator;
+       }
+
+       regulator_enable(g3d_regulator);
+       mali_regulator_set_voltage(mali_gpu_vol, mali_gpu_vol);
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
+                       MALI_PROFILING_EVENT_CHANNEL_GPU|
+                       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                       mali_gpu_clk, mali_gpu_vol/1000, 0, 0, 0);
+#endif
+
+       mali_clk_put(MALI_FALSE);
+
+       mali_gpu_clk_on = mali_gpu_clk;
+
+       return MALI_TRUE;
+
+#ifdef CONFIG_REGULATOR
+err_regulator:
+       regulator_put(g3d_regulator);
+#endif
+err_clk:
+       mali_clk_put(MALI_TRUE);
+
+       return ret;
+}
+
+/* Release the regulator and clock references taken by init_mali_clock.
+ * A zero mali_clock means init never completed; nothing to undo. */
+static mali_bool deinit_mali_clock(void)
+{
+       if (mali_clock == 0)
+               return MALI_TRUE;
+
+#ifdef CONFIG_REGULATOR
+       if (g3d_regulator != NULL) {
+               regulator_put(g3d_regulator);
+               g3d_regulator = NULL;
+       }
+#endif
+
+       mali_clk_put(MALI_TRUE);
+
+       return MALI_TRUE;
+}
+
+
+/* Re-enable the Mali clock after a power-mode resume and restore its
+ * frequency (and, under DVFS, its voltage/step).  Always succeeds;
+ * a clk_enable() failure is only debug-printed. */
+static _mali_osk_errcode_t enable_mali_clocks(void)
+{
+       int err;
+
+       /* clk_active makes enable/disable idempotent across repeated calls. */
+       if (!atomic_read(&clk_active)) {
+               err = clk_enable(mali_clock);
+               MALI_DEBUG_PRINT(3,("enable_mali_clocks mali_clock %p error %d \n", mali_clock, err));
+               atomic_set(&clk_active, 1);
+       }
+
+       /* Restore the frequency that was running before the clock was gated
+        * (disable_mali_clocks saved it in mali_gpu_clk_on). */
+       mali_gpu_clk = mali_gpu_clk_on;
+
+       /* set clock rate */
+#ifdef CONFIG_MALI_DVFS
+       /* Under manual DVFS control, or when the saved clock already meets
+        * the runtime-resume floor, just restore it; otherwise resume at the
+        * designated runtime-resume step, programming voltage first. */
+       if (get_mali_dvfs_control_status() != 0 || mali_gpu_clk >= mali_runtime_resume.clk) {
+               mali_clk_set_rate(mali_gpu_clk, GPU_MHZ);
+       } else {
+#ifdef CONFIG_REGULATOR
+               mali_regulator_set_voltage(mali_runtime_resume.vol, mali_runtime_resume.vol);
+#endif
+               mali_clk_set_rate(mali_runtime_resume.clk, GPU_MHZ);
+               set_mali_dvfs_current_step(mali_runtime_resume.step);
+       }
+#else
+       mali_clk_set_rate((unsigned int)mali_gpu_clk, GPU_MHZ);
+
+       maliDvfsStatus.currentStep = MALI_DVFS_DEFAULT_STEP;
+#endif
+
+#ifdef CONFIG_ARM_EXYNOS_CPUFREQ
+       /* lock/unlock CPU freq by Mali */
+       if (mali_gpu_clk >= 530)
+               err = cpufreq_lock_by_mali(1400);
+       else if (mali_gpu_clk >= 440)
+               err = cpufreq_lock_by_mali(1200);
+       else
+               cpufreq_unlock_by_mali();
+#endif
+
+       MALI_SUCCESS;
+}
+
+/* Gate the Mali clock off (idempotent via clk_active) and release any
+ * CPU-frequency lock held on Mali's behalf. */
+static _mali_osk_errcode_t disable_mali_clocks(void)
+{
+       if (atomic_read(&clk_active)) {
+               clk_disable(mali_clock);
+               atomic_set(&clk_active, 0);
+       }
+
+       MALI_DEBUG_PRINT(3,("disable_mali_clocks mali_clock %p \n", mali_clock));
+
+#ifdef CONFIG_ARM_EXYNOS_CPUFREQ
+       cpufreq_unlock_by_mali();
+#endif
+
+       /* Save the running frequency for enable_mali_clocks() to restore;
+        * mali_gpu_clk == 0 marks the clock as off. */
+       mali_gpu_clk_on = mali_gpu_clk;
+       mali_gpu_clk = 0;
+
+       MALI_SUCCESS;
+}
+
+
+/* Switch the G3D (Mali) power domain on or off through the Exynos PMU
+ * registers and poll the status register until the hardware acknowledges.
+ * @param bpower_on nonzero = power up, zero = power down
+ * @return MALI_SUCCESS on completion; -ETIMEDOUT after 10 x 100us polls.
+ *         NOTE(review): returning a raw -ETIMEDOUT from an
+ *         _mali_osk_errcode_t function mixes error domains -- confirm
+ *         callers treat any nonzero value as failure. */
+_mali_osk_errcode_t g3d_power_domain_control(int bpower_on)
+{
+       if (bpower_on)
+       {
+               void __iomem *status;
+               u32 timeout;
+               __raw_writel(EXYNOS_INT_LOCAL_PWR_EN, S5P_G3D_CONFIGURATION);
+               status = S5P_G3D_STATUS;
+
+               /* Wait max 1ms for the domain to report powered-on. */
+               timeout = 10;
+               while ((__raw_readl(status) & EXYNOS_INT_LOCAL_PWR_EN)
+                       != EXYNOS_INT_LOCAL_PWR_EN) {
+                       if (timeout == 0) {
+                               MALI_PRINTF(("Power domain  enable failed.\n"));
+                               return -ETIMEDOUT;
+                       }
+                       timeout--;
+                       _mali_osk_time_ubusydelay(100);
+               }
+       }
+       else
+       {
+               void __iomem *status;
+               u32 timeout;
+               __raw_writel(0, S5P_G3D_CONFIGURATION);
+
+               status = S5P_G3D_STATUS;
+               /* Wait max 1ms */
+               timeout = 10;
+               while (__raw_readl(status) & EXYNOS_INT_LOCAL_PWR_EN)
+               {
+                       if (timeout == 0) {
+                               MALI_PRINTF(("Power domain  disable failed.\n" ));
+                               return -ETIMEDOUT;
+                       }
+                       timeout--;
+                       _mali_osk_time_ubusydelay( 100);
+               }
+       }
+
+       MALI_SUCCESS;
+}
+
+/* Driver-core entry point: bring up clocks/regulator, map the clock
+ * status register, start DVFS, register the bus-throughput QoS request
+ * and power the GPU on. */
+_mali_osk_errcode_t mali_platform_init(void)
+{
+       MALI_CHECK(init_mali_clock(), _MALI_OSK_ERR_FAULT);
+#ifdef CONFIG_MALI_DVFS
+       /* NOTE(review): the mapioregion result is unchecked -- if it ever
+        * returned NULL, mali_platform_wating() would poll a bad handle;
+        * confirm against the osk implementation. */
+       if (!clk_register_map) clk_register_map = _mali_osk_mem_mapioregion( CLK_DIV_STAT_G3D, 0x20, CLK_DESC );
+       if(!init_mali_dvfs_status())
+               MALI_DEBUG_PRINT(1, ("mali_platform_init failed\n"));
+#endif
+
+#ifdef CONFIG_EXYNOS_BUSFREQ_OPP
+       pm_qos_add_request(&mali_pm_qos_busfreq, PM_QOS_BUS_THROUGHPUT, 0);
+#endif
+
+       mali_platform_power_mode_change(MALI_POWER_MODE_ON);
+
+       MALI_SUCCESS;
+}
+
+/* Reverse of mali_platform_init: power the GPU down, then release the
+ * clocks, the DVFS machinery, the mapped clock status register and the
+ * bus-throughput QoS request. */
+_mali_osk_errcode_t mali_platform_deinit(void)
+{
+       mali_platform_power_mode_change(MALI_POWER_MODE_DEEP_SLEEP);
+       deinit_mali_clock();
+
+#ifdef CONFIG_MALI_DVFS
+       deinit_mali_dvfs_status();
+       if (clk_register_map != NULL) {
+               _mali_osk_mem_unmapioregion(CLK_DIV_STAT_G3D, 0x20, clk_register_map);
+               clk_register_map = NULL;
+       }
+#endif
+
+#ifdef CONFIG_EXYNOS_BUSFREQ_OPP
+       pm_qos_remove_request(&mali_pm_qos_busfreq);
+#endif
+
+       MALI_SUCCESS;
+}
+
+/* Transition the GPU between power modes.  bPoweroff tracks the current
+ * state, so repeated requests for the same state are no-ops.  Both sleep
+ * modes are handled identically here: gate clocks and (without PM
+ * runtime) drop the power domain. */
+_mali_osk_errcode_t mali_platform_power_mode_change(mali_power_mode power_mode)
+{
+       switch (power_mode)
+       {
+               case MALI_POWER_MODE_ON:
+                       MALI_DEBUG_PRINT(3, ("Mali platform: Got MALI_POWER_MODE_ON event, %s\n",
+                                            bPoweroff ? "powering on" : "already on"));
+                       if (bPoweroff == 1)
+                       {
+#if !defined(CONFIG_PM_RUNTIME)
+                               /* No PM runtime: drive the power domain directly. */
+                               g3d_power_domain_control(1);
+#endif
+                               MALI_DEBUG_PRINT(4,("enable clock \n"));
+                               enable_mali_clocks();
+#if defined(CONFIG_MALI400_PROFILING)
+                               /* Report the restored freq/voltage to the profiler. */
+                               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                               MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                               MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                                               mali_gpu_clk, mali_gpu_vol/1000, 0, 0, 0);
+
+#endif
+                               bPoweroff=0;
+                       }
+                       break;
+               case MALI_POWER_MODE_LIGHT_SLEEP:
+               case MALI_POWER_MODE_DEEP_SLEEP:
+                       MALI_DEBUG_PRINT(3, ("Mali platform: Got %s event, %s\n", power_mode ==
+                                               MALI_POWER_MODE_LIGHT_SLEEP ?  "MALI_POWER_MODE_LIGHT_SLEEP" :
+                                               "MALI_POWER_MODE_DEEP_SLEEP", bPoweroff ? "already off" : "powering off"));
+                       if (bPoweroff == 0)
+                       {
+                               disable_mali_clocks();
+#if defined(CONFIG_MALI400_PROFILING)
+                               /* Report freq/voltage 0 to the profiler: GPU is off. */
+                               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                               MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                               MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                                               0, 0, 0, 0, 0);
+#endif
+
+#if !defined(CONFIG_PM_RUNTIME)
+                               g3d_power_domain_control(0);
+#endif
+                               bPoweroff=1;
+                       }
+
+                       break;
+       }
+       MALI_SUCCESS;
+}
+
+/* Periodic utilization callback from the Mali core (0 = idle ... 256 =
+ * fully busy).  Samples are dropped while the GPU is powered off. */
+void mali_gpu_utilization_handler(unsigned int utilization)
+{
+       if (bPoweroff != 0)
+               return;
+
+#ifdef CONFIG_MALI_DVFS
+       if (!mali_dvfs_handler(utilization))
+               MALI_DEBUG_PRINT(1,( "error on mali dvfs status in utilization\n"));
+#endif
+}
diff --git a/drivers/gpu/arm/mali400/mali/platform/redwood/exynos4_pmm.h b/drivers/gpu/arm/mali400/mali/platform/redwood/exynos4_pmm.h
new file mode 100644 (file)
index 0000000..ef98c9c
--- /dev/null
@@ -0,0 +1,96 @@
+/* drivers/gpu/mali400/mali/platform/pegasus-m400/exynos4_pmm.h
+ *
+ * Copyright 2011 by S.LSI. Samsung Electronics Inc.
+ * San#24, Nongseo-Dong, Giheung-Gu, Yongin, Korea
+ *
+ * Samsung SoC Mali400 DVFS driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**
+ * @file exynos4_pmm.h
+ * Platform specific Mali driver functions for the exynos 4XXX based platforms
+ */
+
+#ifndef __EXYNOS4_PMM_H__
+#define __EXYNOS4_PMM_H__
+
+#include "mali_osk.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @brief description of power change reasons
+ */
+typedef enum mali_power_mode_tag
+{
+       MALI_POWER_MODE_ON,
+       MALI_POWER_MODE_LIGHT_SLEEP,
+       MALI_POWER_MODE_DEEP_SLEEP,
+} mali_power_mode;
+
+/** @brief Platform specific setup and initialisation of MALI
+ *
+ * This is called from the entrypoint of the driver to initialize the platform
+ *
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_init(void);
+
+/** @brief Platform specific deinitialisation of MALI
+ *
+ * This is called on the exit of the driver to terminate the platform
+ *
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_deinit(void);
+
+/** @brief Platform specific powerdown sequence of MALI
+ *
+ * Call as part of platform init if there is no PMM support, else the
+ * PMM will call it.
+ * There are three power modes defined:
+ *  1) MALI_POWER_MODE_ON
+ *  2) MALI_POWER_MODE_LIGHT_SLEEP
+ *  3) MALI_POWER_MODE_DEEP_SLEEP
+ * MALI power management module transitions to MALI_POWER_MODE_LIGHT_SLEEP mode when MALI is idle
+ * for idle timer (software timer defined in mali_pmm_policy_jobcontrol.h) duration, MALI transitions
+ * to MALI_POWER_MODE_LIGHT_SLEEP mode during timeout if there are no more jobs queued.
+ * MALI power management module transitions to MALI_POWER_MODE_DEEP_SLEEP mode when OS does system power
+ * off.
+ * Customer has to add power down code when MALI transitions to MALI_POWER_MODE_LIGHT_SLEEP or MALI_POWER_MODE_DEEP_SLEEP
+ * mode.
+ * MALI_POWER_MODE_ON mode is entered when the MALI is to powered up. Some customers want to control voltage regulators during
+ * the whole system powers on/off. Customer can track in this function whether the MALI is powered up from
+ * MALI_POWER_MODE_LIGHT_SLEEP or MALI_POWER_MODE_DEEP_SLEEP mode and manage the voltage regulators as well.
+ * @param power_mode defines the power modes
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_power_mode_change(mali_power_mode power_mode);
+
+
+/** @brief Platform specific handling of GPU utilization data
+ *
+ * When GPU utilization data is enabled, this function will be
+ * periodically called.
+ *
+ * @param utilization The workload utilization of the Mali GPU. 0 = no utilization, 256 = full utilization.
+ */
+void mali_gpu_utilization_handler(unsigned int utilization);
+
+/** @brief Switch the G3D power domain on (nonzero) or off (zero).
+ * @return _MALI_OSK_ERR_OK on success; an error code on timeout. */
+_mali_osk_errcode_t g3d_power_domain_control(int bpower_on);
+
+#ifdef CONFIG_REGULATOR
+/* Helpers around the vdd_g3d regulator acquired at platform init. */
+void mali_regulator_disable(void);
+void mali_regulator_enable(void);
+void mali_regulator_set_voltage(int min_uV, int max_uV);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/drivers/gpu/arm/mali400/mali/regs/mali_200_regs.h b/drivers/gpu/arm/mali400/mali/regs/mali_200_regs.h
new file mode 100644 (file)
index 0000000..1a4d8b8
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef _MALI200_REGS_H_
+#define _MALI200_REGS_H_
+
+/**
+ *  Enum for management register addresses.
+ */
+enum mali200_mgmt_reg
+{
+       MALI200_REG_ADDR_MGMT_VERSION                              = 0x1000,
+       MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR               = 0x1004,
+       MALI200_REG_ADDR_MGMT_STATUS                               = 0x1008,
+       MALI200_REG_ADDR_MGMT_CTRL_MGMT                            = 0x100c,
+
+       MALI200_REG_ADDR_MGMT_INT_RAWSTAT                          = 0x1020,
+       MALI200_REG_ADDR_MGMT_INT_CLEAR                            = 0x1024,
+       MALI200_REG_ADDR_MGMT_INT_MASK                             = 0x1028,
+       MALI200_REG_ADDR_MGMT_INT_STATUS                           = 0x102c,
+
+       MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW                   = 0x1044,
+
+       MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS                     = 0x1050,
+
+       MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE                    = 0x1080,
+       MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC                       = 0x1084,
+       MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE                     = 0x108c,
+
+       MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE                    = 0x10a0,
+       MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC                       = 0x10a4,
+       MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE                     = 0x10ac,
+
+       MALI200_REG_SIZEOF_REGISTER_BANK                           = 0x10f0
+
+};
+
+/* Value written to the PERF_CNT_x_ENABLE registers to start a counter. */
+#define MALI200_REG_VAL_PERF_CNT_ENABLE 1
+
+/* Bit values for the CTRL_MGMT register. */
+enum mali200_mgmt_ctrl_mgmt {
+       MALI200_REG_VAL_CTRL_MGMT_STOP_BUS         = (1<<0),
+       MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES     = (1<<3),
+       MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET      = (1<<5),
+       MALI200_REG_VAL_CTRL_MGMT_START_RENDERING  = (1<<6),
+       MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET     = (1<<7), /* Only valid for Mali-300 and later */
+};
+
+/* Interrupt bits, shared by INT_RAWSTAT / INT_CLEAR / INT_MASK / INT_STATUS. */
+enum mali200_mgmt_irq {
+       MALI200_REG_VAL_IRQ_END_OF_FRAME          = (1<<0),
+       MALI200_REG_VAL_IRQ_END_OF_TILE           = (1<<1),
+       MALI200_REG_VAL_IRQ_HANG                  = (1<<2),
+       MALI200_REG_VAL_IRQ_FORCE_HANG            = (1<<3),
+       MALI200_REG_VAL_IRQ_BUS_ERROR             = (1<<4),
+       MALI200_REG_VAL_IRQ_BUS_STOP              = (1<<5),
+       MALI200_REG_VAL_IRQ_CNT_0_LIMIT           = (1<<6),
+       MALI200_REG_VAL_IRQ_CNT_1_LIMIT           = (1<<7),
+       MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR  = (1<<8),
+       MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND = (1<<9),
+       MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW  = (1<<10),
+       MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW   = (1<<11),
+       MALI400PP_REG_VAL_IRQ_RESET_COMPLETED       = (1<<12),
+};
+
+/* Every interrupt bit the PP can raise. */
+#define MALI200_REG_VAL_IRQ_MASK_ALL  ((enum mali200_mgmt_irq) (\
+    MALI200_REG_VAL_IRQ_END_OF_FRAME                           |\
+    MALI200_REG_VAL_IRQ_END_OF_TILE                            |\
+    MALI200_REG_VAL_IRQ_HANG                                   |\
+    MALI200_REG_VAL_IRQ_FORCE_HANG                             |\
+    MALI200_REG_VAL_IRQ_BUS_ERROR                              |\
+    MALI200_REG_VAL_IRQ_BUS_STOP                               |\
+    MALI200_REG_VAL_IRQ_CNT_0_LIMIT                            |\
+    MALI200_REG_VAL_IRQ_CNT_1_LIMIT                            |\
+    MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR                   |\
+    MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND                  |\
+    MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW                   |\
+    MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW                    |\
+    MALI400PP_REG_VAL_IRQ_RESET_COMPLETED))
+
+/* The subset of interrupt bits this driver actually unmasks. */
+#define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\
+    MALI200_REG_VAL_IRQ_END_OF_FRAME                           |\
+    MALI200_REG_VAL_IRQ_FORCE_HANG                             |\
+    MALI200_REG_VAL_IRQ_BUS_ERROR                              |\
+    MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR                   |\
+    MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND                  |\
+    MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW                   |\
+    MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW))
+
+#define MALI200_REG_VAL_IRQ_MASK_NONE ((enum mali200_mgmt_irq)(0))
+
+/* Bit values of the STATUS register. */
+enum mali200_mgmt_status {
+       MALI200_REG_VAL_STATUS_RENDERING_ACTIVE     = (1<<0),
+       MALI200_REG_VAL_STATUS_BUS_STOPPED          = (1<<4),
+};
+
+enum mali200_render_unit
+{
+       MALI200_REG_ADDR_FRAME = 0x0000,
+       MALI200_REG_ADDR_RSW   = 0x0004,
+       MALI200_REG_ADDR_STACK = 0x0030,
+       MALI200_REG_ADDR_STACK_SIZE = 0x0034,
+       MALI200_REG_ADDR_ORIGIN_OFFSET_X  = 0x0040
+};
+
+/* Base offsets of the three write-back units in the PP register space. */
+enum mali200_wb_unit {
+    MALI200_REG_ADDR_WB0 = 0x0100,
+    MALI200_REG_ADDR_WB1 = 0x0200,
+    MALI200_REG_ADDR_WB2 = 0x0300
+};
+
+enum mali200_wb_unit_regs {
+       MALI200_REG_ADDR_WB_SOURCE_SELECT = 0x0000,
+};
+
+/* This should be in the top 16 bit of the version register of Mali PP */
+#define MALI200_PP_PRODUCT_ID 0xC807
+#define MALI300_PP_PRODUCT_ID 0xCE07
+#define MALI400_PP_PRODUCT_ID 0xCD07
+#define MALI450_PP_PRODUCT_ID 0xCF07
+
+
+#endif /* _MALI200_REGS_H_ */
diff --git a/drivers/gpu/arm/mali400/mali/regs/mali_gp_regs.h b/drivers/gpu/arm/mali400/mali/regs/mali_gp_regs.h
new file mode 100644 (file)
index 0000000..eedb228
--- /dev/null
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/* NOTE(review): the guard misspells "CONTROL"; harmless, but any change
+ * must keep the #ifndef and #define in sync. */
+#ifndef _MALIGP2_CONROL_REGS_H_
+#define _MALIGP2_CONROL_REGS_H_
+
+/**
+ * These are the different geometry processor control registers.
+ * Their usage is to control and monitor the operation of the
+ * Vertex Shader and the Polygon List Builder in the geometry processor.
+ * Addresses are in 32-bit word relative sizes.
+ * @see [P0081] "Geometry Processor Data Structures" for details
+ */
+
+typedef enum {
+       MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR           = 0x00,
+       MALIGP2_REG_ADDR_MGMT_VSCL_END_ADDR             = 0x04,
+       MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR         = 0x08,
+       MALIGP2_REG_ADDR_MGMT_PLBUCL_END_ADDR           = 0x0c,
+       MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR     = 0x10,
+       MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR       = 0x14,
+       MALIGP2_REG_ADDR_MGMT_CMD                       = 0x20,
+       MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT               = 0x24,
+       MALIGP2_REG_ADDR_MGMT_INT_CLEAR                 = 0x28,
+       MALIGP2_REG_ADDR_MGMT_INT_MASK                  = 0x2C,
+       MALIGP2_REG_ADDR_MGMT_INT_STAT                  = 0x30,
+       MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW           = 0x34,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE         = 0x3C,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE         = 0x40,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC            = 0x44,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC            = 0x48,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE          = 0x4C,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE          = 0x50,
+       MALIGP2_REG_ADDR_MGMT_STATUS                    = 0x68,
+       MALIGP2_REG_ADDR_MGMT_VERSION                   = 0x6C,
+       MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ      = 0x80,
+       MALIGP2_REG_ADDR_MGMT_PLBCL_START_ADDR_READ     = 0x84,
+       MALIGP2_CONTR_AXI_BUS_ERROR_STAT                = 0x94,
+       MALIGP2_REGISTER_ADDRESS_SPACE_SIZE             = 0x98,
+} maligp_reg_addr_mgmt_addr;
+
+/* Value written to the PERF_CNT_x_ENABLE registers to start a counter. */
+#define MALIGP2_REG_VAL_PERF_CNT_ENABLE 1
+
+/**
+ * Commands to geometry processor.
+ *  @see MALIGP2_CTRL_REG_CMD
+ */
+typedef enum
+{
+       MALIGP2_REG_VAL_CMD_START_VS                    = (1<< 0),
+       MALIGP2_REG_VAL_CMD_START_PLBU                  = (1<< 1),
+       MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC   = (1<< 4),
+       MALIGP2_REG_VAL_CMD_RESET                               = (1<< 5),
+       MALIGP2_REG_VAL_CMD_FORCE_HANG                  = (1<< 6),
+       MALIGP2_REG_VAL_CMD_STOP_BUS                    = (1<< 9),
+       MALI400GP_REG_VAL_CMD_SOFT_RESET                = (1<<10), /* only valid for Mali-300 and later */
+} mgp_contr_reg_val_cmd;
+
+
+/**  @defgroup MALIGP2_IRQ
+ * Interrupt status of geometry processor.
+ *  @see MALIGP2_CTRL_REG_INT_RAWSTAT, MALIGP2_REG_ADDR_MGMT_INT_CLEAR,
+ *       MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_ADDR_MGMT_INT_STAT
+ * @{
+ */
+#define MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST      (1 << 0)
+#define MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST    (1 << 1)
+#define MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM     (1 << 2)
+#define MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ          (1 << 3)
+#define MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ        (1 << 4)
+#define MALIGP2_REG_VAL_IRQ_HANG                (1 << 5)
+#define MALIGP2_REG_VAL_IRQ_FORCE_HANG          (1 << 6)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT    (1 << 7)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT    (1 << 8)
+#define MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR     (1 << 9)
+#define MALIGP2_REG_VAL_IRQ_SYNC_ERROR          (1 << 10)
+#define MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR       (1 << 11)
+#define MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED     (1 << 12)
+#define MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD      (1 << 13)
+#define MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD     (1 << 14)
+#define MALI400GP_REG_VAL_IRQ_RESET_COMPLETED     (1 << 19)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW (1 << 20)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW  (1 << 21)
+#define MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS  (1 << 22)
+
+/* Mask defining all IRQs in Mali GP */
+#define MALIGP2_REG_VAL_IRQ_MASK_ALL \
+       (\
+               MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST      | \
+               MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST    | \
+               MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM     | \
+               MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ          | \
+               MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ        | \
+               MALIGP2_REG_VAL_IRQ_HANG                | \
+               MALIGP2_REG_VAL_IRQ_FORCE_HANG          | \
+               MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT    | \
+               MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT    | \
+               MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR     | \
+               MALIGP2_REG_VAL_IRQ_SYNC_ERROR          | \
+               MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR       | \
+               MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED     | \
+               MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD      | \
+               MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD     | \
+               MALI400GP_REG_VAL_IRQ_RESET_COMPLETED     | \
+               MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+               MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW  | \
+               MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+/* Mask defining the IRQs in Mali GP which we use */
+#define MALIGP2_REG_VAL_IRQ_MASK_USED \
+       (\
+               MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST      | \
+               MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST    | \
+               MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM     | \
+               MALIGP2_REG_VAL_IRQ_FORCE_HANG          | \
+               MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR     | \
+               MALIGP2_REG_VAL_IRQ_SYNC_ERROR          | \
+               MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR       | \
+               MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD      | \
+               MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD     | \
+               MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+               MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW  | \
+               MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+/* Mask defining non IRQs on MaliGP2*/
+#define MALIGP2_REG_VAL_IRQ_MASK_NONE 0
+
+/** @} end defgroup MALIGP2_IRQ */
+
+/** @defgroup MALIGP2_STATUS
+ * The different Status values to the geometry processor.
+ *  @see MALIGP2_CTRL_REG_STATUS
+ * @{
+ */
+#define MALIGP2_REG_VAL_STATUS_VS_ACTIVE         0x0002
+#define MALIGP2_REG_VAL_STATUS_BUS_STOPPED       0x0004
+#define MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE       0x0008
+#define MALIGP2_REG_VAL_STATUS_BUS_ERROR         0x0040
+#define MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR   0x0100
+/** @} end defgroup MALIGP2_STATUS */
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ACTIVE (\
+       MALIGP2_REG_VAL_STATUS_VS_ACTIVE|\
+       MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE)
+
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ERROR (\
+       MALIGP2_REG_VAL_STATUS_BUS_ERROR |\
+       MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR )
+
+/* This should be in the top 16 bit of the version register of gp.*/
+#define MALI200_GP_PRODUCT_ID 0xA07
+#define MALI300_GP_PRODUCT_ID 0xC07
+#define MALI400_GP_PRODUCT_ID 0xB07
+#define MALI450_GP_PRODUCT_ID 0xD07
+
+/**
+ * The different sources for instrumented on the geometry processor.
+ *  @see MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC
+ */
+
+enum MALIGP2_cont_reg_perf_cnt_src {
+       MALIGP2_REG_VAL_PERF_CNT1_SRC_NUMBER_OF_VERTICES_PROCESSED = 0x0a,
+};
+
+#endif
diff --git a/drivers/gpu/arm/mali400/mali/timestamp-arm11-cc/mali_timestamp.c b/drivers/gpu/arm/mali400/mali/timestamp-arm11-cc/mali_timestamp.c
new file mode 100644 (file)
index 0000000..213596a
--- /dev/null
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_profiling_sampler.h */
diff --git a/drivers/gpu/arm/mali400/mali/timestamp-arm11-cc/mali_timestamp.h b/drivers/gpu/arm/mali400/mali/timestamp-arm11-cc/mali_timestamp.h
new file mode 100644 (file)
index 0000000..248548b
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+       /*
+        * reset counters and overflow flags
+        */
+
+    u32 mask = (1 << 0) | /* enable all three counters */
+                  (0 << 1) | /* do NOT reset the Count Registers (writing 1 here resets them to 0x0) */
+                  (1 << 2) | /* reset the Cycle Counter Register to 0x0 */
+                  (0 << 3) | /* 1 = Cycle Counter Register counts every 64th processor clock cycle */
+                  (0 << 4) | /* Count Register 0 interrupt enable */
+                  (0 << 5) | /* Count Register 1 interrupt enable */
+                  (0 << 6) | /* Cycle Counter interrupt enable */
+                  (0 << 8) | /* Count Register 0 overflow flag (clear or write, flag on read) */
+                  (0 << 9) | /* Count Register 1 overflow flag (clear or write, flag on read) */
+                  (1 << 10); /* Cycle Counter Register overflow flag (clear or write, flag on read) */
+
+       __asm__ __volatile__ ("MCR    p15, 0, %0, c15, c12, 0" : : "r" (mask) );
+
+       return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+       u32 result;
+
+       /* this is for the clock cycles */
+       __asm__ __volatile__ ("MRC    p15, 0, %0, c15, c12, 1" : "=r" (result));
+
+       return (u64)result;
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/gpu/arm/mali400/mali/timestamp-default/mali_timestamp.c b/drivers/gpu/arm/mali400/mali/timestamp-default/mali_timestamp.c
new file mode 100644 (file)
index 0000000..213596a
--- /dev/null
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_profiling_sampler.h */
diff --git a/drivers/gpu/arm/mali400/mali/timestamp-default/mali_timestamp.h b/drivers/gpu/arm/mali400/mali/timestamp-default/mali_timestamp.h
new file mode 100644 (file)
index 0000000..867253f
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+       return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+       return _mali_osk_time_get_ns();
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/Kbuild b/drivers/gpu/arm/mali400/r4p0_rel0/Kbuild
new file mode 100644 (file)
index 0000000..4628b4b
--- /dev/null
@@ -0,0 +1,239 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+# 
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+# 
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+#
+
+# This file is called by the Linux build system.
+
+# set up defaults if not defined by the user
+TIMESTAMP ?= default
+OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB ?= 16
+USING_GPU_UTILIZATION ?= 1
+PROFILING_SKIP_PP_JOBS ?= 0
+PROFILING_SKIP_PP_AND_GP_JOBS ?= 0
+MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP ?= 0
+MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED ?= 0
+MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS ?= 0
+MALI_UPPER_HALF_SCHEDULING ?= 1
+MALI_ENABLE_CPU_CYCLES ?= 0
+
+# MALI_SEC
+# Include the mapping between TARGET_PLATFORM and KDIR + MALI_PLATFORM
+ifeq ($(CONFIG_CPU_EXYNOS4212),y)
+    TARGET_PLATFORM=pegasus-m400
+endif
+ifeq ($(CONFIG_CPU_EXYNOS4412),y)
+    TARGET_PLATFORM=pegasus-m400
+endif
+ifeq ($(CONFIG_SOC_EXYNOS4415),y)
+       TARGET_PLATFORM=exynos4415
+endif
+ifeq ($(CONFIG_SOC_EXYNOS3470),y)
+       TARGET_PLATFORM=exynos3470
+endif
+
+ifeq ($(CONFIG_SOC_EXYNOS3250),y)
+       TARGET_PLATFORM=exynos3250
+endif
+
+include $(src)/MALI_CONFIGURATION
+MALI_PLATFORM = $(MALI_PLATFORM-$(TARGET_PLATFORM))
+EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1
+MALI_PLATFORM_FILES = $(subst $(src)/,,$(wildcard $(src)/platform/$(MALI_PLATFORM)/*.c))
+# End of MALI_SEC
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+ifeq ($(wildcard $(src)/linux/license/gpl/*),)
+    ccflags-y += -I$(src)/linux/license/proprietary
+    ifeq ($(CONFIG_MALI400_PROFILING),y)
+        $(error Profiling is incompatible with non-GPL license)
+    endif
+    ifeq ($(CONFIG_PM_RUNTIME),y)
+        $(error Runtime PM is incompatible with non-GPL license)
+    endif
+    ifeq ($(CONFIG_DMA_SHARED_BUFFER),y)
+        $(error DMA-BUF is incompatible with non-GPL license)
+    endif
+    $(error Linux Device integration is incompatible with non-GPL license)
+else
+    ccflags-y += -I$(src)/linux/license/gpl
+endif
+
+mali-y += \
+       linux/mali_osk_atomics.o \
+       linux/mali_osk_irq.o \
+       linux/mali_osk_wq.o \
+       linux/mali_osk_locks.o \
+       linux/mali_osk_wait_queue.o \
+       linux/mali_osk_low_level_mem.o \
+       linux/mali_osk_math.o \
+       linux/mali_osk_memory.o \
+       linux/mali_osk_misc.o \
+       linux/mali_osk_mali.o \
+       linux/mali_osk_notification.o \
+       linux/mali_osk_time.o \
+       linux/mali_osk_timers.o
+
+mali-y += linux/mali_memory.o linux/mali_memory_os_alloc.o
+mali-y += linux/mali_memory_external.o
+mali-y += linux/mali_memory_block_alloc.o
+
+mali-y += \
+       linux/mali_ukk_mem.o \
+       linux/mali_ukk_gp.o \
+       linux/mali_ukk_pp.o \
+       linux/mali_ukk_core.o \
+       linux/mali_ukk_soft_job.o \
+       linux/mali_ukk_timeline.o
+
+# Source files which always are included in a build
+mali-y += \
+       common/mali_kernel_core.o \
+       linux/mali_kernel_linux.o \
+       common/mali_kernel_descriptor_mapping.o \
+       common/mali_session.o \
+       linux/mali_device_pause_resume.o \
+       common/mali_kernel_vsync.o \
+       linux/mali_ukk_vsync.o \
+       linux/mali_kernel_sysfs.o \
+       common/mali_mmu.o \
+       common/mali_mmu_page_directory.o \
+       common/mali_mem_validation.o \
+       common/mali_hw_core.o \
+       common/mali_gp.o \
+       common/mali_pp.o \
+       common/mali_pp_job.o \
+       common/mali_gp_job.o \
+       common/mali_soft_job.o \
+       common/mali_scheduler.o \
+       common/mali_gp_scheduler.o \
+       common/mali_pp_scheduler.o \
+       common/mali_group.o \
+       common/mali_dlbu.o \
+       common/mali_broadcast.o \
+       common/mali_pm.o \
+       common/mali_pmu.o \
+       common/mali_user_settings_db.o \
+       common/mali_kernel_utilization.o \
+       common/mali_l2_cache.o \
+       common/mali_dma.o \
+       common/mali_timeline.o \
+       common/mali_timeline_fence_wait.o \
+       common/mali_timeline_sync_fence.o \
+       common/mali_spinlock_reentrant.o \
+       common/mali_pm_domain.o \
+       linux/mali_osk_pm.o \
+       linux/mali_pmu_power_up_down.o \
+       __malidrv_build_info.o
+
+ifneq ($(MALI_PLATFORM_FILES),)
+       mali-y += $(MALI_PLATFORM_FILES:.c=.o)
+endif
+
+mali-$(CONFIG_MALI400_PROFILING) += linux/mali_ukk_profiling.o
+mali-$(CONFIG_MALI400_PROFILING) += linux/mali_osk_profiling.o
+
+mali-$(CONFIG_MALI400_INTERNAL_PROFILING) += linux/mali_profiling_internal.o timestamp-$(TIMESTAMP)/mali_timestamp.o
+ccflags-$(CONFIG_MALI400_INTERNAL_PROFILING) += -I$(src)/timestamp-$(TIMESTAMP)
+
+mali-$(CONFIG_DMA_SHARED_BUFFER) += linux/mali_memory_dma_buf.o
+mali-$(CONFIG_SYNC) += linux/mali_sync.o
+
+mali-$(CONFIG_MALI400_UMP) += linux/mali_memory_ump.o
+
+mali-$(CONFIG_MALI400_POWER_PERFORMANCE_POLICY) += common/mali_power_performance_policy.o
+
+# Tell the Linux build system from which .o file to create the kernel module
+obj-$(CONFIG_MALI400) := mali.o
+
+ccflags-y += $(EXTRA_DEFINES)
+
+# Set up our defines, which will be passed to gcc
+ccflags-y += -DPROFILING_SKIP_PP_JOBS=$(PROFILING_SKIP_PP_JOBS)
+ccflags-y += -DPROFILING_SKIP_PP_AND_GP_JOBS=$(PROFILING_SKIP_PP_AND_GP_JOBS)
+
+ccflags-y += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP)
+ccflags-y += -DMALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED=$(MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED)
+ccflags-y += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS)
+ccflags-y += -DMALI_STATE_TRACKING=1
+ccflags-y += -DMALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+ccflags-y += -DUSING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
+ccflags-y += -DMALI_ENABLE_CPU_CYCLES=$(MALI_ENABLE_CPU_CYCLES)
+
+ifeq ($(MALI_UPPER_HALF_SCHEDULING),1)
+       ccflags-y += -DMALI_UPPER_HALF_SCHEDULING
+endif
+
+ccflags-$(CONFIG_MALI400_UMP) += -I$(src)/../../ump/include/ump
+ccflags-$(CONFIG_MALI400_DEBUG) += -DDEBUG
+
+# Use our defines when compiling
+ccflags-y += -I$(src) -I$(src)/include -I$(src)/common -I$(src)/linux -I$(src)/platform
+# MALI_SEC 
+ccflags-y += -I$(src)/../ump/include -I$(src)/include/linux/mali -I$(src)/platform/$(MALI_PLATFORM)
+
+# Get subversion revision number, fall back to only ${MALI_RELEASE_NAME} if no svn info is available
+MALI_RELEASE_NAME=$(shell cat $(src)/.version 2> /dev/null)
+
+SVN_INFO = (cd $(src); svn info 2>/dev/null)
+
+ifneq ($(shell $(SVN_INFO) 2>/dev/null),)
+# SVN detected
+SVN_REV := $(shell $(SVN_INFO) | grep '^Revision: '| sed -e 's/^Revision: //' 2>/dev/null)
+DRIVER_REV := $(MALI_RELEASE_NAME)-r$(SVN_REV)
+CHANGE_DATE := $(shell $(SVN_INFO) | grep '^Last Changed Date: ' | cut -d: -f2- | cut -b2-)
+CHANGED_REVISION := $(shell $(SVN_INFO) | grep '^Last Changed Rev: ' | cut -d: -f2- | cut -b2-)
+REPO_URL := $(shell $(SVN_INFO) | grep '^URL: ' | cut -d: -f2- | cut -b2-)
+
+else # SVN
+GIT_REV := $(shell cd $(src); git describe --always 2>/dev/null)
+ifneq ($(GIT_REV),)
+# Git detected
+DRIVER_REV := $(MALI_RELEASE_NAME)-$(GIT_REV)
+CHANGE_DATE := $(shell cd $(src); git log -1 --format="%ci")
+CHANGED_REVISION := $(GIT_REV)
+REPO_URL := $(shell cd $(src); git describe --all --always 2>/dev/null)
+
+else # Git
+# No Git or SVN detected
+DRIVER_REV := $(MALI_RELEASE_NAME)
+CHANGE_DATE := $(MALI_RELEASE_NAME)
+CHANGED_REVISION := $(MALI_RELEASE_NAME)
+endif
+endif
+
+ccflags-y += -DSVN_REV_STRING=\"$(DRIVER_REV)\"
+
+VERSION_STRINGS :=
+VERSION_STRINGS += API_VERSION=$(shell cd $(src); grep "\#define _MALI_API_VERSION" $(FILES_PREFIX)include/linux/mali/mali_utgard_uk_types.h | cut -d' ' -f 3 )
+VERSION_STRINGS += REPO_URL=$(REPO_URL)
+VERSION_STRINGS += REVISION=$(DRIVER_REV)
+VERSION_STRINGS += CHANGED_REVISION=$(CHANGED_REVISION)
+VERSION_STRINGS += CHANGE_DATE=$(CHANGE_DATE)
+VERSION_STRINGS += BUILD_DATE=$(shell date)
+ifdef CONFIG_MALI400_DEBUG
+VERSION_STRINGS += BUILD=debug
+else
+VERSION_STRINGS += BUILD=release
+endif
+VERSION_STRINGS += TARGET_PLATFORM=$(TARGET_PLATFORM)
+VERSION_STRINGS += MALI_PLATFORM=$(MALI_PLATFORM)
+VERSION_STRINGS += KDIR=$(KDIR)
+VERSION_STRINGS += OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+VERSION_STRINGS += USING_UMP=$(CONFIG_MALI400_UMP)
+VERSION_STRINGS += USING_PROFILING=$(CONFIG_MALI400_PROFILING)
+VERSION_STRINGS += USING_INTERNAL_PROFILING=$(CONFIG_MALI400_INTERNAL_PROFILING)
+VERSION_STRINGS += USING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
+VERSION_STRINGS += USING_POWER_PERFORMANCE_POLICY=$(CONFIG_POWER_PERFORMANCE_POLICY)
+VERSION_STRINGS += MALI_UPPER_HALF_SCHEDULING=$(MALI_UPPER_HALF_SCHEDULING)
+
+# Create file with Mali driver configuration
+$(src)/__malidrv_build_info.c:
+       @echo 'const char *__malidrv_build_info(void) { return "malidrv: $(VERSION_STRINGS)";}' > $(src)/__malidrv_build_info.c
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/Kconfig b/drivers/gpu/arm/mali400/r4p0_rel0/Kconfig
new file mode 100644 (file)
index 0000000..bd4bb2b
--- /dev/null
@@ -0,0 +1,81 @@
+config MALI400_DEBUG
+       bool "Enable debug in Mali driver"
+       depends on MALI400
+       ---help---
+         This enabled extra debug checks and messages in the Mali driver.
+
+config MALI400_PROFILING
+       bool "Enable Mali profiling"
+       depends on MALI400
+       select TRACEPOINTS
+       default n
+       ---help---
+         This enables gator profiling of Mali GPU events.
+
+config MALI400_INTERNAL_PROFILING
+       bool "Enable internal Mali profiling API"
+       depends on MALI400_PROFILING
+       default n
+       ---help---
+         This enables the internal legacy Mali profiling API.
+
+config MALI_DVFS
+       bool "Enable mali DVFS"
+       depends on MALI400 && PM
+       default y
+       ---help---
+               This enables Mali driver DVFS.
+
+config MALI_DVFS_FULL_LEVEL
+       bool "Enable mali DVFS full level"
+       depends on MALI_DVFS
+       default n
+       ---help---
+               This enables Mali driver DVFS full level.
+
+if CPU_EXYNOS4212 || CPU_EXYNOS4412
+config MALI400_UMP
+       bool "Enable UMP support"
+       depends on MALI400
+       default y
+       ---help---
+         This enables support for the UMP memory sharing API in the Mali driver.
+endif
+
+config MALI400_POWER_PERFORMANCE_POLICY
+       bool "Enable Mali power performance policy"
+       depends on ARM
+       default n
+       ---help---
+         This enables support for dynamic performance scaling of Mali with the goal of lowering power consumption.
+
+config MALI_DMA_BUF_MAP_ON_ATTACH
+       bool "Map dma-buf attachments on attach"
+       depends on MALI400 && DMA_SHARED_BUFFER
+       default y
+       ---help---
+         This makes the Mali driver map dma-buf attachments after doing
+         attach. If this is not set the dma-buf attachments will be mapped for
+         every time the GPU need to access the buffer.
+
+         Mapping for each access can cause lower performance.
+
+config MALI_SHARED_INTERRUPTS
+       bool "Support for shared interrupts"
+       depends on MALI400
+       default n
+       ---help---
+         Adds functionality required to properly support shared interrupts.  Without this support,
+         the device driver will fail during insmod if it detects shared interrupts.  This also
+         works when the GPU is not using shared interrupts, but might have a slight performance
+         impact.
+
+config MALI_PMU_PARALLEL_POWER_UP
+       bool "Power up Mali PMU domains in parallel"
+       depends on MALI400
+       default n
+       ---help---
+         This makes the Mali driver power up all PMU power domains in parallel, instead of
+         powering up domains one by one, with a slight delay in between. Powering on all power
+         domains at the same time may cause peak currents higher than what some systems can handle.
+         These systems must not enable this option.
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/MALI_CONFIGURATION b/drivers/gpu/arm/mali400/r4p0_rel0/MALI_CONFIGURATION
new file mode 100644 (file)
index 0000000..c8c24f7
--- /dev/null
@@ -0,0 +1,26 @@
+# Location of default kernels
+KDIR-odroida:=/projects/pr297/linux/odroid-a/current/linux
+KDIR-odroidpc:=/projects/pr297/linux/odroid-pc/current/linux
+KDIR-odroidq:=/projects/pr297/linux/odroid-q/current/linux
+KDIR-orion:=/projects/pr297/linux/orion/current/linux
+KDIR-pegasus:=/projects/pr297/linux/pegasus-smdk/current/linux
+KDIR-tcc8900:=/projects/pr297/linux/tcc8900/current/linux
+KDIR-pb11mp:=/projects/pr297/linux/pb11mp/current/linux
+KDIR-vea9:=/projects/pr297/linux/vea9/current/linux
+KDIR-snowball:=/no/default/kernel/yet
+
+# Name of platform directory with platform specific code (should be built into kernel on a real system) 
+MALI_PLATFORM-odroida=exynos4
+MALI_PLATFORM-odroidpc=exynos4
+MALI_PLATFORM-odroidq=exynos4
+MALI_PLATFORM-orion=exynos4
+MALI_PLATFORM-pegasus=exynos4
+# MALI_SEC 
+MALI_PLATFORM-pegasus-m400=pegasus-m400
+MALI_PLATFORM-exynos3250=exynos3250
+MALI_PLATFORM-exynos3470=exynos3470
+MALI_PLATFORM-exynos4415=exynos4415
+MALI_PLATFORM-tcc8900=tcc8900
+MALI_PLATFORM-pb11mp=arm
+MALI_PLATFORM-vea9=arm
+MALI_PLATFORM-snowball=ux500
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/Makefile b/drivers/gpu/arm/mali400/r4p0_rel0/Makefile
new file mode 100644 (file)
index 0000000..cce0331
--- /dev/null
@@ -0,0 +1,157 @@
+#
+# Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+# 
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+# 
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+#
+
+USE_UMPV2=0
+USING_PROFILING ?= 1
+USING_INTERNAL_PROFILING ?= 0
+USING_POWER_PERFORMANCE_POLICY ?= 0
+MALI_HEATMAPS_ENABLED ?= 0
+MALI_DMA_BUF_MAP_ON_ATTACH ?= 1
+MALI_PMU_PARALLEL_POWER_UP ?= 0
+
+# The Makefile sets up "arch" based on the CONFIG, creates the version info
+# string and the __malidrv_build_info.c file, and then call the Linux build
+# system to actually build the driver. After that point the Kbuild file takes
+# over.
+
+# set up defaults if not defined by the user
+ARCH ?= arm
+
+OSKOS=linux
+FILES_PREFIX=
+
+check_cc2 = \
+       $(shell if $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; \
+       then \
+               echo "$(2)"; \
+       else \
+               echo "$(3)"; \
+       fi ;)
+
+# This conditional makefile exports the global definition ARM_INTERNAL_BUILD. Customer releases will not include arm_internal.mak
+-include ../../../arm_internal.mak
+
+# Give warning of old config parameters are used
+ifneq ($(CONFIG),)
+$(warning "You have specified the CONFIG variable which is no longer in used. Use TARGET_PLATFORM instead.")
+endif
+
+ifneq ($(CPU),)
+$(warning "You have specified the CPU variable which is no longer in used. Use TARGET_PLATFORM instead.")
+endif
+
+# Include the mapping between TARGET_PLATFORM and KDIR + MALI_PLATFORM
+-include MALI_CONFIGURATION
+export KDIR ?= $(KDIR-$(TARGET_PLATFORM))
+export MALI_PLATFORM ?= $(MALI_PLATFORM-$(TARGET_PLATFORM))
+
+ifneq ($(TARGET_PLATFORM),)
+ifeq ($(MALI_PLATFORM),)
+$(error "Invalid TARGET_PLATFORM: $(TARGET_PLATFORM)")
+endif
+endif
+
+# validate lookup result
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(TARGET_PLATFORM))
+endif
+
+
+ifeq ($(USING_UMP),1)
+export CONFIG_MALI400_UMP=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_UMP=1
+ifeq ($(USE_UMPV2),1)
+UMP_SYMVERS_FILE ?= ../umpv2/Module.symvers
+else
+UMP_SYMVERS_FILE ?= ../ump/Module.symvers
+endif
+KBUILD_EXTRA_SYMBOLS = $(realpath $(UMP_SYMVERS_FILE))
+$(warning $(KBUILD_EXTRA_SYMBOLS))
+endif
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+include $(KDIR)/.config
+
+ifeq ($(ARCH), arm)
+# when compiling for ARM we're cross compiling
+export CROSS_COMPILE ?= $(call check_cc2, arm-linux-gnueabi-gcc, arm-linux-gnueabi-, arm-none-linux-gnueabi-)
+endif
+
+# report detected/selected settings
+ifdef ARM_INTERNAL_BUILD
+$(warning TARGET_PLATFORM $(TARGET_PLATFORM))
+$(warning KDIR $(KDIR))
+$(warning MALI_PLATFORM $(MALI_PLATFORM))
+endif
+
+# Set up build config
+export CONFIG_MALI400=m
+
+export EXTRA_DEFINES += -DCONFIG_MALI400=1
+
+ifneq ($(MALI_PLATFORM),)
+export EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1
+export MALI_PLATFORM_FILES = $(wildcard platform/$(MALI_PLATFORM)/*.c)
+endif
+
+ifeq ($(USING_PROFILING),1)
+ifeq ($(CONFIG_TRACEPOINTS),)
+$(warning CONFIG_TRACEPOINTS required for profiling)
+else
+export CONFIG_MALI400_PROFILING=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_PROFILING=1
+ifeq ($(USING_INTERNAL_PROFILING),1)
+export CONFIG_MALI400_INTERNAL_PROFILING=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_INTERNAL_PROFILING=1
+endif
+ifeq ($(MALI_HEATMAPS_ENABLED),1)
+export MALI_HEATMAPS_ENABLED=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_HEATMAPS_ENABLED
+endif
+endif
+endif
+
+ifeq ($(MALI_DMA_BUF_MAP_ON_ATTACH),1)
+export CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DMA_BUF_MAP_ON_ATTACH
+endif
+
+ifeq ($(MALI_SHARED_INTERRUPTS),1)
+export CONFIG_MALI_SHARED_INTERRUPTS=y
+export EXTRA_DEFINES += -DCONFIG_MALI_SHARED_INTERRUPTS
+endif
+
+ifeq ($(USING_POWER_PERFORMANCE_POLICY),1)
+export CONFIG_MALI400_POWER_PERFORMANCE_POLICY=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_POWER_PERFORMANCE_POLICY
+endif
+
+ifeq ($(MALI_PMU_PARALLEL_POWER_UP),1)
+export CONFIG_MALI_PMU_PARALLEL_POWER_UP=y
+export EXTRA_DEFINES += -DCONFIG_MALI_PMU_PARALLEL_POWER_UP
+endif
+
+ifneq ($(BUILD),release)
+export CONFIG_MALI400_DEBUG=y
+endif
+
+all: $(UMP_SYMVERS_FILE)
+       $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) modules
+       @rm $(FILES_PREFIX)__malidrv_build_info.c $(FILES_PREFIX)__malidrv_build_info.o
+
+clean:
+       $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+
+kernelrelease:
+       $(MAKE) ARCH=$(ARCH) -C $(KDIR) kernelrelease
+
+export CONFIG KBUILD_EXTRA_SYMBOLS
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/__malidrv_build_info.c b/drivers/gpu/arm/mali400/r4p0_rel0/__malidrv_build_info.c
new file mode 100644 (file)
index 0000000..1972a6a
--- /dev/null
@@ -0,0 +1 @@
+const char *__malidrv_build_info(void) { return "malidrv:  API_VERSION=401 REPO_URL=heads/s-kitkat-3.4-universal3470-topic REVISION=r4p0-00rel0-Ruby-JB433-3C05R1-g-320-g1a7bdd5 CHANGED_REVISION=Ruby-JB433-3C05R1-g-320-g1a7bdd5 CHANGE_DATE=2013-12-13 16:37:33 +0900 BUILD_DATE=Fri Dec 13 16:57:40 KST 2013 BUILD=release TARGET_PLATFORM=exynos3470 MALI_PLATFORM=exynos3470 KDIR= OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=16 USING_UMP= USING_PROFILING= USING_INTERNAL_PROFILING= USING_GPU_UTILIZATION=1 USING_POWER_PERFORMANCE_POLICY= MALI_UPPER_HALF_SCHEDULING=1";}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_broadcast.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_broadcast.c
new file mode 100644 (file)
index 0000000..42bded2
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_broadcast.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+static const int bcast_unit_reg_size = 0x1000;
+static const int bcast_unit_addr_broadcast_mask = 0x0;
+static const int bcast_unit_addr_irq_override_mask = 0x4;
+
+struct mali_bcast_unit {
+       struct mali_hw_core hw_core;
+       u32 current_mask;
+};
+
+struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource)
+{
+       struct mali_bcast_unit *bcast_unit = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(resource);
+       MALI_DEBUG_PRINT(2, ("Mali Broadcast unit: Creating Mali Broadcast unit: %s\n", resource->description));
+
+       bcast_unit = _mali_osk_malloc(sizeof(struct mali_bcast_unit));
+       if (NULL == bcast_unit) {
+               MALI_PRINT_ERROR(("Mali Broadcast unit: Failed to allocate memory for Broadcast unit\n"));
+               return NULL;
+       }
+
+       if (_MALI_OSK_ERR_OK == mali_hw_core_create(&bcast_unit->hw_core, resource, bcast_unit_reg_size)) {
+               bcast_unit->current_mask = 0;
+               mali_bcast_reset(bcast_unit);
+
+               return bcast_unit;
+       } else {
+               MALI_PRINT_ERROR(("Mali Broadcast unit: Failed map broadcast unit\n"));
+       }
+
+       _mali_osk_free(bcast_unit);
+
+       return NULL;
+}
+
+void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit)
+{
+       MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+       mali_hw_core_delete(&bcast_unit->hw_core);
+       _mali_osk_free(bcast_unit);
+}
+
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group)
+{
+       u32 bcast_id;
+       u32 broadcast_mask;
+
+       MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       bcast_id = mali_pp_core_get_bcast_id(mali_group_get_pp_core(group));
+
+       broadcast_mask = bcast_unit->current_mask;
+
+       broadcast_mask |= (bcast_id); /* add PP core to broadcast */
+       broadcast_mask |= (bcast_id << 16); /* add MMU to broadcast */
+
+       /* store mask so we can restore on reset */
+       bcast_unit->current_mask = broadcast_mask;
+}
+
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group)
+{
+       u32 bcast_id;
+       u32 broadcast_mask;
+
+       MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       bcast_id = mali_pp_core_get_bcast_id(mali_group_get_pp_core(group));
+
+       broadcast_mask = bcast_unit->current_mask;
+
+       broadcast_mask &= ~((bcast_id << 16) | bcast_id);
+
+       /* store mask so we can restore on reset */
+       bcast_unit->current_mask = broadcast_mask;
+}
+
+void mali_bcast_reset(struct mali_bcast_unit *bcast_unit)
+{
+       MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+       /* set broadcast mask */
+       mali_hw_core_register_write(&bcast_unit->hw_core,
+                                   bcast_unit_addr_broadcast_mask,
+                                   bcast_unit->current_mask);
+
+       /* set IRQ override mask */
+       mali_hw_core_register_write(&bcast_unit->hw_core,
+                                   bcast_unit_addr_irq_override_mask,
+                                   bcast_unit->current_mask & 0xFF);
+}
+
+void mali_bcast_disable(struct mali_bcast_unit *bcast_unit)
+{
+       MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+       /* set broadcast mask */
+       mali_hw_core_register_write(&bcast_unit->hw_core,
+                                   bcast_unit_addr_broadcast_mask,
+                                   0x0);
+
+       /* set IRQ override mask */
+       mali_hw_core_register_write(&bcast_unit->hw_core,
+                                   bcast_unit_addr_irq_override_mask,
+                                   0x0);
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_broadcast.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_broadcast.h
new file mode 100644 (file)
index 0000000..df5f2f9
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/*
+ *  Interface for the broadcast unit on Mali-450.
+ *
+ * - Represents up to 8 x (MMU + PP) pairs.
+ * - Supports dynamically changing which (MMU + PP) pairs receive the broadcast by
+ *   setting a mask.
+ */
+
+#include "mali_hw_core.h"
+#include "mali_group.h"
+
+struct mali_bcast_unit;
+
+struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource);
+void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit);
+
+/* Add a group to the list of (MMU + PP) pairs broadcasts go out to. */
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group);
+
+/* Remove a group from the list of (MMU + PP) pairs broadcasts go out to. */
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group);
+
+/* Re-set cached mask. This needs to be called after having been suspended. */
+void mali_bcast_reset(struct mali_bcast_unit *bcast_unit);
+
+/**
+ * Disable broadcast unit
+ *
+ * mali_bcast_enable must be called to re-enable the unit. Cores may not be
+ * added or removed when the unit is disabled.
+ */
+void mali_bcast_disable(struct mali_bcast_unit *bcast_unit);
+
+/**
+ * Re-enable broadcast unit
+ *
+ * This resets the masks to include the cores present when mali_bcast_disable was called.
+ */
+MALI_STATIC_INLINE void mali_bcast_enable(struct mali_bcast_unit *bcast_unit)
+{
+       mali_bcast_reset(bcast_unit);
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_dlbu.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_dlbu.c
new file mode 100644 (file)
index 0000000..c5284b7
--- /dev/null
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_dlbu.h"
+#include "mali_memory.h"
+#include "mali_pp.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+
+/**
+ * Size of DLBU registers in bytes
+ */
+#define MALI_DLBU_SIZE 0x400
+
+u32 mali_dlbu_phys_addr = 0;
+static mali_io_address mali_dlbu_cpu_addr = 0;
+
+/**
+ * DLBU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_dlbu_register {
+       MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR = 0x0000, /**< Master tile list physical base address;
+                                                            31:12 Physical address to the page used for the DLBU
+                                                            0 DLBU enable - set this bit to 1 enables the AXI bus
+                                                            between PPs and L2s, setting to 0 disables the router and
+                                                            no further transactions are sent to DLBU */
+       MALI_DLBU_REGISTER_MASTER_TLLIST_VADDR     = 0x0004, /**< Master tile list virtual base address;
+                                                            31:12 Virtual address to the page used for the DLBU */
+       MALI_DLBU_REGISTER_TLLIST_VBASEADDR        = 0x0008, /**< Tile list virtual base address;
+                                                            31:12 Virtual address to the tile list. This address is used when
+                                                            calculating the call address sent to PP.*/
+       MALI_DLBU_REGISTER_FB_DIM                  = 0x000C, /**< Framebuffer dimension;
+                                                            23:16 Number of tiles in Y direction-1
+                                                            7:0 Number of tiles in X direction-1 */
+       MALI_DLBU_REGISTER_TLLIST_CONF             = 0x0010, /**< Tile list configuration;
+                                                            29:28 select the size of each allocated block: 0=128 bytes, 1=256, 2=512, 3=1024
+                                                            21:16 2^n number of tiles to be binned to one tile list in Y direction
+                                                            5:0 2^n number of tiles to be binned to one tile list in X direction */
+       MALI_DLBU_REGISTER_START_TILE_POS          = 0x0014, /**< Start tile positions;
+                                                            31:24 start position in Y direction for group 1
+                                                            23:16 start position in X direction for group 1
+                                                            15:8 start position in Y direction for group 0
+                                                            7:0 start position in X direction for group 0 */
+       MALI_DLBU_REGISTER_PP_ENABLE_MASK          = 0x0018, /**< PP enable mask;
+                                                            7 enable PP7 for load balancing
+                                                            6 enable PP6 for load balancing
+                                                            5 enable PP5 for load balancing
+                                                            4 enable PP4 for load balancing
+                                                            3 enable PP3 for load balancing
+                                                            2 enable PP2 for load balancing
+                                                            1 enable PP1 for load balancing
+                                                            0 enable PP0 for load balancing */
+} mali_dlbu_register;
+
+/* NOTE(review): this enum is not referenced anywhere in this file; it
+ * presumably documents the bit positions of PP_ENABLE_MASK above — confirm
+ * before removing. */
+typedef enum {
+       PP0ENABLE = 0,
+       PP1ENABLE,
+       PP2ENABLE,
+       PP3ENABLE,
+       PP4ENABLE,
+       PP5ENABLE,
+       PP6ENABLE,
+       PP7ENABLE
+} mali_dlbu_pp_enable;
+
+struct mali_dlbu_core {
+       struct mali_hw_core     hw_core;           /**< Common for all HW cores */
+       u32                     pp_cores_mask;     /**< This is a mask for the PP cores whose operation will be controlled by LBU
+                                                     see MALI_DLBU_REGISTER_PP_ENABLE_MASK register */
+};
+
+/* Acquire the page used as the DLBU master tile list. The physical and CPU
+ * addresses are stored in the module-level mali_dlbu_phys_addr /
+ * mali_dlbu_cpu_addr variables; returns _MALI_OSK_ERR_FAULT if no page could
+ * be obtained. */
+_mali_osk_errcode_t mali_dlbu_initialize(void)
+{
+
+       MALI_DEBUG_PRINT(2, ("Mali DLBU: Initializing\n"));
+
+       if (_MALI_OSK_ERR_OK == mali_mmu_get_table_page(&mali_dlbu_phys_addr, &mali_dlbu_cpu_addr)) {
+               MALI_SUCCESS;
+       }
+
+       return _MALI_OSK_ERR_FAULT;
+}
+
+/* Release the page acquired by mali_dlbu_initialize(). */
+void mali_dlbu_terminate(void)
+{
+       MALI_DEBUG_PRINT(3, ("Mali DLBU: terminating\n"));
+
+       mali_mmu_release_table_page(mali_dlbu_phys_addr, mali_dlbu_cpu_addr);
+}
+
+/* Allocate a DLBU core object, map its registers (MALI_DLBU_SIZE bytes) and
+ * reset it. Returns NULL on allocation, HW-core creation or reset failure;
+ * on failure all partially acquired resources are released. */
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t * resource)
+{
+       struct mali_dlbu_core *core = NULL;
+
+       MALI_DEBUG_PRINT(2, ("Mali DLBU: Creating Mali dynamic load balancing unit: %s\n", resource->description));
+
+       core = _mali_osk_malloc(sizeof(struct mali_dlbu_core));
+       if (NULL != core) {
+               if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI_DLBU_SIZE)) {
+                       /* No PP cores attached yet; groups are added later via
+                        * mali_dlbu_add_group(). */
+                       core->pp_cores_mask = 0;
+                       if (_MALI_OSK_ERR_OK == mali_dlbu_reset(core)) {
+                               return core;
+                       }
+                       MALI_PRINT_ERROR(("Failed to reset DLBU %s\n", core->hw_core.description));
+                       mali_hw_core_delete(&core->hw_core);
+               }
+
+               _mali_osk_free(core);
+       } else {
+               MALI_PRINT_ERROR(("Mali DLBU: Failed to allocate memory for DLBU core\n"));
+       }
+
+       return NULL;
+}
+
+/* Reset the hardware and tear down a DLBU core created by mali_dlbu_create(). */
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu)
+{
+       MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+       mali_dlbu_reset(dlbu);
+       mali_hw_core_delete(&dlbu->hw_core);
+       _mali_osk_free(dlbu);
+}
+
+/* Write reset-time values to all seven DLBU registers in one burst: enable
+ * the unit (bit 0 of the master tile list physical address), program the
+ * fixed virtual address, clear the per-job registers and re-apply the cached
+ * PP core mask. Always returns _MALI_OSK_ERR_OK. */
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu)
+{
+       u32 dlbu_registers[7];
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+       MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+       MALI_DEBUG_PRINT(4, ("Mali DLBU: mali_dlbu_reset: %s\n", dlbu->hw_core.description));
+
+       dlbu_registers[0] = mali_dlbu_phys_addr | 1; /* bit 0 enables the whole core */
+       dlbu_registers[1] = MALI_DLBU_VIRT_ADDR;
+       dlbu_registers[2] = 0;
+       dlbu_registers[3] = 0;
+       dlbu_registers[4] = 0;
+       dlbu_registers[5] = 0;
+       dlbu_registers[6] = dlbu->pp_cores_mask;
+
+       /* write reset values to core registers */
+       mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR, dlbu_registers, 7);
+
+       err = _MALI_OSK_ERR_OK;
+
+       return err;
+}
+
+/* Push the cached pp_cores_mask to the PP_ENABLE_MASK register. Must be
+ * called after add/remove of a group unless mali_dlbu_reset() is used
+ * instead (see mali_dlbu.h). */
+void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu)
+{
+       MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+       mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, dlbu->pp_cores_mask);
+}
+
+/* Add a group's PP core to the set of cores receiving load-balanced work.
+ * NOTE(review): bcast_id is OR-ed directly into pp_cores_mask, so it is
+ * presumably a one-hot mask bit rather than an index — the "core[%d]" debug
+ * print then shows the raw bit value, not a core number; confirm. Hardware
+ * is only updated on a later mali_dlbu_update_mask()/mali_dlbu_reset(). */
+void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group)
+{
+       struct mali_pp_core *pp_core;
+       u32 bcast_id;
+
+       MALI_DEBUG_ASSERT_POINTER( dlbu );
+       MALI_DEBUG_ASSERT_POINTER( group );
+
+       pp_core = mali_group_get_pp_core(group);
+       bcast_id = mali_pp_core_get_bcast_id(pp_core);
+
+       dlbu->pp_cores_mask |= bcast_id;
+       MALI_DEBUG_PRINT(3, ("Mali DLBU: Adding core[%d] New mask= 0x%02x\n", bcast_id , dlbu->pp_cores_mask));
+}
+
+/* Remove a group from the DLBU */
+void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group)
+{
+       struct mali_pp_core *pp_core;
+       u32 bcast_id;
+
+       MALI_DEBUG_ASSERT_POINTER( dlbu );
+       MALI_DEBUG_ASSERT_POINTER( group );
+
+       pp_core = mali_group_get_pp_core(group);
+       bcast_id = mali_pp_core_get_bcast_id(pp_core);
+
+       /* Clear the same mask bit that mali_dlbu_add_group() set. */
+       dlbu->pp_cores_mask &= ~bcast_id;
+       MALI_DEBUG_PRINT(3, ("Mali DLBU: Removing core[%d] New mask= 0x%02x\n", bcast_id, dlbu->pp_cores_mask));
+}
+
+/* Configure the DLBU for \a job. This needs to be done before the job is started on the groups in the DLBU. */
+void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job)
+{
+       u32 *registers;
+
+       /* Both parameters are dereferenced below; assert dlbu too, for
+        * consistency with every other function in this file. */
+       MALI_DEBUG_ASSERT_POINTER(dlbu);
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       registers = mali_pp_job_get_dlbu_registers(job);
+       MALI_DEBUG_PRINT(4, ("Mali DLBU: Starting job\n"));
+
+       /* Writing 4 registers:
+        * DLBU registers except the first two (written once at DLBU initialisation / reset) and the PP_ENABLE_MASK register */
+       mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_VBASEADDR, registers, 4);
+
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_dlbu.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_dlbu.h
new file mode 100644 (file)
index 0000000..b1a59d6
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_DLBU_H__
+#define __MALI_DLBU_H__
+
+#define MALI_DLBU_VIRT_ADDR 0xFFF00000 /* master tile virtual address fixed at this value and mapped into every session */
+
+#include "mali_osk.h"
+
+struct mali_pp_job;
+struct mali_group;
+
+/* Physical address of the DLBU master tile list page; set by
+ * mali_dlbu_initialize() and consumed by mali_dlbu_reset(). */
+extern u32 mali_dlbu_phys_addr;
+
+struct mali_dlbu_core;
+
+/* Acquire/release the global DLBU page (driver init/exit). */
+_mali_osk_errcode_t mali_dlbu_initialize(void);
+void mali_dlbu_terminate(void);
+
+/* Create/destroy a DLBU core instance for the given HW resource. */
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t * resource);
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu);
+
+/* Write reset values (including the cached PP mask) to the DLBU registers. */
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu);
+
+/* Add/remove a group's PP core from the load-balancing mask; hardware is
+ * updated by mali_dlbu_update_mask() or mali_dlbu_reset() (see below). */
+void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group);
+void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group);
+
+/** @brief Called to update HW after DLBU state changed
+ *
+ * This function must be called after \a mali_dlbu_add_group or \a
+ * mali_dlbu_remove_group to write the updated mask to hardware, unless the
+ * same is accomplished by calling \a mali_dlbu_reset.
+ */
+void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu);
+
+/* Write the per-job DLBU registers; must be done before \a job is started. */
+void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job);
+
+#endif /* __MALI_DLBU_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_dma.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_dma.c
new file mode 100644 (file)
index 0000000..4d75cc2
--- /dev/null
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+#include "mali_dma.h"
+
+/**
+ * Size of the Mali-450 DMA unit registers in bytes.
+ */
+#define MALI450_DMA_REG_SIZE 0x08
+
+/**
+ * Value that appears in MEMSIZE if an error occurs when reading the command list.
+ */
+#define MALI450_DMA_BUS_ERR_VAL 0xffffffff
+
+/**
+ * Mali DMA registers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register.
+ */
+typedef enum mali_dma_register {
+
+       MALI450_DMA_REG_SOURCE_ADDRESS = 0x0000,
+       MALI450_DMA_REG_SOURCE_SIZE = 0x0004,
+} mali_dma_register;
+
+struct mali_dma_core {
+       struct mali_hw_core  hw_core;      /**< Common for all HW cores */
+       _mali_osk_spinlock_t *lock;            /**< Lock protecting access to DMA core */
+       mali_dma_pool pool;                /**< Memory pool for command buffers */
+};
+
+/* Singleton: only one APB DMA unit is supported (asserted in
+ * mali_dma_create()); accessed via mali_dma_get_global_dma_core(). */
+static struct mali_dma_core *mali_global_dma_core = NULL;
+
+/* Create the (single) APB DMA unit: allocate the core object, its spinlock,
+ * the command buffer pool and the register mapping. Uses goto-based unwind
+ * so each failure path releases exactly what was acquired. Returns NULL on
+ * any failure. */
+struct mali_dma_core *mali_dma_create(_mali_osk_resource_t *resource)
+{
+       struct mali_dma_core* dma;
+       _mali_osk_errcode_t err;
+
+       MALI_DEBUG_ASSERT(NULL == mali_global_dma_core);
+
+       dma = _mali_osk_malloc(sizeof(struct mali_dma_core));
+       if (dma == NULL) goto alloc_failed;
+
+       dma->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_DMA_COMMAND);
+       if (NULL == dma->lock) goto lock_init_failed;
+
+       dma->pool = mali_dma_pool_create(MALI_DMA_CMD_BUF_SIZE, 4, 0);
+       if (NULL == dma->pool) goto dma_pool_failed;
+
+       err = mali_hw_core_create(&dma->hw_core, resource, MALI450_DMA_REG_SIZE);
+       if (_MALI_OSK_ERR_OK != err) goto hw_core_failed;
+
+       mali_global_dma_core = dma;
+       MALI_DEBUG_PRINT(2, ("Mali DMA: Created Mali APB DMA unit\n"));
+       return dma;
+
+       /* Error handling */
+
+hw_core_failed:
+       mali_dma_pool_destroy(dma->pool);
+dma_pool_failed:
+       _mali_osk_spinlock_term(dma->lock);
+lock_init_failed:
+       _mali_osk_free(dma);
+alloc_failed:
+       MALI_DEBUG_PRINT(2, ("Mali DMA: Failed to create APB DMA unit\n"));
+       return NULL;
+}
+
+/* Tear down a DMA unit created by mali_dma_create().
+ * NOTE(review): mali_global_dma_core is not cleared here — confirm whether
+ * callers may re-create the unit after deletion (the create-time assert
+ * would then fire in debug builds). */
+void mali_dma_delete(struct mali_dma_core *dma)
+{
+       MALI_DEBUG_ASSERT_POINTER(dma);
+
+       MALI_DEBUG_PRINT(2, ("Mali DMA: Deleted Mali APB DMA unit\n"));
+
+       mali_hw_core_delete(&dma->hw_core);
+       _mali_osk_spinlock_term(dma->lock);
+       mali_dma_pool_destroy(dma->pool);
+       _mali_osk_free(dma);
+}
+
+static void mali_dma_bus_error(struct mali_dma_core *dma)
+{
+       u32 addr = mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_ADDRESS);
+
+       MALI_PRINT_ERROR(("Mali DMA: Bus error when reading command list from 0x%lx\n", addr));
+
+       /* Clear the bus error */
+       mali_hw_core_register_write(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE, 0);
+}
+
+/* Return MALI_TRUE while a transfer is in progress (non-zero SOURCE_SIZE).
+ * A read of MALI450_DMA_BUS_ERR_VAL means the unit hit a bus error; that is
+ * cleared via mali_dma_bus_error() and reported as not busy. */
+static mali_bool mali_dma_is_busy(struct mali_dma_core *dma)
+{
+       u32 val;
+       mali_bool dma_busy_flag = MALI_FALSE;
+
+       MALI_DEBUG_ASSERT_POINTER(dma);
+
+       val = mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE);
+
+       if (MALI450_DMA_BUS_ERR_VAL == val) {
+               /* Bus error reading command list */
+               mali_dma_bus_error(dma);
+               return MALI_FALSE;
+       }
+       if (val > 0) {
+               dma_busy_flag = MALI_TRUE;
+       }
+
+       return dma_busy_flag;
+}
+
+/* Kick off a transfer of \a buf: program the source address, then the size
+ * in bytes (buf->size counts 32-bit words, hence the * 4). The unit must be
+ * idle — callers hold dma->lock and have checked mali_dma_is_busy(). */
+static void mali_dma_start_transfer(struct mali_dma_core* dma, mali_dma_cmd_buf *buf)
+{
+       u32 memsize = buf->size * 4;
+       u32 addr = buf->phys_addr;
+
+       MALI_DEBUG_ASSERT_POINTER(dma);
+       MALI_DEBUG_ASSERT(memsize < (1 << 16));
+       MALI_DEBUG_ASSERT(0 == (memsize & 0x3)); /* 4 byte aligned */
+
+       MALI_DEBUG_ASSERT(!mali_dma_is_busy(dma));
+
+       /* Writes the physical source memory address of chunk containing command headers and data */
+       mali_hw_core_register_write(&dma->hw_core, MALI450_DMA_REG_SOURCE_ADDRESS, addr);
+
+       /* Writes the length of transfer */
+       mali_hw_core_register_write(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE, memsize);
+}
+
+/* Back \a buf with memory from the global DMA pool. Returns
+ * _MALI_OSK_ERR_NOMEM if the pool is exhausted. The buffer must later be
+ * returned with mali_dma_put_cmd_buf(). */
+_mali_osk_errcode_t mali_dma_get_cmd_buf(mali_dma_cmd_buf *buf)
+{
+       MALI_DEBUG_ASSERT_POINTER(buf);
+
+       buf->virt_addr = (u32*)mali_dma_pool_alloc(mali_global_dma_core->pool, &buf->phys_addr);
+       if (NULL == buf->virt_addr) {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       /* size contains the number of words in the buffer and is incremented
+        * as commands are added to the buffer. */
+       buf->size = 0;
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Return \a buf's memory to the global DMA pool. Safe to call on a buffer
+ * whose allocation failed (virt_addr NULL is a no-op); virt_addr is cleared
+ * to guard against double free. */
+void mali_dma_put_cmd_buf(mali_dma_cmd_buf *buf)
+{
+       MALI_DEBUG_ASSERT_POINTER(buf);
+
+       if (NULL == buf->virt_addr) return;
+
+       mali_dma_pool_free(mali_global_dma_core->pool, buf->virt_addr, buf->phys_addr);
+
+       buf->virt_addr = NULL;
+}
+
+/* Start executing command buffer \a buf on the DMA unit, under dma->lock.
+ * Returns _MALI_OSK_ERR_BUSY without side effects if a transfer is already
+ * in flight. */
+_mali_osk_errcode_t mali_dma_start(struct mali_dma_core* dma, mali_dma_cmd_buf *buf)
+{
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+       _mali_osk_spinlock_lock(dma->lock);
+
+       if (mali_dma_is_busy(dma)) {
+               err = _MALI_OSK_ERR_BUSY;
+               goto out;
+       }
+
+       mali_dma_start_transfer(dma, buf);
+
+out:
+       _mali_osk_spinlock_unlock(dma->lock);
+       return err;
+}
+
+/* Dump the two DMA unit registers at debug level 1 (diagnostics only). */
+void mali_dma_debug(struct mali_dma_core *dma)
+{
+       MALI_DEBUG_ASSERT_POINTER(dma);
+       MALI_DEBUG_PRINT(1, ("DMA unit registers:\n\t%08x, %08x\n",
+                            mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_ADDRESS),
+                            mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE)
+                           ));
+
+}
+
+struct mali_dma_core *mali_dma_get_global_dma_core(void)
+{
+       /* Returns the global dma core object */
+       return mali_global_dma_core;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_dma.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_dma.h
new file mode 100644 (file)
index 0000000..4a1435f
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_DMA_H__
+#define __MALI_DMA_H__
+
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_hw_core.h"
+
+/* Allocation size of one command buffer; presumably in bytes (the fill
+ * helpers bound buf->size, counted in 32-bit words, by SIZE / 4 — confirm). */
+#define MALI_DMA_CMD_BUF_SIZE 1024
+
+typedef struct mali_dma_cmd_buf {
+       u32 *virt_addr;           /**< CPU address of command buffer */
+       u32 phys_addr;            /**< Physical address of command buffer */
+       u32 size;                 /**< Number of prepared words in command buffer */
+} mali_dma_cmd_buf;
+
+/** @brief Create a new DMA unit
+ *
+ * This is called from entry point of the driver in order to create and
+ * intialize the DMA resource
+ *
+ * @param resource it will be a pointer to a DMA resource
+ * @return DMA object on success, NULL on failure
+ */
+struct mali_dma_core *mali_dma_create(_mali_osk_resource_t *resource);
+
+/** @brief Delete DMA unit
+ *
+ * This is called on entry point of driver if the driver initialization fails
+ * after initialization of the DMA unit. It is also called on the exit of the
+ * driver to delete the DMA resource
+ *
+ * @param dma Pointer to DMA unit object
+ */
+void mali_dma_delete(struct mali_dma_core *dma);
+
+/** @brief Retrieves the MALI DMA core object (if there is)
+ *
+ * @return The Mali DMA object otherwise NULL
+ */
+struct mali_dma_core *mali_dma_get_global_dma_core(void);
+
+/**
+ * @brief Run a command buffer on the DMA unit
+ *
+ * @param dma Pointer to the DMA unit to use
+ * @param buf Pointer to the command buffer to use
+ * @return _MALI_OSK_ERR_OK if the buffer was started successfully,
+ *         _MALI_OSK_ERR_BUSY if the DMA unit is busy.
+ */
+_mali_osk_errcode_t mali_dma_start(struct mali_dma_core* dma, mali_dma_cmd_buf *buf);
+
+/**
+ * @brief Create a DMA command
+ *
+ * Encodes a command word: the word count \a n in bits 31:20 and the
+ * destination register address (core offset + \a reg) in the low bits,
+ * as enforced by the range asserts below.
+ *
+ * @param core Mali core
+ * @param reg offset to register of core
+ * @param n number of registers to write
+ */
+MALI_STATIC_INLINE u32 mali_dma_command_write(struct mali_hw_core *core, u32 reg, u32 n)
+{
+       u32 core_offset = core->phys_offset;
+
+       MALI_DEBUG_ASSERT(reg < 0x2000);
+       MALI_DEBUG_ASSERT(n < 0x800);
+       MALI_DEBUG_ASSERT(core_offset < 0x30000);
+       MALI_DEBUG_ASSERT(0 == ((core_offset + reg) & ~0x7FFFF));
+
+       return (n << 20) | (core_offset + reg);
+}
+
+/**
+ * @brief Add a array write to DMA command buffer
+ *
+ * Appends one command word followed by \a count data words to \a buf.
+ *
+ * @param buf DMA command buffer to fill in
+ * @param core Core to do DMA to
+ * @param reg Register on core to start writing to
+ * @param data Pointer to data to write
+ * @param count Number of 4 byte words to write
+ */
+MALI_STATIC_INLINE void mali_dma_write_array(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
+        u32 reg, u32 *data, u32 count)
+{
+       MALI_DEBUG_ASSERT((buf->size + 1 + count ) < MALI_DMA_CMD_BUF_SIZE / 4);
+
+       buf->virt_addr[buf->size++] = mali_dma_command_write(core, reg, count);
+
+       _mali_osk_memcpy(buf->virt_addr + buf->size, data, count * sizeof(*buf->virt_addr));
+
+       buf->size += count;
+}
+
+/**
+ * @brief Add a conditional array write to DMA command buffer
+ *
+ * @param buf DMA command buffer to fill in
+ * @param core Core to do DMA to
+ * @param reg Register on core to start writing to
+ * @param data Pointer to data to write
+ * @param count Number of 4 byte words to write
+ * @param ref Pointer to referance data that can be skipped if equal
+ */
+MALI_STATIC_INLINE void mali_dma_write_array_conditional(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
+        u32 reg, u32 *data, u32 count, const u32 *ref)
+{
+       /* Do conditional array writes are not yet implemented, fallback to a
+        * normal array write. */
+       mali_dma_write_array(buf, core, reg, data, count);
+}
+
+/**
+ * @brief Add a conditional register write to the DMA command buffer
+ *
+ * If the data matches the reference the command will be skipped.
+ *
+ * @param buf DMA command buffer to fill in
+ * @param core Core to do DMA to
+ * @param reg Register on core to start writing to
+ * @param data Pointer to data to write
+ * @param ref Pointer to referance data that can be skipped if equal
+ */
+MALI_STATIC_INLINE void mali_dma_write_conditional(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
+        u32 reg, u32 data, const u32 ref)
+{
+       /* Skip write if reference value is equal to data. */
+       if (data == ref) return;
+
+       buf->virt_addr[buf->size++] = mali_dma_command_write(core, reg, 1);
+
+       buf->virt_addr[buf->size++] = data;
+
+       /* NOTE(review): the capacity assert runs after the two words have
+        * already been stored; an overflow is only caught post-write in debug
+        * builds — consider asserting before the writes. */
+       MALI_DEBUG_ASSERT(buf->size < MALI_DMA_CMD_BUF_SIZE / 4);
+}
+
+/**
+ * @brief Add a register write to the DMA command buffer
+ *
+ * Appends an unconditional single-register write (command word + data word).
+ *
+ * @param buf DMA command buffer to fill in
+ * @param core Core to do DMA to
+ * @param reg Register on core to start writing to
+ * @param data Pointer to data to write
+ */
+MALI_STATIC_INLINE void mali_dma_write(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
+                                       u32 reg, u32 data)
+{
+       buf->virt_addr[buf->size++] = mali_dma_command_write(core, reg, 1);
+
+       buf->virt_addr[buf->size++] = data;
+
+       /* NOTE(review): assert runs after the writes, as in
+        * mali_dma_write_conditional() above. */
+       MALI_DEBUG_ASSERT(buf->size < MALI_DMA_CMD_BUF_SIZE / 4);
+}
+
+/**
+ * @brief Prepare DMA command buffer for use
+ *
+ * This function allocates the DMA buffer itself.
+ *
+ * @param buf The mali_dma_cmd_buf to prepare
+ * @return _MALI_OSK_ERR_OK if the \a buf is ready to use
+ */
+_mali_osk_errcode_t mali_dma_get_cmd_buf(mali_dma_cmd_buf *buf);
+
+/**
+ * @brief Check if a DMA command buffer is ready for use
+ *
+ * A buffer is usable once mali_dma_get_cmd_buf() has backed it with pool
+ * memory (non-NULL virt_addr).
+ *
+ * @param buf The mali_dma_cmd_buf to check
+ * @return MALI_TRUE if buffer is usable, MALI_FALSE otherwise
+ */
+MALI_STATIC_INLINE mali_bool mali_dma_cmd_buf_is_valid(mali_dma_cmd_buf *buf)
+{
+       return NULL != buf->virt_addr;
+}
+
+/**
+ * @brief Return a DMA command buffer
+ *
+ * @param buf Pointer to DMA command buffer to return
+ */
+void mali_dma_put_cmd_buf(mali_dma_cmd_buf *buf);
+
+#endif /* __MALI_DMA_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_gp.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_gp.c
new file mode 100644 (file)
index 0000000..dad73b2
--- /dev/null
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_gp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "regs/mali_gp_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+static struct mali_gp_core *mali_global_gp_core = NULL;
+
+/* Interrupt handlers */
+static void mali_gp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data);
+
<br>
+/* Create the (single) GP core: allocate the object, map its registers,
+ * reset it, attach it to \a group and install the IRQ handlers. On full
+ * success the core is published in the module-level mali_global_gp_core
+ * singleton. Every failure path unwinds what was acquired and returns NULL. */
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t * resource, struct mali_group *group)
+{
+       struct mali_gp_core* core = NULL;
+
+       MALI_DEBUG_ASSERT(NULL == mali_global_gp_core);
+       MALI_DEBUG_PRINT(2, ("Mali GP: Creating Mali GP core: %s\n", resource->description));
+
+       core = _mali_osk_malloc(sizeof(struct mali_gp_core));
+       if (NULL != core) {
+               if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALIGP2_REGISTER_ADDRESS_SPACE_SIZE)) {
+                       _mali_osk_errcode_t ret;
+
+                       ret = mali_gp_reset(core);
+
+                       if (_MALI_OSK_ERR_OK == ret) {
+                               ret = mali_group_add_gp_core(group, core);
+                               if (_MALI_OSK_ERR_OK == ret) {
+                                       /* Setup IRQ handlers (which will do IRQ probing if needed) */
+                                       core->irq = _mali_osk_irq_init(resource->irq,
+                                                                      mali_group_upper_half_gp,
+                                                                      group,
+                                                                      mali_gp_irq_probe_trigger,
+                                                                      mali_gp_irq_probe_ack,
+                                                                      core,
+                                                                      resource->description);
+                                       if (NULL != core->irq) {
+                                               MALI_DEBUG_PRINT(4, ("Mali GP: set global gp core from 0x%08X to 0x%08X\n", mali_global_gp_core, core));
+                                               mali_global_gp_core = core;
+
+                                               return core;
+                                       } else {
+                                               MALI_PRINT_ERROR(("Mali GP: Failed to setup interrupt handlers for GP core %s\n", core->hw_core.description));
+                                       }
+                                       mali_group_remove_gp_core(group);
+                               } else {
+                                       MALI_PRINT_ERROR(("Mali GP: Failed to add core %s to group\n", core->hw_core.description));
+                               }
+                       }
+                       mali_hw_core_delete(&core->hw_core);
+               }
+
+               _mali_osk_free(core);
+       } else {
+               MALI_PRINT_ERROR(("Failed to allocate memory for GP core\n"));
+       }
+
+       return NULL;
+}
+
+/* Tear down the GP core created by mali_gp_create() and clear the global
+ * singleton so a new core may be created. */
+void mali_gp_delete(struct mali_gp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       _mali_osk_irq_term(core->irq);
+       mali_hw_core_delete(&core->hw_core);
+       mali_global_gp_core = NULL;
+       _mali_osk_free(core);
+}
+
+/* Issue the (asynchronous) stop-bus command to the GP core. */
+void mali_gp_stop_bus(struct mali_gp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+}
+
+/* Issue stop-bus, then poll (up to MALI_REG_POLL_COUNT_FAST reads of the
+ * status register) for the BUS_STOPPED bit. Returns _MALI_OSK_ERR_FAULT if
+ * the bus did not stop within the polling budget. */
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core)
+{
+       int i;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       /* Send the stop bus command. */
+       mali_gp_stop_bus(core);
+
+       /* Wait for bus to be stopped */
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+               if (mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS) & MALIGP2_REG_VAL_STATUS_BUS_STOPPED) {
+                       break;
+               }
+       }
+
+       if (MALI_REG_POLL_COUNT_FAST == i) {
+               MALI_PRINT_ERROR(("Mali GP: Failed to stop bus on %s\n", core->hw_core.description));
+               return _MALI_OSK_ERR_FAULT;
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Hard-reset the GP core. A sentinel is written to a scratch register
+ * (WRITE_BOUND_LOW) before the reset command; the reset is known to have
+ * taken effect once a freshly written check value reads back. Interrupts
+ * are re-enabled afterwards. */
+void mali_gp_hard_reset(struct mali_gp_core *core)
+{
+       const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW;
+       const u32 reset_invalid_value = 0xC0FFE000;
+       const u32 reset_check_value = 0xC01A0000;
+       const u32 reset_default_value = 0;
+       int i;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+       MALI_DEBUG_PRINT(4, ("Mali GP: Hard reset of core %s\n", core->hw_core.description));
+
+       mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_invalid_value);
+
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_RESET);
+
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+               mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value);
+               if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register)) {
+                       break;
+               }
+       }
+
+       if (MALI_REG_POLL_COUNT_FAST == i) {
+               MALI_PRINT_ERROR(("Mali GP: The hard reset loop didn't work, unable to recover\n"));
+       }
+
+       mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_default_value); /* set it back to the default */
+       /* Re-enable interrupts */
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+
+}
+
+/* Kick off a soft reset without waiting for completion; pair with
+ * mali_gp_reset_wait(). IRQs are masked and the reset-completed IRQ is
+ * cleared first so completion can be detected via RAWSTAT. */
+void mali_gp_reset_async(struct mali_gp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       MALI_DEBUG_PRINT(4, ("Mali GP: Reset of core %s\n", core->hw_core.description));
+
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALI400GP_REG_VAL_IRQ_RESET_COMPLETED);
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALI400GP_REG_VAL_CMD_SOFT_RESET);
+
+}
+
+/* Wait for a previously started (async) soft reset to complete.
+ * Polls INT_RAWSTAT for RESET_COMPLETED up to MALI_REG_POLL_COUNT_FAST times,
+ * then re-enables the interrupts that mali_gp_reset_async() masked off.
+ *
+ * @param core GP core being reset.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if the poll
+ *         loop timed out before the reset completed. */
+_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core)
+{
+	int i;
+	u32 rawstat = 0;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+		rawstat = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+		if (rawstat & MALI400GP_REG_VAL_IRQ_RESET_COMPLETED) {
+			break;
+		}
+	}
+
+	/* i reaching the poll count means the break above never fired: timeout. */
+	if (i == MALI_REG_POLL_COUNT_FAST) {
+		MALI_PRINT_ERROR(("Mali GP: Failed to reset core %s, rawstat: 0x%08x\n",
+				  core->hw_core.description, rawstat));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	/* Re-enable interrupts */
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Synchronous soft reset: start the reset and busy-wait for it to finish.
+ * Returns the result of mali_gp_reset_wait(). */
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core)
+{
+	mali_gp_reset_async(core);
+	return mali_gp_reset_wait(core);
+}
+
+/* Program the GP core with a job's frame registers and performance counter
+ * setup, then issue the start command for the VS and/or PLBU sub-jobs.
+ * The write order here matters: frame registers and counters are written with
+ * relaxed writes, and explicit write barriers fence the two CMD writes.
+ * NOTE(review): callers are expected to hold the group lock with the core
+ * idle — inferred from the scheduler call path; confirm. */
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job)
+{
+	u32 startcmd = 0;
+	u32 *frame_registers = mali_gp_job_get_frame_registers(job);
+	u32 counter_src0 = mali_gp_job_get_perf_counter_src0(job);
+	u32 counter_src1 = mali_gp_job_get_perf_counter_src1(job);
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	/* Build the start command from which sub-jobs the job actually contains. */
+	if (mali_gp_job_has_vs_job(job)) {
+		startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
+	}
+
+	if (mali_gp_job_has_plbu_job(job)) {
+		startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
+	}
+
+	/* A job with neither a VS nor a PLBU part should never get here. */
+	MALI_DEBUG_ASSERT(0 != startcmd);
+
+	mali_hw_core_register_write_array_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR, frame_registers, MALIGP2_NUM_REGS_FRAME);
+
+	/* Only configure HW performance counters that are actually requested. */
+	if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+	}
+	if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+	}
+
+	MALI_DEBUG_PRINT(3, ("Mali GP: Starting job (0x%08x) on core %s with command 0x%08X\n", job, core->hw_core.description, startcmd));
+
+	mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+
+	/* Barrier to make sure the previous register write is finished */
+	_mali_osk_write_mem_barrier();
+
+	/* This is the command that starts the core. */
+	mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd);
+
+	/* Barrier to make sure the previous register write is finished */
+	_mali_osk_write_mem_barrier();
+}
+
+/* Resume a GP job that stalled with PLBU_OUT_OF_MEM by handing the core a
+ * new heap region [start_addr, end_addr) and re-issuing UPDATE_PLBU_ALLOC.
+ * If the OUT_OF_MEM bit is no longer pending the call is a silent no-op
+ * (see the comment at the bottom). */
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr)
+{
+	u32 irq_readout;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+
+	if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) {
+		/* Acknowledge the stall (and any HANG raised while waiting) before resuming. */
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | MALIGP2_REG_VAL_IRQ_HANG));
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); /* re-enable interrupts */
+		mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR, start_addr);
+		mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR, end_addr);
+
+		MALI_DEBUG_PRINT(3, ("Mali GP: Resuming job\n"));
+
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+		_mali_osk_write_mem_barrier();
+	}
+	/*
+	 * else: core has been reset between PLBU_OUT_OF_MEM interrupt and this new heap response.
+	 * A timeout or a page fault on Mali-200 PP core can cause this behaviour.
+	 */
+}
+
+/* Read the GP core's VERSION register. */
+u32 mali_gp_core_get_version(struct mali_gp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+	return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VERSION);
+}
+
+/* Return the single global GP core instance.
+ * NOTE(review): mali_global_gp_core is presumably set in mali_gp_create();
+ * the setter is outside this chunk — may be NULL before core creation. */
+struct mali_gp_core *mali_gp_get_global_gp_core(void)
+{
+	return mali_global_gp_core;
+}
+
+/* ------------- interrupt handling below ------------------ */
+/* IRQ probe: force an interrupt by writing directly into INT_RAWSTAT so the
+ * platform IRQ wiring can be verified.
+ * NOTE(review): the value written is MALIGP2_REG_VAL_CMD_FORCE_HANG while the
+ * ack routine checks MALIGP2_REG_VAL_IRQ_FORCE_HANG — presumably the same bit
+ * value; confirm against regs/mali_gp_regs.h. */
+static void mali_gp_irq_probe_trigger(void *data)
+{
+	struct mali_gp_core *core = (struct mali_gp_core *)data;
+
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_CMD_FORCE_HANG);
+	_mali_osk_mem_barrier();
+}
+
+/* IRQ probe: check that the interrupt forced by mali_gp_irq_probe_trigger()
+ * is pending; acknowledge it if so.
+ * @return _MALI_OSK_ERR_OK if the forced IRQ was seen, _MALI_OSK_ERR_FAULT otherwise. */
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data)
+{
+	struct mali_gp_core *core = (struct mali_gp_core *)data;
+	u32 int_stat;
+
+	int_stat = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+	if (0 == (int_stat & MALIGP2_REG_VAL_IRQ_FORCE_HANG)) {
+		/* Forced interrupt never arrived: IRQ line is not working. */
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	/* Clear the forced interrupt and report a working IRQ line. */
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_FORCE_HANG);
+	_mali_osk_mem_barrier();
+	return _MALI_OSK_ERR_OK;
+}
+
+/* ------ local helper functions below --------- */
+#if MALI_STATE_TRACKING
+/* Append a one-line description of the GP core to buf (debugfs state dump).
+ * @return number of characters written. */
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size)
+{
+	int n = 0;
+
+	n += _mali_osk_snprintf(buf + n, size - n, "\tGP: %s\n", core->hw_core.description);
+
+	return n;
+}
+#endif
+
+/* Read back the HW performance counter values for counters the job enabled,
+ * store them in the job object and (when profiling is built in) forward them
+ * to the profiling subsystem.
+ * NOTE(review): the "suspend" parameter is unused in this implementation. */
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job, mali_bool suspend)
+{
+	u32 val0 = 0;
+	u32 val1 = 0;
+	u32 counter_src0 = mali_gp_job_get_perf_counter_src0(job);
+	u32 counter_src1 = mali_gp_job_get_perf_counter_src1(job);
+
+	if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+		val0 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+		mali_gp_job_set_perf_counter_value0(job, val0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+		_mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C0, val0);
+#endif
+
+	}
+
+	if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+		val1 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+		mali_gp_job_set_perf_counter_value1(job, val1);
+
+#if defined(CONFIG_MALI400_PROFILING)
+		_mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C1, val1);
+#endif
+	}
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_gp.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_gp.h
new file mode 100644 (file)
index 0000000..2de94cb
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_H__
+#define __MALI_GP_H__
+
+#include "mali_osk.h"
+#include "mali_gp_job.h"
+#include "mali_hw_core.h"
+#include "regs/mali_gp_regs.h"
+
+struct mali_group;
+
+/**
+ * Definition of the GP core struct
+ * Used to track a GP core in the system.
+ */
+struct mali_gp_core {
+	struct mali_hw_core  hw_core;           /**< Common for all HW cores */
+	_mali_osk_irq_t     *irq;               /**< IRQ handler */
+};
+
+/* Subsystem setup/teardown. */
+_mali_osk_errcode_t mali_gp_initialize(void);
+void mali_gp_terminate(void);
+
+/* Core lifetime; a created core belongs to the given group. */
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t * resource, struct mali_group *group);
+void mali_gp_delete(struct mali_gp_core *core);
+
+/* Bus-stop and reset primitives: *_async starts the operation, the matching
+ * *_wait polls for completion; mali_gp_reset() combines the two. */
+void mali_gp_stop_bus(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core);
+void mali_gp_reset_async(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core);
+void mali_gp_hard_reset(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core);
+
+/* Job start/resume on the core. */
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job);
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr);
+
+u32 mali_gp_core_get_version(struct mali_gp_core *core);
+
+struct mali_gp_core *mali_gp_get_global_gp_core(void);
+
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size);
+
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job, mali_bool suspend);
+
+/*** Accessor functions ***/
+MALI_STATIC_INLINE const char *mali_gp_get_hw_core_desc(struct mali_gp_core *core)
+{
+	return core->hw_core.description;
+}
+
+/*** Register reading/writing functions ***/
+
+/* Masked interrupt status (INT_STAT register). */
+MALI_STATIC_INLINE u32 mali_gp_get_int_stat(struct mali_gp_core *core)
+{
+	return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+}
+
+/* Disable delivery of all GP interrupts. */
+MALI_STATIC_INLINE void mali_gp_mask_all_interrupts(struct mali_gp_core *core)
+{
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE);
+}
+
+/* Raw (unmasked) interrupt status, filtered to the bits the driver uses. */
+MALI_STATIC_INLINE u32 mali_gp_read_rawstat(struct mali_gp_core *core)
+{
+	return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_read_core_status(struct mali_gp_core *core)
+{
+	return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+}
+
+MALI_STATIC_INLINE void mali_gp_enable_interrupts(struct mali_gp_core *core, u32 irq_exceptions)
+{
+	/* Enable all interrupts, except those specified in irq_exceptions */
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK,
+				    MALIGP2_REG_VAL_IRQ_MASK_USED & ~irq_exceptions);
+}
+
+/* Current PLBU heap allocation start address (used when handling OOM). */
+MALI_STATIC_INLINE u32 mali_gp_read_plbu_alloc_start_addr(struct mali_gp_core *core)
+{
+	return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR);
+}
+
+#endif /* __MALI_GP_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_gp_job.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_gp_job.c
new file mode 100644 (file)
index 0000000..3c763aa
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_gp_job.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+
+/* Global (debugfs / DS-5 provided) counter sources, applied to jobs that do
+ * not request counters themselves; see mali_gp_job_create(). */
+static u32 gp_counter_src0 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 gp_counter_src1 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+
+/* Allocate and initialize a GP job from user space arguments.
+ *
+ * @param session    Session that submitted the job.
+ * @param uargs      User space job arguments, copied in with _mali_osk_copy_from_user.
+ * @param id         Kernel-side sequential job identifier.
+ * @param pp_tracker Optional timeline tracker of a PP job that depends on this
+ *                   GP job; a reference is taken and released when the GP job is done.
+ * @return New job on success, NULL on allocation or copy failure.
+ */
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker)
+{
+	struct mali_gp_job *job;
+	u32 perf_counter_flag;
+
+	job = _mali_osk_malloc(sizeof(struct mali_gp_job));
+	if (NULL != job) {
+		/* Pre-allocate the notifications sent back to user space so that
+		 * job completion cannot fail on memory allocation later. */
+		job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s));
+		if (NULL == job->finished_notification) {
+			_mali_osk_free(job);
+			return NULL;
+		}
+
+		job->oom_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
+		if (NULL == job->oom_notification) {
+			_mali_osk_notification_delete(job->finished_notification);
+			_mali_osk_free(job);
+			return NULL;
+		}
+
+		if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_gp_start_job_s))) {
+			_mali_osk_notification_delete(job->finished_notification);
+			_mali_osk_notification_delete(job->oom_notification);
+			_mali_osk_free(job);
+			return NULL;
+		}
+
+		perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);
+
+		/* case when no counters came from user space
+		 * so pass the debugfs / DS-5 provided global ones to the job object */
+		if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
+		      (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
+			mali_gp_job_set_perf_counter_src0(job, mali_gp_job_get_gp_counter_src0());
+			mali_gp_job_set_perf_counter_src1(job, mali_gp_job_get_gp_counter_src1());
+		}
+
+		_mali_osk_list_init(&job->list);
+		job->session = session;
+		job->id = id;
+		/* The struct comes from _mali_osk_malloc, so initialize cache_order
+		 * explicitly; otherwise mali_gp_job_get_cache_order() could read
+		 * uninitialized memory before a real cache order is assigned. */
+		job->cache_order = 0;
+		job->heap_current_addr = job->uargs.frame_registers[4]; /* PLBU heap address from user space */
+		job->perf_counter_value0 = 0;
+		job->perf_counter_value1 = 0;
+		job->pid = _mali_osk_get_pid();
+		job->tid = _mali_osk_get_tid();
+
+		job->pp_tracker = pp_tracker;
+		if (NULL != job->pp_tracker) {
+			/* Take a reference on PP job's tracker that will be released when the GP
+			   job is done. */
+			mali_timeline_system_tracker_get(session->timeline_system, pp_tracker);
+		}
+
+		mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_GP, NULL, job);
+		mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));
+
+		return job;
+	}
+
+	return NULL;
+}
+
+/* Free a GP job together with any notifications it still owns.
+ * Notifications already handed to user space were set to NULL by the sender,
+ * so only what remains owned by the job is deleted here.
+ * The PP tracker reference must already have been released
+ * (mali_gp_job_signal_pp_tracker). */
+void mali_gp_job_delete(struct mali_gp_job *job)
+{
+	MALI_DEBUG_ASSERT_POINTER(job);
+	MALI_DEBUG_ASSERT(NULL == job->pp_tracker);
+
+	/* de-allocate the pre-allocated notifications that were never sent */
+	if (job->oom_notification != NULL) {
+		_mali_osk_notification_delete(job->oom_notification);
+		job->oom_notification = NULL;
+	}
+
+	if (job->finished_notification != NULL) {
+		_mali_osk_notification_delete(job->finished_notification);
+		job->finished_notification = NULL;
+	}
+
+	_mali_osk_free(job);
+}
+
+/* Get the global (debugfs / DS-5 provided) performance counter 0 source. */
+u32 mali_gp_job_get_gp_counter_src0(void)
+{
+	return gp_counter_src0;
+}
+
+/* Set the global performance counter 0 source (MALI_HW_CORE_NO_COUNTER disables). */
+void mali_gp_job_set_gp_counter_src0(u32 counter)
+{
+	gp_counter_src0 = counter;
+}
+
+/* Get the global (debugfs / DS-5 provided) performance counter 1 source. */
+u32 mali_gp_job_get_gp_counter_src1(void)
+{
+	return gp_counter_src1;
+}
+
+/* Set the global performance counter 1 source (MALI_HW_CORE_NO_COUNTER disables). */
+void mali_gp_job_set_gp_counter_src1(u32 counter)
+{
+	gp_counter_src1 = counter;
+}
+
+/* Release the reference on the dependent PP job's tracker (taken in
+ * mali_gp_job_create) and clear it so mali_gp_job_delete's assert holds.
+ * @param success MALI_TRUE if the GP job completed successfully.
+ * @return scheduling mask indicating whether scheduling needs to be done. */
+mali_scheduler_mask mali_gp_job_signal_pp_tracker(struct mali_gp_job *job, mali_bool success)
+{
+	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+	MALI_DEBUG_ASSERT_POINTER(job);
+
+	if (NULL != job->pp_tracker) {
+		/* The third argument tells the timeline whether to activate the
+		 * dependent tracker with an error (job failed). */
+		schedule_mask |= mali_timeline_system_tracker_put(job->session->timeline_system, job->pp_tracker, MALI_FALSE == success);
+		job->pp_tracker = NULL;
+	}
+
+	return schedule_mask;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_gp_job.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_gp_job.h
new file mode 100644 (file)
index 0000000..8fb5cf5
--- /dev/null
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_JOB_H__
+#define __MALI_GP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+#include "mali_timeline.h"
+#include "mali_scheduler_types.h"
+
+/**
+ * The structure represents a GP job, including all sub-jobs
+ * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
+ * mechanism works)
+ */
+struct mali_gp_job {
+	_mali_osk_list_t list;                             /**< Used to link jobs together in the scheduler queue */
+	struct mali_session_data *session;                 /**< Session which submitted this job */
+	_mali_uk_gp_start_job_s uargs;                     /**< Arguments from user space */
+	u32 id;                                            /**< Identifier for this job in kernel space (sequential numbering) */
+	u32 cache_order;                                   /**< Cache order used for L2 cache flushing (sequential numbering) */
+	u32 heap_current_addr;                             /**< Holds the current HEAP address when the job has completed */
+	u32 perf_counter_value0;                           /**< Value of performance counter 0 (to be returned to user space) */
+	u32 perf_counter_value1;                           /**< Value of performance counter 1 (to be returned to user space) */
+	u32 pid;                                           /**< Process ID of submitting process */
+	u32 tid;                                           /**< Thread ID of submitting thread */
+	_mali_osk_notification_t *finished_notification;   /**< Notification sent back to userspace on job complete */
+	_mali_osk_notification_t *oom_notification;        /**< Notification sent back to userspace on OOM */
+	struct mali_timeline_tracker tracker;              /**< Timeline tracker for this job */
+	struct mali_timeline_tracker *pp_tracker;          /**< Pointer to Timeline tracker for PP job that depends on this job. */
+};
+
+/* Job lifetime. */
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker);
+void mali_gp_job_delete(struct mali_gp_job *job);
+
+/* Global (debugfs / DS-5) performance counter sources applied to jobs that
+ * do not request counters themselves. */
+u32 mali_gp_job_get_gp_counter_src0(void);
+void mali_gp_job_set_gp_counter_src0(u32 counter);
+u32 mali_gp_job_get_gp_counter_src1(void);
+void mali_gp_job_set_gp_counter_src1(u32 counter);
+
+/* Kernel-side sequential job id; 0 for a NULL job. */
+MALI_STATIC_INLINE u32 mali_gp_job_get_id(struct mali_gp_job *job)
+{
+	return (NULL == job) ? 0 : job->id;
+}
+
+/* L2 cache-flush ordering number; 0 for a NULL job. */
+MALI_STATIC_INLINE u32 mali_gp_job_get_cache_order(struct mali_gp_job *job)
+{
+	return (NULL == job) ? 0 : job->cache_order;
+}
+
+/* User space job pointer, echoed back in the finished notification. */
+MALI_STATIC_INLINE u32 mali_gp_job_get_user_id(struct mali_gp_job *job)
+{
+	return job->uargs.user_job_ptr;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_frame_builder_id(struct mali_gp_job *job)
+{
+	return job->uargs.frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_flush_id(struct mali_gp_job *job)
+{
+	return job->uargs.flush_id;
+}
+
+/* Process id of the submitting process (captured at job creation). */
+MALI_STATIC_INLINE u32 mali_gp_job_get_pid(struct mali_gp_job *job)
+{
+	return job->pid;
+}
+
+/* Thread id of the submitting thread (captured at job creation). */
+MALI_STATIC_INLINE u32 mali_gp_job_get_tid(struct mali_gp_job *job)
+{
+	return job->tid;
+}
+
+/* Frame register block written to the core at job start. */
+MALI_STATIC_INLINE u32* mali_gp_job_get_frame_registers(struct mali_gp_job *job)
+{
+	return job->uargs.frame_registers;
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_gp_job_get_session(struct mali_gp_job *job)
+{
+	return job->session;
+}
+
+/* A VS sub-job exists when its start and end addresses differ
+ * (frame_registers[0] vs [1]). */
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_vs_job(struct mali_gp_job *job)
+{
+	return (job->uargs.frame_registers[0] != job->uargs.frame_registers[1]) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* A PLBU sub-job exists when its start and end addresses differ
+ * (frame_registers[2] vs [3]). */
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_plbu_job(struct mali_gp_job *job)
+{
+	return (job->uargs.frame_registers[2] != job->uargs.frame_registers[3]) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Current PLBU heap address, updated as the job runs/completes. */
+MALI_STATIC_INLINE u32 mali_gp_job_get_current_heap_addr(struct mali_gp_job *job)
+{
+	return job->heap_current_addr;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_current_heap_addr(struct mali_gp_job *job, u32 heap_addr)
+{
+	job->heap_current_addr = heap_addr;
+}
+
+/* Per-job performance counter accessors. Sources come from user space (or the
+ * global debugfs/DS-5 sources, see mali_gp_job_create); values are read back
+ * from the HW after the job ran. */
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_flag(struct mali_gp_job *job)
+{
+	return job->uargs.perf_counter_flag;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src0(struct mali_gp_job *job)
+{
+	return job->uargs.perf_counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src1(struct mali_gp_job *job)
+{
+	return job->uargs.perf_counter_src1;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value0(struct mali_gp_job *job)
+{
+	return job->perf_counter_value0;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value1(struct mali_gp_job *job)
+{
+	return job->perf_counter_value1;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src0(struct mali_gp_job *job, u32 src)
+{
+	job->uargs.perf_counter_src0 = src;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src1(struct mali_gp_job *job, u32 src)
+{
+	job->uargs.perf_counter_src1 = src;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value0(struct mali_gp_job *job, u32 value)
+{
+	job->perf_counter_value0 = value;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value1(struct mali_gp_job *job, u32 value)
+{
+	job->perf_counter_value1 = value;
+}
+
+/**
+ * Order two GP jobs by job ID, robust against u32 ID wrap-around.
+ *
+ * @param first First job.
+ * @param second Second job.
+ * @return MALI_TRUE if first job should be ordered after the second job, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_gp_job_is_after(struct mali_gp_job *first, struct mali_gp_job *second)
+{
+	u32 distance = mali_gp_job_get_id(first) - mali_gp_job_get_id(second);
+
+	/* Unsigned subtraction handles job ID wrapping: "first" is after "second"
+	 * exactly when the forward distance falls inside the span. */
+	return (distance < MALI_SCHEDULER_JOB_ID_SPAN) ? MALI_TRUE : MALI_FALSE;
+}
+
+/**
+ * Release reference on tracker for PP job that depends on this GP job.
+ *
+ * @note If GP job has a reference on tracker, this function MUST be called before the GP job is
+ * deleted.
+ *
+ * @param job GP job that is done.
+ * @param success MALI_TRUE if job completed successfully, MALI_FALSE if not.
+ * @return A scheduling bitmask indicating whether scheduling needs to be done.
+ */
+mali_scheduler_mask mali_gp_job_signal_pp_tracker(struct mali_gp_job *job, mali_bool success);
+
+#endif /* __MALI_GP_JOB_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_gp_scheduler.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_gp_scheduler.c
new file mode 100644 (file)
index 0000000..b13e715
--- /dev/null
@@ -0,0 +1,701 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_gp_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_scheduler.h"
+#include "mali_gp.h"
+#include "mali_gp_job.h"
+#include "mali_group.h"
+#include "mali_timeline.h"
+#include "mali_osk_profiling.h"
+#include "mali_kernel_utilization.h"
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+
+/* Scheduling state of the (single) GP render slot. */
+enum mali_gp_slot_state {
+	MALI_GP_SLOT_STATE_IDLE,
+	MALI_GP_SLOT_STATE_WORKING,
+	MALI_GP_SLOT_STATE_DISABLED,
+};
+
+/* A render slot is an entity which jobs can be scheduled onto */
+struct mali_gp_slot {
+	struct mali_group *group;
+	/*
+	 * We keep track of the state here as well as in the group object
+	 * so we don't need to take the group lock so often (and also avoid clutter with the working lock)
+	 */
+	enum mali_gp_slot_state state;
+	u32 returned_cookie;
+};
+
+/* Version of the GP core, cached at initialize time. */
+static u32 gp_version = 0;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(job_queue);      /* List of unscheduled jobs. */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(job_queue_high); /* List of unscheduled high priority jobs. */
+/* The single GP render slot; there is at most one GP core in the system. */
+static struct mali_gp_slot slot;
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *gp_scheduler_working_wait_queue = NULL;
+static u32 pause_count = 0;
+
+static mali_bool mali_gp_scheduler_is_suspended(void *data);
+static void mali_gp_scheduler_job_queued(void);
+static void mali_gp_scheduler_job_completed(void);
+
+/* With upper-half scheduling the lock is taken from interrupt context, so an
+ * IRQ-safe spinlock variant is required. */
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+static _mali_osk_spinlock_irq_t *gp_scheduler_lock = NULL;
+#else
+static _mali_osk_spinlock_t *gp_scheduler_lock = NULL;
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+
+/* Set up the GP scheduler: create its lock and wait queue, then locate the
+ * (single) GP core among the groups and bind it to the slot.
+ * On failure all partially created resources are released via the cleanup path.
+ * @return _MALI_OSK_ERR_OK, _MALI_OSK_ERR_NOMEM or _MALI_OSK_ERR_ITEM_NOT_FOUND. */
+_mali_osk_errcode_t mali_gp_scheduler_initialize(void)
+{
+	u32 num_groups;
+	u32 i;
+	_mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+	gp_scheduler_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+#else
+	gp_scheduler_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+	if (NULL == gp_scheduler_lock) {
+		ret = _MALI_OSK_ERR_NOMEM;
+		goto cleanup;
+	}
+
+	gp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
+	if (NULL == gp_scheduler_working_wait_queue) {
+		ret = _MALI_OSK_ERR_NOMEM;
+		goto cleanup;
+	}
+
+	/* Find all the available GP cores */
+	num_groups = mali_group_get_glob_num_groups();
+	for (i = 0; i < num_groups; i++) {
+		struct mali_group *group = mali_group_get_glob_group(i);
+		MALI_DEBUG_ASSERT(NULL != group);
+		if (NULL != group) {
+			struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+			if (NULL != gp_core) {
+				if (0 == gp_version) {
+					/* Retrieve GP version */
+					gp_version = mali_gp_core_get_version(gp_core);
+				}
+				slot.group = group;
+				slot.state = MALI_GP_SLOT_STATE_IDLE;
+				break; /* There is only one GP, no point in looking for more */
+			}
+		} else {
+			/* NOTE(review): only reachable in non-debug builds where the
+			 * assert above compiles out and the group lookup returned NULL. */
+			ret = _MALI_OSK_ERR_ITEM_NOT_FOUND;
+			goto cleanup;
+		}
+	}
+
+	return _MALI_OSK_ERR_OK;
+
+cleanup:
+	/* Release whatever was created before the failure, in reverse order. */
+	if (NULL != gp_scheduler_working_wait_queue) {
+		_mali_osk_wait_queue_term(gp_scheduler_working_wait_queue);
+		gp_scheduler_working_wait_queue = NULL;
+	}
+
+	if (NULL != gp_scheduler_lock) {
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+		_mali_osk_spinlock_irq_term(gp_scheduler_lock);
+#else
+		_mali_osk_spinlock_term(gp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+		gp_scheduler_lock = NULL;
+	}
+
+	return ret;
+}
+
+/* Tear down the GP scheduler. The slot must be idle or disabled (no job may
+ * be running); the bound group, the wait queue and the lock are destroyed. */
+void mali_gp_scheduler_terminate(void)
+{
+	MALI_DEBUG_ASSERT(   MALI_GP_SLOT_STATE_IDLE     == slot.state
+			     || MALI_GP_SLOT_STATE_DISABLED == slot.state);
+	MALI_DEBUG_ASSERT_POINTER(slot.group);
+	mali_group_delete(slot.group);
+
+	_mali_osk_wait_queue_term(gp_scheduler_working_wait_queue);
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+	_mali_osk_spinlock_irq_term(gp_scheduler_lock);
+#else
+	_mali_osk_spinlock_term(gp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+}
+
+/* Take the scheduler lock; uses the IRQ-safe spinlock variant when
+ * upper-half scheduling is enabled. */
+MALI_STATIC_INLINE void mali_gp_scheduler_lock(void)
+{
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+	_mali_osk_spinlock_irq_lock(gp_scheduler_lock);
+#else
+	_mali_osk_spinlock_lock(gp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+	MALI_DEBUG_PRINT(5, ("Mali GP scheduler: GP scheduler lock taken\n"));
+}
+
+/* Release the scheduler lock. */
+MALI_STATIC_INLINE void mali_gp_scheduler_unlock(void)
+{
+	MALI_DEBUG_PRINT(5, ("Mali GP scheduler: Releasing GP scheduler lock\n"));
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+	_mali_osk_spinlock_irq_unlock(gp_scheduler_lock);
+#else
+	_mali_osk_spinlock_unlock(gp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+}
+
+/* Debug-only assertion that the scheduler lock is held; no-op otherwise. */
+#if defined(DEBUG)
+#define MALI_ASSERT_GP_SCHEDULER_LOCKED() MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock)
+#else
+#define MALI_ASSERT_GP_SCHEDULER_LOCKED() do {} while (0)
+#endif /* defined(DEBUG) */
+
+/* Group and scheduler must be locked when entering this function.  Both will be unlocked before
+ * exiting. */
+static void mali_gp_scheduler_schedule_internal_and_unlock(void)
+{
+       struct mali_gp_job *job = NULL;
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(slot.group->lock);
+       MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
+
+       if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state ||
+           (_mali_osk_list_empty(&job_queue) && _mali_osk_list_empty(&job_queue_high))) {
+               mali_gp_scheduler_unlock();
+               mali_group_unlock(slot.group);
+               MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
+                                    pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0));
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+               trace_gpu_sched_switch(mali_gp_get_hw_core_desc(group->gp_core), sched_clock(), 0, 0, 0);
+#endif
+               return; /* Nothing to do, so early out */
+       }
+
+       /* Get next job in queue */
+       if (!_mali_osk_list_empty(&job_queue_high)) {
+               job = _MALI_OSK_LIST_ENTRY(job_queue_high.next, struct mali_gp_job, list);
+       } else {
+               MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job_queue));
+               job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_gp_job, list);
+       }
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       /* Remove the job from queue */
+       _mali_osk_list_del(&job->list);
+
+       /* Mark slot as busy */
+       slot.state = MALI_GP_SLOT_STATE_WORKING;
+
+       mali_gp_scheduler_unlock();
+
+       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Starting job %u (0x%08X)\n", mali_gp_job_get_id(job), job));
+
+       mali_group_start_gp_job(slot.group, job);
+       mali_group_unlock(slot.group);
+}
+
+/* Public entry point for scheduling: take the locks in the required order
+ * (group first, then scheduler) and try to start the next queued job.
+ * Both locks are released inside the callee. */
+void mali_gp_scheduler_schedule(void)
+{
+	mali_group_lock(slot.group);
+	mali_gp_scheduler_lock();
+
+	mali_gp_scheduler_schedule_internal_and_unlock();
+}
+
+/* Fill in the pre-allocated finished notification with the job result (status,
+ * heap address, counter values), send it to the submitting session, then
+ * delete the job and notify the scheduler that a job completed.
+ * Ownership of the notification passes to the session; it is cleared on the
+ * job so mali_gp_job_delete does not free it again. */
+static void mali_gp_scheduler_return_job_to_user(struct mali_gp_job *job, mali_bool success)
+{
+	_mali_uk_gp_job_finished_s *jobres = job->finished_notification->result_buffer;
+	_mali_osk_memset(jobres, 0, sizeof(_mali_uk_gp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
+	jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+	if (MALI_TRUE == success) {
+		jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+	} else {
+		jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+	}
+
+	jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job);
+	jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job);
+	jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job);
+
+	mali_session_send_notification(mali_gp_job_get_session(job), job->finished_notification);
+	job->finished_notification = NULL;
+
+	mali_gp_job_delete(job);
+	mali_gp_scheduler_job_completed();
+}
+
+/* Group must be locked when entering this function.  Will be unlocked before exiting.
+ *
+ * Completion path for a GP job: releases the job's timeline tracker, signals
+ * any PP job waiting on it, marks the slot idle, schedules the next GP job
+ * (which unlocks the group), kicks any other schedulers activated by the
+ * tracker release, and finally returns the result to user space. */
+void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success)
+{
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+       MALI_DEBUG_ASSERT(slot.group == group);
+
+       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) completed (%s)\n", mali_gp_job_get_id(job), job, success ? "success" : "failure"));
+
+       /* Release tracker. */
+       schedule_mask |= mali_timeline_tracker_release(&job->tracker);
+
+       /* Signal PP job. */
+       schedule_mask |= mali_gp_job_signal_pp_tracker(job, success);
+
+       mali_gp_scheduler_lock();
+
+       /* Mark slot as idle again */
+       slot.state = MALI_GP_SLOT_STATE_IDLE;
+
+       /* If paused, then this was the last job, so wake up sleeping workers */
+       if (pause_count > 0) {
+               _mali_osk_wait_queue_wake_up(gp_scheduler_working_wait_queue);
+       }
+
+       /* Schedule any queued GP jobs on this group.
+        * NOTE: this releases both the scheduler lock and the group lock. */
+       mali_gp_scheduler_schedule_internal_and_unlock();
+
+       /* GP is now scheduled, removing it from the mask. */
+       schedule_mask &= ~MALI_SCHEDULER_MASK_GP;
+
+       if (MALI_SCHEDULER_MASK_EMPTY != schedule_mask) {
+               /* Releasing the tracker activated other jobs that need scheduling. */
+               mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+       }
+
+       /* Sends the job end message to user space and free the job object */
+       mali_gp_scheduler_return_job_to_user(job, success);
+}
+
+/* Notify user space that a running GP job ran out of heap memory, so it can
+ * respond (via _mali_ukk_gp_suspend_response) with a new heap or an abort.
+ * The job's pre-allocated OOM notification is consumed here. */
+void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job)
+{
+       _mali_uk_gp_job_suspended_s * jobres;
+       _mali_osk_notification_t * notification;
+
+       mali_gp_scheduler_lock();
+
+       /* Take ownership of the job's OOM notification; the cookie lets the
+        * suspend-response path find this job again. */
+       notification = job->oom_notification;
+       job->oom_notification = NULL;
+       slot.returned_cookie = mali_gp_job_get_id(job);
+
+       jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer;
+       jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+       jobres->cookie = mali_gp_job_get_id(job);
+
+       mali_gp_scheduler_unlock();
+
+       mali_session_send_notification(mali_gp_job_get_session(job), notification);
+
+       /*
+       * If this function failed, then we could return the job to user space right away,
+       * but there is a job timer anyway that will do that eventually.
+       * This is not exactly a common case anyway.
+       */
+}
+
+/* Block new GP jobs from being scheduled and wait until the GP core has
+ * finished (or is disabled).  Each call must be balanced by a call to
+ * mali_gp_scheduler_resume(); pause_count counts nested suspends. */
+void mali_gp_scheduler_suspend(void)
+{
+       mali_gp_scheduler_lock();
+       pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
+       mali_gp_scheduler_unlock();
+
+       /* Sleep until the slot is no longer working; woken from the job-done path. */
+       _mali_osk_wait_queue_wait_event(gp_scheduler_working_wait_queue, mali_gp_scheduler_is_suspended, NULL);
+}
+
+/* Allow GP scheduling again (balances mali_gp_scheduler_suspend) and kick
+ * the scheduler if this was the outermost resume.
+ *
+ * BUGFIX: the decision to schedule is now taken while the scheduler lock is
+ * still held.  The original code re-read pause_count after unlocking, so a
+ * concurrent suspend/resume could race the check. */
+void mali_gp_scheduler_resume(void)
+{
+       mali_bool schedule_now;
+
+       mali_gp_scheduler_lock();
+       pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+       schedule_now = (0 == pause_count) ? MALI_TRUE : MALI_FALSE;
+       mali_gp_scheduler_unlock();
+       if (MALI_TRUE == schedule_now) {
+               mali_gp_scheduler_schedule();
+       }
+}
+
+/* Submit a GP job to the Timeline system.  The job is not queued on the GP
+ * scheduler here; it is queued later, when Timeline activates it (see
+ * mali_gp_scheduler_activate_job).
+ *
+ * Returns the point on the session's GP timeline representing this job. */
+mali_timeline_point mali_gp_scheduler_submit_job(struct mali_session_data *session, struct mali_gp_job *job)
+{
+       mali_timeline_point point;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       /* We hold a PM reference for every job we hold queued (and running);
+        * released in mali_gp_scheduler_job_completed(). */
+       _mali_osk_pm_dev_ref_add();
+
+       /* Add job to Timeline system. */
+       point = mali_timeline_system_add_tracker(session->timeline_system, &job->tracker, MALI_TIMELINE_GP);
+
+       return point;
+}
+
+/* User-kernel entry point: create a GP job from user space arguments and
+ * submit it to the Timeline system.
+ *
+ * @param ctx   Session the job belongs to (struct mali_session_data *).
+ * @param uargs Job description copied from user space.
+ * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_NOMEM if the job could
+ *         not be created; _MALI_OSK_ERR_ITEM_NOT_FOUND if the timeline point
+ *         could not be written back to user space (the job itself has
+ *         already been submitted at that point and is NOT freed here). */
+_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *uargs)
+{
+       struct mali_session_data *session;
+       struct mali_gp_job *job;
+       mali_timeline_point point;
+       u32 __user *timeline_point_ptr = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(uargs);
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+
+       session = (struct mali_session_data*)ctx;
+
+       job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id(), NULL);
+       if (NULL == job) {
+               MALI_PRINT_ERROR(("Failed to create GP job.\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       timeline_point_ptr = (u32 __user *) job->uargs.timeline_point_ptr;
+
+       /* Ownership of the job passes to the Timeline system here. */
+       point = mali_gp_scheduler_submit_job(session, job);
+
+       if (0 != _mali_osk_put_user(((u32) point), timeline_point_ptr)) {
+               /* Let user space know that something failed after the job was started. */
+               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* User-kernel query: report the number of GP cores.  This scheduler manages
+ * a single GP slot, so the answer is always 1. */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       args->number_of_cores = 1;
+       return _MALI_OSK_ERR_OK;
+}
+
+/* User-kernel query: report the GP core hardware version (cached at
+ * scheduler initialization in the file-scope 'gp_version'). */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       args->version = gp_version;
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Handle user space's response to a GP job suspended due to heap OOM.
+ * If a new heap is provided the job is resumed with it; otherwise (or if
+ * the replacement notification cannot be allocated) the job is aborted.
+ *
+ * CLEANUP: the original code cast args->ctx into a local 'session' and
+ * then NULL-checked it, but args->ctx was already verified non-NULL above,
+ * so the check was dead code and the variable was otherwise unused; both
+ * have been removed.  The notification pointer initializer is now NULL
+ * instead of the integer literal 0.
+ *
+ * @param args Response from user space: resume code, job cookie and, for a
+ *             resume, the new heap start/end addresses.
+ * @return _MALI_OSK_ERR_OK on success, an error code otherwise.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
+{
+       struct mali_gp_job *resumed_job;
+       _mali_osk_notification_t *new_notification = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+
+       if (NULL == args->ctx) {
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       }
+
+       if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) {
+               /* Pre-allocate the notification for the next OOM event before
+                * taking the group lock. */
+               new_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
+
+               if (NULL == new_notification) {
+                       MALI_PRINT_ERROR(("Mali GP scheduler: Failed to allocate notification object. Will abort GP job.\n"));
+                       mali_group_lock(slot.group);
+                       mali_group_abort_gp_job(slot.group, args->cookie);
+                       mali_group_unlock(slot.group);
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+
+       mali_group_lock(slot.group);
+
+       if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) {
+               MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Resuming job %u with new heap; 0x%08X - 0x%08X\n", args->cookie, args->arguments[0], args->arguments[1]));
+
+               resumed_job = mali_group_resume_gp_with_new_heap(slot.group, args->cookie, args->arguments[0], args->arguments[1]);
+               if (NULL != resumed_job) {
+                       /* The resumed job takes ownership of the pre-allocated notification. */
+                       resumed_job->oom_notification = new_notification;
+                       mali_group_unlock(slot.group);
+                       return _MALI_OSK_ERR_OK;
+               } else {
+                       /* The job is no longer running; free the unused notification. */
+                       mali_group_unlock(slot.group);
+                       _mali_osk_notification_delete(new_notification);
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+
+       MALI_DEBUG_PRINT(2, ("Mali GP scheduler: Aborting job %u, no new heap provided\n", args->cookie));
+       mali_group_abort_gp_job(slot.group, args->cookie);
+       mali_group_unlock(slot.group);
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Abort all queued and running GP jobs belonging to a session that is being
+ * torn down.  Queued jobs are moved to a local list under the scheduler lock
+ * and then released/deleted outside the lock; any job still running on the
+ * group is aborted last. */
+void mali_gp_scheduler_abort_session(struct mali_session_data *session)
+{
+       struct mali_gp_job *job, *tmp;
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs);
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT(session->is_aborting);
+
+       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Aborting all jobs from session 0x%08X.\n", session));
+
+       mali_gp_scheduler_lock();
+
+       /* Find all jobs from the aborting session. */
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue, struct mali_gp_job, list) {
+               if (job->session == session) {
+                       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Removing job %u (0x%08X) from queue.\n", mali_gp_job_get_id(job), job));
+                       _mali_osk_list_move(&job->list, &removed_jobs);
+               }
+       }
+
+       /* Find all high priority jobs from the aborting session. */
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue_high, struct mali_gp_job, list) {
+               if (job->session == session) {
+                       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Removing job %u (0x%08X) from queue.\n", mali_gp_job_get_id(job), job));
+                       _mali_osk_list_move(&job->list, &removed_jobs);
+               }
+       }
+
+       mali_gp_scheduler_unlock();
+
+       /* Release and delete all found jobs from the aborting session. */
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &removed_jobs, struct mali_gp_job, list) {
+               mali_timeline_tracker_release(&job->tracker);
+               mali_gp_job_signal_pp_tracker(job, MALI_FALSE);
+               mali_gp_job_delete(job);
+               mali_gp_scheduler_job_completed();
+       }
+
+       /* Abort any running jobs from the session. */
+       mali_group_abort_session(slot.group, session);
+}
+
+/* Wait-queue predicate used by mali_gp_scheduler_suspend(): true once the
+ * scheduler is paused and the slot is no longer working. */
+static mali_bool mali_gp_scheduler_is_suspended(void *data)
+{
+       mali_bool ret;
+
+       /* This callback does not use the data pointer. */
+       MALI_IGNORE(data);
+
+       mali_gp_scheduler_lock();
+       ret = pause_count > 0 && (slot.state == MALI_GP_SLOT_STATE_IDLE || slot.state == MALI_GP_SLOT_STATE_DISABLED);
+       mali_gp_scheduler_unlock();
+
+       return ret;
+}
+
+
+#if MALI_STATE_TRACKING
+/* Debug helper: append a human-readable dump of the GP scheduler state
+ * (queue occupancy plus the group's own state) to buf.  Returns the number
+ * of characters written.  NOTE: queues and group are read without locks;
+ * the output is a best-effort snapshot. */
+u32 mali_gp_scheduler_dump_state(char *buf, u32 size)
+{
+       int n = 0;
+
+       n += _mali_osk_snprintf(buf + n, size - n, "GP\n");
+       n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue) ? "empty" : "not empty");
+       n += _mali_osk_snprintf(buf + n, size - n, "\tHigh priority queue is %s\n", _mali_osk_list_empty(&job_queue_high) ? "empty" : "not empty");
+
+       n += mali_group_dump_state(slot.group, buf + n, size - n);
+       n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+       return n;
+}
+#endif
+
+/* Reset the GP group (there is only one) under its lock.  Called after the
+ * Mali HW has been powered on so the core starts from a known state. */
+void mali_gp_scheduler_reset_all_groups(void)
+{
+       if (NULL != slot.group) {
+               mali_group_lock(slot.group);
+               mali_group_reset(slot.group);
+               mali_group_unlock(slot.group);
+       }
+}
+
+/* Zap (invalidate) the given session's MMU mappings on the GP group, if the
+ * group exists.  Locking is handled inside mali_group_zap_session(). */
+void mali_gp_scheduler_zap_all_active(struct mali_session_data *session)
+{
+       if (NULL != slot.group) {
+               mali_group_zap_session(slot.group, session);
+       }
+}
+
+/* Re-enable a GP group previously disabled with
+ * mali_gp_scheduler_disable_group: power it back on, reset it, and schedule
+ * any jobs queued while it was disabled.  No-op if already enabled. */
+void mali_gp_scheduler_enable_group(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT(slot.group == group);
+       MALI_DEBUG_PRINT(2, ("Mali GP scheduler: enabling gp group %p\n", group));
+
+       mali_group_lock(group);
+
+       if (MALI_GROUP_STATE_DISABLED != group->state) {
+               mali_group_unlock(group);
+               MALI_DEBUG_PRINT(2, ("Mali GP scheduler: gp group %p already enabled\n", group));
+               return;
+       }
+
+       mali_gp_scheduler_lock();
+
+       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
+       MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_DISABLED == slot.state);
+       slot.state = MALI_GP_SLOT_STATE_IDLE;
+       group->state = MALI_GROUP_STATE_IDLE;
+
+       mali_group_power_on_group(group);
+       mali_group_reset(group);
+
+       /* Pick up any jobs that might have been queued while the GP group was disabled.
+        * NOTE: this releases both the scheduler lock and the group lock. */
+       mali_gp_scheduler_schedule_internal_and_unlock();
+}
+
+/* Take the GP group out of the scheduler and power it off.  Uses
+ * suspend/resume to drain any running job first; idempotent if the group is
+ * already disabled. */
+void mali_gp_scheduler_disable_group(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT(slot.group == group);
+       MALI_DEBUG_PRINT(2, ("Mali GP scheduler: disabling gp group %p\n", group));
+
+       /* Wait for any running job to finish before touching the state. */
+       mali_gp_scheduler_suspend();
+       mali_group_lock(group);
+       mali_gp_scheduler_lock();
+
+       MALI_DEBUG_ASSERT(   MALI_GROUP_STATE_IDLE     == group->state
+                            || MALI_GROUP_STATE_DISABLED == group->state);
+
+       if (MALI_GROUP_STATE_DISABLED == group->state) {
+               MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_DISABLED == slot.state);
+               MALI_DEBUG_PRINT(2, ("Mali GP scheduler: gp group %p already disabled\n", group));
+       } else {
+               MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_IDLE == slot.state);
+               slot.state = MALI_GP_SLOT_STATE_DISABLED;
+               group->state = MALI_GROUP_STATE_DISABLED;
+
+               mali_group_power_off_group(group, MALI_TRUE);
+       }
+
+       mali_gp_scheduler_unlock();
+       mali_group_unlock(group);
+       mali_gp_scheduler_resume();
+}
+
+/* Insert an activated GP job into the appropriate queue (normal or high
+ * priority), ordered so that mali_gp_job_is_after() holds along the list.
+ * Caller must hold the scheduler lock.  Returns a mask with
+ * MALI_SCHEDULER_MASK_GP set if the caller should trigger scheduling. */
+static mali_scheduler_mask mali_gp_scheduler_queue_job(struct mali_gp_job *job)
+{
+       _mali_osk_list_t *queue = NULL;
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+       struct mali_gp_job *iter, *tmp;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_POINTER(job->session);
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
+
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE, job->pid, job->tid, job->uargs.frame_builder_id, job->uargs.flush_id, 0);
+
+       job->cache_order = mali_scheduler_get_new_cache_order();
+
+       /* Determine which queue the job should be added to. */
+       if (job->session->use_high_priority_job_queue) {
+               queue = &job_queue_high;
+       } else {
+               queue = &job_queue;
+       }
+
+       /* Find position in queue where job should be added: walk backwards
+        * from the tail until a job this one must come after is found.  If
+        * none is found, iter ends up referring to the list head and the job
+        * is inserted at the front (standard intrusive-list idiom). */
+       _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, queue, struct mali_gp_job, list) {
+               if (mali_gp_job_is_after(job, iter)) {
+                       break;
+               }
+       }
+
+       /* Add job to queue. */
+       _mali_osk_list_add(&job->list, &iter->list);
+
+       /* Set schedule bitmask if the GP core is idle. */
+       if (MALI_GP_SLOT_STATE_IDLE == slot.state) {
+               schedule_mask |= MALI_SCHEDULER_MASK_GP;
+       }
+
+       mali_gp_scheduler_job_queued();
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+       trace_gpu_job_enqueue(mali_gp_job_get_tid(job), mali_gp_job_get_id(job), "GP");
+#endif
+
+       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n", mali_gp_job_get_id(job), job));
+
+       return schedule_mask;
+}
+
+/* Timeline callback: a GP job's dependencies are met and it is ready to run.
+ * If the owning session is aborting the job is released and deleted instead
+ * of queued.  Returns a scheduling mask for the caller (see header). */
+mali_scheduler_mask mali_gp_scheduler_activate_job(struct mali_gp_job *job)
+{
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_POINTER(job->session);
+
+       MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Timeline activation for job %u (0x%08X).\n", mali_gp_job_get_id(job), job));
+
+       mali_gp_scheduler_lock();
+
+       if (unlikely(job->session->is_aborting)) {
+               /* Before checking if the session is aborting, the scheduler must be locked. */
+               MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
+
+               MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) activated while session is aborting.\n", mali_gp_job_get_id(job), job));
+
+               /* This job should not be on any list. */
+               MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+
+               mali_gp_scheduler_unlock();
+
+               /* Release tracker and delete job. */
+               mali_timeline_tracker_release(&job->tracker);
+               mali_gp_job_signal_pp_tracker(job, MALI_FALSE);
+               mali_gp_job_delete(job);
+               mali_gp_scheduler_job_completed();
+
+               /* Since we are aborting we ignore the scheduler mask. */
+               return MALI_SCHEDULER_MASK_EMPTY;
+       }
+
+       /* GP job is ready to run, queue it. */
+       schedule_mask = mali_gp_scheduler_queue_job(job);
+
+       mali_gp_scheduler_unlock();
+
+       return schedule_mask;
+}
+
+/* Per-job bookkeeping when a GP job enters a queue: start GP utilization
+ * accounting.  Balanced by mali_gp_scheduler_job_completed(). */
+static void mali_gp_scheduler_job_queued(void)
+{
+       if (mali_utilization_enabled()) {
+               /*
+                * We cheat a little bit by counting the GP as busy from the time a GP job is queued.
+                * This will be fine because we only lose the tiny idle gap between jobs, but
+                * we will instead get less utilization work to do (less locks taken)
+                * (Original comment said "PP"; the call below is mali_utilization_gp_start().)
+                */
+               mali_utilization_gp_start();
+       }
+}
+
+/* Per-job bookkeeping when a GP job is done (finished, aborted or dropped):
+ * release the PM reference and stop GP utilization accounting. */
+static void mali_gp_scheduler_job_completed(void)
+{
+       /* Release the PM reference taken when the job was submitted.  (The
+        * original comment pointed at mali_gp_scheduler_job_queued(), but the
+        * _mali_osk_pm_dev_ref_add() call is in mali_gp_scheduler_submit_job().) */
+       _mali_osk_pm_dev_ref_dec();
+
+       if (mali_utilization_enabled()) {
+               mali_utilization_gp_end();
+       }
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_gp_scheduler.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_gp_scheduler.h
new file mode 100644 (file)
index 0000000..154d063
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_SCHEDULER_H__
+#define __MALI_GP_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_gp_job.h"
+#include "mali_group.h"
+
+_mali_osk_errcode_t mali_gp_scheduler_initialize(void);
+void mali_gp_scheduler_terminate(void);
+
+void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success);
+void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job);
+u32 mali_gp_scheduler_dump_state(char *buf, u32 size);
+
+void mali_gp_scheduler_suspend(void);
+void mali_gp_scheduler_resume(void);
+
+/**
+ * @brief Abort all running and queued GP jobs from session.
+ *
+ * This function aborts all GP jobs from the specified session. Queued jobs are removed from the
+ * queue and jobs currently running on a core will be aborted.
+ *
+ * @param session Session that is aborting.
+ */
+void mali_gp_scheduler_abort_session(struct mali_session_data *session);
+
+/**
+ * @brief Reset all groups
+ *
+ * This function resets all groups known by the GP scheduler. This must be
+ * called after the Mali HW has been powered on in order to reset the HW.
+ */
+void mali_gp_scheduler_reset_all_groups(void);
+
+/**
+ * @brief Zap TLB on all groups with \a session active
+ *
+ * The scheduler will zap the session on all groups it owns.
+ */
+void mali_gp_scheduler_zap_all_active(struct mali_session_data *session);
+
+/**
+ * @brief Re-enable a group that has been disabled with mali_gp_scheduler_disable_group
+ *
+ * If a Mali PMU is present, the group will be powered back on and added back
+ * into the GP scheduler.
+ *
+ * @param group Pointer to the group to enable
+ */
+void mali_gp_scheduler_enable_group(struct mali_group *group);
+
+/**
+ * @brief Disable a group
+ *
+ * The group will be taken out of the GP scheduler and powered off, if a Mali
+ * PMU is present.
+ *
+ * @param group Pointer to the group to disable
+ */
+void mali_gp_scheduler_disable_group(struct mali_group *group);
+
+/**
+ * @brief Used by the Timeline system to queue a GP job.
+ *
+ * @note @ref mali_scheduler_schedule_from_mask() should be called if this function returns non-zero.
+ *
+ * @param job The GP job that is being activated.
+ *
+ * @return A scheduling bitmask that can be used to decide if scheduling is necessary after this
+ * call.
+ */
+mali_scheduler_mask mali_gp_scheduler_activate_job(struct mali_gp_job *job);
+
+/**
+ * @brief Schedule queued jobs on idle cores.
+ */
+void mali_gp_scheduler_schedule(void);
+
+/**
+ * @brief Submit a GP job to the GP scheduler.
+ *
+ * This will add the GP job to the Timeline system.
+ *
+ * @param session Session this job belongs to.
+ * @param job GP job that will be submitted
+ * @return Point on GP timeline for job.
+ */
+mali_timeline_point mali_gp_scheduler_submit_job(struct mali_session_data *session, struct mali_gp_job *job);
+
+#endif /* __MALI_GP_SCHEDULER_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_group.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_group.c
new file mode 100644 (file)
index 0000000..c60c5bf
--- /dev/null
@@ -0,0 +1,1855 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_mmu.h"
+#include "mali_dlbu.h"
+#include "mali_broadcast.h"
+#include "mali_scheduler.h"
+#include "mali_osk_profiling.h"
+#include "mali_pm_domain.h"
+#include "mali_pm.h"
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+
+
+static void mali_group_bottom_half_mmu(void *data);
+static void mali_group_bottom_half_gp(void *data);
+static void mali_group_bottom_half_pp(void *data);
+
+static void mali_group_timeout(void *data);
+static void mali_group_reset_pp(struct mali_group *group);
+static void mali_group_reset_mmu(struct mali_group *group);
+
+#if defined(CONFIG_MALI400_PROFILING)
+static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num);
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+/*
+ * The group object is the most important object in the device driver,
+ * and acts as the center of many HW operations.
+ * The reason for this is that operations on the MMU will affect all
+ * cores connected to this MMU (a group is defined by the MMU and the
+ * cores which are connected to this).
+ * The group lock is thus the most important lock, followed by the
+ * GP and PP scheduler locks. They must be taken in the following
+ * order:
+ * GP/PP lock first, then group lock(s).
+ */
+
+static struct mali_group *mali_global_groups[MALI_MAX_NUMBER_OF_GROUPS] = { NULL, };
+static u32 mali_global_num_groups = 0;
+
+/* timer related */
+int mali_max_job_runtime = MALI_MAX_JOB_RUNTIME_DEFAULT;
+
+/* local helper functions */
+static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_recovery_reset(struct mali_group *group);
+static void mali_group_mmu_page_fault_and_unlock(struct mali_group *group);
+
+static void mali_group_post_process_job_pp(struct mali_group *group);
+static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend);
+
+/* Acquire the group's spinlock.  The IRQ-safe variant is used when
+ * scheduling from the interrupt upper half is enabled. */
+void mali_group_lock(struct mali_group *group)
+{
+#ifdef MALI_UPPER_HALF_SCHEDULING
+       _mali_osk_spinlock_irq_lock(group->lock);
+#else
+       _mali_osk_spinlock_lock(group->lock);
+#endif
+       MALI_DEBUG_PRINT(5, ("Mali group: Group lock taken 0x%08X\n", group));
+}
+
+/* Release the group's spinlock; counterpart of mali_group_lock(). */
+void mali_group_unlock(struct mali_group *group)
+{
+       MALI_DEBUG_PRINT(5, ("Mali group: Releasing group lock 0x%08X\n", group));
+#ifdef MALI_UPPER_HALF_SCHEDULING
+       _mali_osk_spinlock_irq_unlock(group->lock);
+#else
+       _mali_osk_spinlock_unlock(group->lock);
+#endif
+}
+
+#ifdef DEBUG
+/* Debug-only helper: assert that the current thread holds the group lock. */
+void mali_group_assert_locked(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+}
+#endif
+
+
+/* Allocate and initialize a group object and register it in the global
+ * group table.  A non-NULL dlbu marks the group as virtual, which selects a
+ * different lock order class.  On any partial failure, already-acquired
+ * resources are released in reverse order and NULL is returned.
+ *
+ * @param core  L2 cache the group's cores are attached to.
+ * @param dlbu  DLB unit (virtual groups only), or NULL.
+ * @param bcast Broadcast unit (virtual groups only), or NULL.
+ * @return New group, or NULL on allocation/initialization failure. */
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core, struct mali_dlbu_core *dlbu, struct mali_bcast_unit *bcast)
+{
+       struct mali_group *group = NULL;
+
+       if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS) {
+               MALI_PRINT_ERROR(("Mali group: Too many group objects created\n"));
+               return NULL;
+       }
+
+       group = _mali_osk_calloc(1, sizeof(struct mali_group));
+       if (NULL != group) {
+               group->timeout_timer = _mali_osk_timer_init();
+
+               if (NULL != group->timeout_timer) {
+                       _mali_osk_lock_order_t order;
+                       _mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group);
+
+                       /* Virtual groups get their own lock order class. */
+                       if (NULL != dlbu) {
+                               order = _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL;
+                       } else {
+                               order = _MALI_OSK_LOCK_ORDER_GROUP;
+                       }
+
+#ifdef MALI_UPPER_HALF_SCHEDULING
+                       group->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, order);
+#else
+                       group->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, order);
+#endif
+
+                       if (NULL != group->lock) {
+                               group->l2_cache_core[0] = core;
+                               group->session = NULL;
+                               group->power_is_on = MALI_TRUE;
+                               group->state = MALI_GROUP_STATE_IDLE;
+                               _mali_osk_list_init(&group->group_list);
+                               _mali_osk_list_init(&group->pp_scheduler_list);
+                               group->parent_group = NULL;
+                               group->l2_cache_core_ref_count[0] = 0;
+                               group->l2_cache_core_ref_count[1] = 0;
+                               group->bcast_core = bcast;
+                               group->dlbu_core = dlbu;
+
+                               mali_global_groups[mali_global_num_groups] = group;
+                               mali_global_num_groups++;
+
+                               return group;
+                       }
+                       /* Failure unwind: release timer, then the group itself. */
+                       _mali_osk_timer_term(group->timeout_timer);
+               }
+               _mali_osk_free(group);
+       }
+
+       return NULL;
+}
+
+/* Attach an MMU core to the group (group takes ownership) and create its
+ * interrupt bottom-half work item.  Returns _MALI_OSK_ERR_FAULT if the work
+ * item cannot be created. */
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core* mmu_core)
+{
+       /* This group object now owns the MMU core object */
+       group->mmu= mmu_core;
+       group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group);
+       if (NULL == group->bottom_half_work_mmu) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Detach the MMU core from the group and delete its bottom-half work item.
+ * Note: the MMU core object itself is not deleted here. */
+void mali_group_remove_mmu_core(struct mali_group *group)
+{
+       /* This group object no longer owns the MMU core object */
+       group->mmu = NULL;
+       if (NULL != group->bottom_half_work_mmu) {
+               _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+       }
+}
+
+/* Attach a GP core to the group (group takes ownership) and create its
+ * interrupt bottom-half work item.  Returns _MALI_OSK_ERR_FAULT if the work
+ * item cannot be created. */
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core* gp_core)
+{
+       /* This group object now owns the GP core object */
+       group->gp_core = gp_core;
+       group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group);
+       if (NULL == group->bottom_half_work_gp) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Detach the GP core from the group and delete its bottom-half work item.
+ * Note: the GP core object itself is not deleted here. */
+void mali_group_remove_gp_core(struct mali_group *group)
+{
+       /* This group object no longer owns the GP core object */
+       group->gp_core = NULL;
+       if (NULL != group->bottom_half_work_gp) {
+               _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+       }
+}
+
+/* Attach a PP core to the group (group takes ownership) and create its
+ * interrupt bottom-half work item.  Returns _MALI_OSK_ERR_FAULT if the work
+ * item cannot be created. */
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core* pp_core)
+{
+       /* This group object now owns the PP core object */
+       group->pp_core = pp_core;
+       group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group);
+       if (NULL == group->bottom_half_work_pp) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Detach the PP core from the group and delete its bottom-half work item.
+ * Note: the PP core object itself is not deleted here. */
+void mali_group_remove_pp_core(struct mali_group *group)
+{
+       /* This group object no longer owns the PP core object */
+       group->pp_core = NULL;
+       if (NULL != group->bottom_half_work_pp) {
+               _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+       }
+}
+
+/* Associate the group with a power-management domain (may be NULL). */
+void mali_group_set_pm_domain(struct mali_group *group, struct mali_pm_domain *domain)
+{
+       group->pm_domain = domain;
+}
+
+/* Destroy a group and everything it owns: GP/PP/MMU cores, child groups and
+ * DLBU/broadcast units (for virtual groups), its slot in the global group
+ * table, timer, bottom-half work items and lock.  Must not be called on a
+ * group that is still a child of a virtual group. */
+void mali_group_delete(struct mali_group *group)
+{
+       u32 i;
+
+       MALI_DEBUG_PRINT(4, ("Deleting group %p\n", group));
+
+       MALI_DEBUG_ASSERT(NULL == group->parent_group);
+
+       /* Delete the resources that this group owns */
+       if (NULL != group->gp_core) {
+               mali_gp_delete(group->gp_core);
+       }
+
+       if (NULL != group->pp_core) {
+               mali_pp_delete(group->pp_core);
+       }
+
+       if (NULL != group->mmu) {
+               mali_mmu_delete(group->mmu);
+       }
+
+       if (mali_group_is_virtual(group)) {
+               /* Remove all groups from virtual group */
+               struct mali_group *child;
+               struct mali_group *temp;
+
+               /* Recursively delete all child groups (their parent pointer is
+                * cleared first so the assert at the top holds). */
+               _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+                       child->parent_group = NULL;
+                       mali_group_delete(child);
+               }
+
+               mali_dlbu_delete(group->dlbu_core);
+
+               if (NULL != group->bcast_core) {
+                       mali_bcast_unit_delete(group->bcast_core);
+               }
+       }
+
+       /* Remove this group from the global table by swapping the last entry
+        * into its slot (order of the table is not significant). */
+       for (i = 0; i < mali_global_num_groups; i++) {
+               if (mali_global_groups[i] == group) {
+                       mali_global_groups[i] = NULL;
+                       mali_global_num_groups--;
+
+                       if (i != mali_global_num_groups) {
+                               /* We removed a group from the middle of the array -- move the last
+                                * group to the current position to close the gap */
+                               mali_global_groups[i] = mali_global_groups[mali_global_num_groups];
+                               mali_global_groups[mali_global_num_groups] = NULL;
+                       }
+
+                       break;
+               }
+       }
+
+       if (NULL != group->timeout_timer) {
+               _mali_osk_timer_del(group->timeout_timer);
+               _mali_osk_timer_term(group->timeout_timer);
+       }
+
+       if (NULL != group->bottom_half_work_mmu) {
+               _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+       }
+
+       if (NULL != group->bottom_half_work_gp) {
+               _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+       }
+
+       if (NULL != group->bottom_half_work_pp) {
+               _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+       }
+
+#ifdef MALI_UPPER_HALF_SCHEDULING
+       _mali_osk_spinlock_irq_term(group->lock);
+#else
+       _mali_osk_spinlock_term(group->lock);
+#endif
+       _mali_osk_free(group);
+}
+
/* Debug helper: dump a virtual group's L2 cache bookkeeping and its member
 * list. Wrapped in MALI_DEBUG_CODE, so presumably only present in debug
 * builds -- callers invoke it via MALI_DEBUG_CODE(...) as well. */
MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup)
{
	u32 i;
	struct mali_group *group;
	struct mali_group *temp;

	MALI_DEBUG_PRINT(4, ("Virtual group %p\n", vgroup));
	MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0]));
	MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1]));

	i = 0;
	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) {
		MALI_DEBUG_PRINT(4, ("[%d] %p, l2_cache_core[0] = %p\n", i, group, group->l2_cache_core[0]));
		i++;
	}
})
+
/**
 * @brief Add child group to virtual group parent
 *
 * Before calling this function, child must have its state set to JOINING_VIRTUAL
 * to ensure it's not touched during the transition period. When this function returns,
 * child's state will be IN_VIRTUAL.
 *
 * Software state (membership list, L2 cache refcounts, broadcast/DLBU
 * registration) is always updated; the hardware (MMU, broadcast unit,
 * DLBU mask, possibly a running PP job) is only touched when
 * @update_hw is MALI_TRUE.
 */
void mali_group_add_group(struct mali_group *parent, struct mali_group *child, mali_bool update_hw)
{
	mali_bool found;
	u32 i;
	struct mali_session_data *child_session;

	MALI_DEBUG_PRINT(3, ("Adding group %p to virtual group %p\n", child, parent));

	MALI_ASSERT_GROUP_LOCKED(parent);

	MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
	MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
	MALI_DEBUG_ASSERT(NULL == child->parent_group);
	MALI_DEBUG_ASSERT(MALI_GROUP_STATE_JOINING_VIRTUAL == child->state);

	_mali_osk_list_addtail(&child->group_list, &parent->group_list);

	child->state = MALI_GROUP_STATE_IN_VIRTUAL;
	child->parent_group = parent;

	MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]);

	MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1]));
	MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1]));

	/* Keep track of the L2 cache cores of child groups */
	found = MALI_FALSE;
	for (i = 0; i < 2; i++) {
		if (parent->l2_cache_core[i] == child->l2_cache_core[0]) {
			MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0);
			parent->l2_cache_core_ref_count[i]++;
			found = MALI_TRUE;
		}
	}

	if (!found) {
		/* First time we see this L2 cache, add it to our list */
		i = (NULL == parent->l2_cache_core[0]) ? 0 : 1;

		MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i]));

		MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]);

		parent->l2_cache_core[i] = child->l2_cache_core[0];
		parent->l2_cache_core_ref_count[i]++;
	}

	/* Update Broadcast Unit and DLBU */
	mali_bcast_add_group(parent->bcast_core, child);
	mali_dlbu_add_group(parent->dlbu_core, child);

	/* Remember the child's session; while in the virtual group the child
	 * does not track a session of its own. */
	child_session = child->session;
	child->session = NULL;

	/* Above this comment, only software state is updated and the HW is not
	 * touched. Now, check if Mali is powered and skip the rest if it isn't
	 * powered.
	 */

	if (!update_hw) {
		MALI_DEBUG_CODE(mali_group_print_virtual(parent));
		return;
	}

	/* Update MMU: zap if the sessions already match, otherwise activate
	 * the parent's page directory (or the empty one) on the child's MMU. */
	if (parent->session == child_session) {
		mali_mmu_zap_tlb(child->mmu);
	} else {
		if (NULL == parent->session) {
			mali_mmu_activate_empty_page_directory(child->mmu);
		} else {
			mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session));
		}
	}

	/* Update HW only if power is on */
	mali_bcast_reset(parent->bcast_core);
	mali_dlbu_update_mask(parent->dlbu_core);

	/* Start job on child when parent is active */
	if (NULL != parent->pp_running_job) {
		struct mali_pp_job *job = parent->pp_running_job;
		MALI_DEBUG_PRINT(3, ("Group %x joining running job %d on virtual group %x\n",
				     child, mali_pp_job_get_id(job), parent));
		MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING == parent->state);
		mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE);

		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
					      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
					      MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
					      mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);

		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|
					      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
					      MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
					      mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
	}

	MALI_DEBUG_CODE(mali_group_print_virtual(parent);)
}
+
/**
 * @brief Remove child group from virtual group parent
 *
 * After the child is removed, its state will be LEAVING_VIRTUAL and must be set
 * to IDLE before it can be used. The child inherits the parent's session,
 * and the parent's L2 cache refcount for the child's cache is dropped
 * (the slot is cleared when it reaches zero).
 */
void mali_group_remove_group(struct mali_group *parent, struct mali_group *child)
{
	u32 i;

	MALI_ASSERT_GROUP_LOCKED(parent);

	MALI_DEBUG_PRINT(3, ("Removing group %p from virtual group %p\n", child, parent));

	MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
	MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
	MALI_DEBUG_ASSERT(parent == child->parent_group);
	MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IN_VIRTUAL == child->state);
	/* Removing groups while running is not yet supported. */
	MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == parent->state);

	mali_group_lock(child);

	/* Update Broadcast Unit and DLBU */
	mali_bcast_remove_group(parent->bcast_core, child);
	mali_dlbu_remove_group(parent->dlbu_core, child);

	/* Update HW only if power is on */
	if (mali_pm_is_power_on()) {
		mali_bcast_reset(parent->bcast_core);
		mali_dlbu_update_mask(parent->dlbu_core);
	}

	_mali_osk_list_delinit(&child->group_list);

	child->session = parent->session;
	child->parent_group = NULL;
	child->state = MALI_GROUP_STATE_LEAVING_VIRTUAL;

	/* Keep track of the L2 cache cores of child groups */
	i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1;

	MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]);

	parent->l2_cache_core_ref_count[i]--;

	if (parent->l2_cache_core_ref_count[i] == 0) {
		parent->l2_cache_core[i] = NULL;
	}

	MALI_DEBUG_CODE(mali_group_print_virtual(parent));

	mali_group_unlock(child);
}
+
+struct mali_group *mali_group_acquire_group(struct mali_group *parent)
+{
+       struct mali_group *child;
+
+       MALI_ASSERT_GROUP_LOCKED(parent);
+
+       MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+       MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&parent->group_list));
+
+       child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);
+
+       mali_group_remove_group(parent, child);
+
+       return child;
+}
+
/**
 * @brief Reset all hardware units owned by the group.
 *
 * Resets (in order) DLBU, broadcast unit, MMU, GP core and PP core(s),
 * skipping units the group does not have. Clears the group's session.
 * Must not be used to abort jobs (asserted: no job may be running).
 */
void mali_group_reset(struct mali_group *group)
{
	/*
	 * This function should not be used to abort jobs,
	 * currently only called during insmod and PM resume
	 */
	MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
	MALI_DEBUG_ASSERT(NULL == group->gp_running_job);
	MALI_DEBUG_ASSERT(NULL == group->pp_running_job);

	group->session = NULL;

	if (NULL != group->dlbu_core) {
		mali_dlbu_reset(group->dlbu_core);
	}

	if (NULL != group->bcast_core) {
		mali_bcast_reset(group->bcast_core);
	}

	if (NULL != group->mmu) {
		mali_group_reset_mmu(group);
	}

	if (NULL != group->gp_core) {
		mali_gp_reset(group->gp_core);
	}

	if (NULL != group->pp_core) {
		mali_group_reset_pp(group);
	}
}
+
/* Return the GP core owned by @group, or NULL if the group has none. */
struct mali_gp_core* mali_group_get_gp_core(struct mali_group *group)
{
	return group->gp_core;
}
+
/* Return the PP core owned by @group, or NULL if the group has none. */
struct mali_pp_core* mali_group_get_pp_core(struct mali_group *group)
{
	return group->pp_core;
}
+
/**
 * @brief Start a GP job on this group.
 *
 * Invalidates the L2 cache (if present), activates the job's session
 * page directory, kicks the GP core, emits profiling/trace events and
 * arms the per-job timeout timer. Caller must hold the group lock and
 * the group must be IDLE (asserted).
 */
void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job)
{
	struct mali_session_data *session;

	MALI_ASSERT_GROUP_LOCKED(group);
	MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);

	session = mali_gp_job_get_session(job);

	if (NULL != group->l2_cache_core[0]) {
		mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job));
	}

	mali_group_activate_page_directory(group, session);

	mali_gp_job_start(group->gp_core, job);

	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
				      MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) |
				      MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
				      mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0);
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
				      MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
				      mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0);
#if defined(CONFIG_MALI400_PROFILING)
	/* NOTE(review): l2_cache_core[0] is NULL-checked above but dereferenced
	 * unconditionally here -- presumably GP groups always have an L2 cache;
	 * confirm before relying on this path with an L2-less configuration. */
	if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
	    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
		mali_group_report_l2_cache_counters_per_core(group, 0);
#endif /* #if defined(CONFIG_MALI400_PROFILING) */

#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
	trace_gpu_sched_switch(mali_gp_get_hw_core_desc(group->gp_core), sched_clock(),
			       mali_gp_job_get_pid(job), 0, mali_gp_job_get_id(job));
#endif

	group->gp_running_job = job;
	group->state = MALI_GROUP_STATE_WORKING;

	/* Setup the timeout timer value and save the job id for the job running on the gp core */
	_mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
}
+
/**
 * @brief Start a PP job (sub-job @sub_job) on this group.
 *
 * Invalidates L2 caches, activates the job's session page directory,
 * then starts the job: for a virtual group the DLBU is configured, each
 * member core gets its stack address, and the job is started via the DMA
 * unit (falling back to direct register writes); for a physical group the
 * core is started directly. Profiling/trace events are emitted and the
 * per-job timeout timer is armed. Caller must hold the group lock and
 * the group must be IDLE (asserted).
 */
void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job)
{
	struct mali_session_data *session;

	MALI_ASSERT_GROUP_LOCKED(group);
	MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);

	session = mali_pp_job_get_session(job);

	if (NULL != group->l2_cache_core[0]) {
		mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_pp_job_get_cache_order(job));
	}

	if (NULL != group->l2_cache_core[1]) {
		mali_l2_cache_invalidate_conditional(group->l2_cache_core[1], mali_pp_job_get_cache_order(job));
	}

	mali_group_activate_page_directory(group, session);

	if (mali_group_is_virtual(group)) {
		struct mali_group *child;
		struct mali_group *temp;
		u32 core_num = 0;

		MALI_DEBUG_ASSERT( mali_pp_job_is_virtual(job));

		/* Configure DLBU for the job */
		mali_dlbu_config_job(group->dlbu_core, job);

		/* Write stack address for each child group */
		_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
			mali_pp_write_addr_stack(child->pp_core, job);
			core_num++;
		}

		/* Try to use DMA unit to start job, fallback to writing directly to the core */
		MALI_DEBUG_ASSERT(mali_dma_cmd_buf_is_valid(&job->dma_cmd_buf));
		if (_MALI_OSK_ERR_OK != mali_dma_start(mali_dma_get_global_dma_core(), &job->dma_cmd_buf)) {
			mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
		}
	} else {
		mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
	}

	/* if the group is virtual, loop through physical groups which belong to this group
	 * and call profiling events for its cores as virtual */
	if (MALI_TRUE == mali_group_is_virtual(group)) {
		struct mali_group *child;
		struct mali_group *temp;

		_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
						      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
						      MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
						      mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);

			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|
						      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
						      MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
						      mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
		}
#if defined(CONFIG_MALI400_PROFILING)
		/* Report L2 counters only for cache slots that are actually in use. */
		if (0 != group->l2_cache_core_ref_count[0]) {
			if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
			    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
				mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
			}
		}
		if (0 != group->l2_cache_core_ref_count[1]) {
			if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
			    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
				mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
			}
		}
#endif /* #if defined(CONFIG_MALI400_PROFILING) */
	} else { /* group is physical - call profiling events for physical cores */
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
					      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core))|
					      MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
					      mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);

		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|
					      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core))|
					      MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
					      mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
#if defined(CONFIG_MALI400_PROFILING)
		if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
		    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
			mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
		}
#endif /* #if defined(CONFIG_MALI400_PROFILING) */
	}
#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
	trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), mali_pp_job_get_tid(job), 0, mali_pp_job_get_id(job));
#endif
	group->pp_running_job = job;
	group->pp_running_sub_job = sub_job;
	group->state = MALI_GROUP_STATE_WORKING;

	/* Setup the timeout timer value and save the job id for the job running on the pp core */
	_mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
}
+
/**
 * @brief Resume a GP job that hit out-of-memory with a freshly grown heap.
 *
 * @param job_id      Id of the job expected to be suspended on this group;
 *                    the request is ignored if it no longer matches.
 * @param start_addr  New heap start address handed to the GP core.
 * @param end_addr    New heap end address.
 * @return The resumed GP job, or NULL if the group was not in the OOM
 *         state for that job (illegal request or job already aborted).
 */
struct mali_gp_job *mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
{
	MALI_ASSERT_GROUP_LOCKED(group);

	if (group->state != MALI_GROUP_STATE_OOM ||
	    mali_gp_job_get_id(group->gp_running_job) != job_id) {
		return NULL; /* Illegal request or job has already been aborted */
	}

	/* Make sure stale data for the old heap is not served from the L2. */
	if (NULL != group->l2_cache_core[0]) {
		mali_l2_cache_invalidate(group->l2_cache_core[0]);
	}

	mali_mmu_zap_tlb_without_stall(group->mmu);

	mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);

	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), 0, 0, 0, 0, 0);

	group->state = MALI_GROUP_STATE_WORKING;

	return group->gp_running_job;
}
+
/* Reset the group's MMU. For a virtual group, if resetting the (broadcast)
 * MMU fails, fall back to resetting each member group's MMU individually. */
static void mali_group_reset_mmu(struct mali_group *group)
{
	struct mali_group *child;
	struct mali_group *temp;
	_mali_osk_errcode_t err;

	if (!mali_group_is_virtual(group)) {
		/* Physical group -- reset the MMU and wait for the reset
		 * to complete. */
		err = mali_mmu_reset(group->mmu);
		MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
	} else { /* virtual group */
		err = mali_mmu_reset(group->mmu);
		if (_MALI_OSK_ERR_OK == err) {
			return;
		}

		/* Loop through all members of this virtual group and wait
		 * until they are done resetting.
		 */
		_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
			err = mali_mmu_reset(child->mmu);
			MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
		}
	}
}
+
/* Asynchronously reset the group's PP core(s), then wait for completion.
 * A virtual group with a running job waits on every member core; otherwise
 * only the group's own core is waited on. */
static void mali_group_reset_pp(struct mali_group *group)
{
	struct mali_group *child;
	struct mali_group *temp;

	mali_pp_reset_async(group->pp_core);

	if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) {
		/* This is a physical group or an idle virtual group -- simply wait for
		 * the reset to complete. */
		mali_pp_reset_wait(group->pp_core);
	} else { /* virtual group */
		/* Loop through all members of this virtual group and wait until they
		 * are done resetting.
		 */
		_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
			mali_pp_reset_wait(child->pp_core);
		}
	}
}
+
/* Group must be locked when entering this function.  Will be unlocked before exiting.
 *
 * Finish the PP job currently running on the group: post-process, reset
 * the core (soft reset on success, full recovery reset on failure or if
 * the soft reset does not complete), mark the group IDLE, and hand the
 * job back to the PP scheduler -- which also unlocks the group. */
static void mali_group_complete_pp_and_unlock(struct mali_group *group, mali_bool success, mali_bool in_upper_half)
{
	struct mali_pp_job *pp_job_to_return;
	u32 pp_sub_job_to_return;

	MALI_DEBUG_ASSERT_POINTER(group);
	MALI_DEBUG_ASSERT_POINTER(group->pp_core);
	MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
	MALI_ASSERT_GROUP_LOCKED(group);

	mali_group_post_process_job_pp(group);

	if (success) {
		/* Only do soft reset for successful jobs, a full recovery
		 * reset will be done for failed jobs. */
		mali_pp_reset_async(group->pp_core);
	}

	/* Capture the job before clearing the group state. */
	pp_job_to_return = group->pp_running_job;
	pp_sub_job_to_return = group->pp_running_sub_job;
	group->state = MALI_GROUP_STATE_IDLE;
	group->pp_running_job = NULL;

	if (!success) {
		MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
		mali_group_recovery_reset(group);
	} else if (_MALI_OSK_ERR_OK != mali_pp_reset_wait(group->pp_core)) {
		MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
		mali_group_recovery_reset(group);
	}

	/* Return job to user, schedule and unlock group. */
	mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, success, in_upper_half);
}
+
/* Group must be locked when entering this function.  Will be unlocked before exiting.
 *
 * GP counterpart of mali_group_complete_pp_and_unlock: post-process,
 * reset the GP core (soft on success, recovery reset otherwise), mark
 * the group IDLE and hand the job to the GP scheduler, which unlocks. */
static void mali_group_complete_gp_and_unlock(struct mali_group *group, mali_bool success)
{
	struct mali_gp_job *gp_job_to_return;

	MALI_DEBUG_ASSERT_POINTER(group);
	MALI_DEBUG_ASSERT_POINTER(group->gp_core);
	MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
	MALI_ASSERT_GROUP_LOCKED(group);

	mali_group_post_process_job_gp(group, MALI_FALSE);

	if (success) {
		/* Only do soft reset for successful jobs, a full recovery
		 * reset will be done for failed jobs. */
		mali_gp_reset_async(group->gp_core);
	}

	/* Capture the job before clearing the group state. */
	gp_job_to_return = group->gp_running_job;
	group->state = MALI_GROUP_STATE_IDLE;
	group->gp_running_job = NULL;

	if (!success) {
		MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
		mali_group_recovery_reset(group);
	} else if (_MALI_OSK_ERR_OK != mali_gp_reset_wait(group->gp_core)) {
		MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
		mali_group_recovery_reset(group);
	}

	/* Return job to user, schedule and unlock group. */
	mali_gp_scheduler_job_done(group, gp_job_to_return, success);
}
+
/* Abort the GP job with id @job_id if it is still running on this group.
 * No-op when the group is idle or a different job is running. Caller
 * must hold the group lock; the lock is still held on return. */
void mali_group_abort_gp_job(struct mali_group *group, u32 job_id)
{
	MALI_ASSERT_GROUP_LOCKED(group);

	if (MALI_GROUP_STATE_IDLE == group->state ||
	    mali_gp_job_get_id(group->gp_running_job) != job_id) {
		return; /* No need to cancel or job has already been aborted or completed */
	}

	/* Function will unlock the group, so we need to lock it again */
	mali_group_complete_gp_and_unlock(group, MALI_FALSE);
	mali_group_lock(group);
}
+
/* Abort the PP job with id @job_id if it is still running on this group.
 * No-op when the group is idle or a different job is running. Caller
 * must hold the group lock; the lock is still held on return. */
static void mali_group_abort_pp_job(struct mali_group *group, u32 job_id)
{
	MALI_ASSERT_GROUP_LOCKED(group);

	if (MALI_GROUP_STATE_IDLE == group->state ||
	    mali_pp_job_get_id(group->pp_running_job) != job_id) {
		return; /* No need to cancel or job has already been aborted or completed */
	}

	/* Function will unlock the group, so we need to lock it again */
	mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
	mali_group_lock(group);
}
+
/**
 * @brief Abort any GP/PP job belonging to @session on this group.
 *
 * Groups that are members of a virtual group are skipped (the virtual
 * parent handles them). After aborting, the group's MMU is switched to
 * the empty page directory if the session is no longer in use here.
 */
void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session)
{
	struct mali_gp_job *gp_job;
	struct mali_pp_job *pp_job;
	u32 gp_job_id = 0;
	u32 pp_job_id = 0;
	mali_bool abort_pp = MALI_FALSE;
	mali_bool abort_gp = MALI_FALSE;

	mali_group_lock(group);

	if (mali_group_is_in_virtual(group)) {
		/* Group is member of a virtual group, don't touch it! */
		mali_group_unlock(group);
		return;
	}

	gp_job = group->gp_running_job;
	pp_job = group->pp_running_job;

	/* Record which jobs belong to the aborting session before touching them. */
	if ((NULL != gp_job) && (mali_gp_job_get_session(gp_job) == session)) {
		MALI_DEBUG_PRINT(4, ("Aborting GP job 0x%08x from session 0x%08x\n", gp_job, session));

		gp_job_id = mali_gp_job_get_id(gp_job);
		abort_gp = MALI_TRUE;
	}

	if ((NULL != pp_job) && (mali_pp_job_get_session(pp_job) == session)) {
		MALI_DEBUG_PRINT(4, ("Mali group: Aborting PP job 0x%08x from session 0x%08x\n", pp_job, session));

		pp_job_id = mali_pp_job_get_id(pp_job);
		abort_pp = MALI_TRUE;
	}

	if (abort_gp) {
		mali_group_abort_gp_job(group, gp_job_id);
	}
	if (abort_pp) {
		mali_group_abort_pp_job(group, pp_job_id);
	}

	mali_group_remove_session_if_unused(group, session);

	mali_group_unlock(group);
}
+
+struct mali_group *mali_group_get_glob_group(u32 index)
+{
+       if(mali_global_num_groups > index) {
+               return mali_global_groups[index];
+       }
+
+       return NULL;
+}
+
/* Return the current number of registered groups. */
u32 mali_group_get_glob_num_groups(void)
{
	return mali_global_num_groups;
}
+
/* Make @session's page directory active on the group's MMU. If the same
 * session was already active, only the TLB is zapped (no stall); otherwise
 * the new page directory is activated and group->session is updated.
 * Caller must hold the group lock. */
static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session)
{
	MALI_ASSERT_GROUP_LOCKED(group);

	MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group 0x%08X\n", mali_session_get_page_directory(session), session, group));

	if (group->session != session) {
		/* Different session than last time, so we need to do some work */
		MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group 0x%08X\n", session, group->session, group));
		mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session));
		group->session = session;
	} else {
		/* Same session as last time, so no work required */
		MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group 0x%08X\n", session->page_directory, group));
		mali_mmu_zap_tlb_without_stall(group->mmu);
	}
}
+
+static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session)
+{
+       MALI_ASSERT_GROUP_LOCKED(group);
+
+       if (MALI_GROUP_STATE_IDLE == group->state) {
+               if (group->session == session) {
+                       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING != group->state);
+                       MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+                       MALI_DEBUG_PRINT(3, ("Mali group: Deactivating unused session 0x%08X on group %08X\n", session, group));
+                       mali_mmu_activate_empty_page_directory(group->mmu);
+                       group->session = NULL;
+               }
+       }
+}
+
/* Return whether the group is currently powered on. Caller must hold the
 * group lock (asserted in debug builds). */
mali_bool mali_group_power_is_on(struct mali_group *group)
{
	MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
	return group->power_is_on;
}
+
/* Mark a single group as powered on. Caller must hold the group lock,
 * and the group must not be WORKING (asserted below). */
void mali_group_power_on_group(struct mali_group *group)
{
	MALI_DEBUG_ASSERT_POINTER(group);
	MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
	MALI_DEBUG_ASSERT(   MALI_GROUP_STATE_IDLE       == group->state
			     || MALI_GROUP_STATE_IN_VIRTUAL == group->state
			     || MALI_GROUP_STATE_JOINING_VIRTUAL == group->state
			     || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
			     || MALI_GROUP_STATE_DISABLED   == group->state);

	MALI_DEBUG_PRINT(3, ("Group %p powered on\n", group));

	group->power_is_on = MALI_TRUE;
}
+
/* Mark a single group as powered off. Caller must hold the group lock,
 * and the group must not be WORKING (asserted below). When
 * @do_power_change is MALI_FALSE only the session is cleared and the
 * power flag is left untouched. */
void mali_group_power_off_group(struct mali_group *group, mali_bool do_power_change)
{
	MALI_DEBUG_ASSERT_POINTER(group);
	MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
	MALI_DEBUG_ASSERT(   MALI_GROUP_STATE_IDLE       == group->state
			     || MALI_GROUP_STATE_IN_VIRTUAL == group->state
			     || MALI_GROUP_STATE_JOINING_VIRTUAL == group->state
			     || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
			     || MALI_GROUP_STATE_DISABLED   == group->state);

	MALI_DEBUG_PRINT(3, ("Group %p powered off\n", group));

	/* It is necessary to set group->session = NULL so that the powered off MMU is not written
	 * to on map/unmap.  It is also necessary to set group->power_is_on = MALI_FALSE so that
	 * pending bottom_halves does not access powered off cores. */

	group->session = NULL;

	if (do_power_change) {
		group->power_is_on = MALI_FALSE;
	}
}
+
+void mali_group_power_on(void)
+{
+       int i;
+       for (i = 0; i < mali_global_num_groups; i++) {
+               struct mali_group *group = mali_global_groups[i];
+
+               mali_group_lock(group);
+               if (MALI_GROUP_STATE_DISABLED == group->state) {
+                       MALI_DEBUG_ASSERT(MALI_FALSE == group->power_is_on);
+               } else {
+                       mali_group_power_on_group(group);
+               }
+               mali_group_unlock(group);
+       }
+       MALI_DEBUG_PRINT(4, ("Mali Group: power on\n"));
+}
+
+void mali_group_power_off(mali_bool do_power_change)
+{
+       int i;
+
+       for (i = 0; i < mali_global_num_groups; i++) {
+               struct mali_group *group = mali_global_groups[i];
+
+               mali_group_lock(group);
+               if (MALI_GROUP_STATE_DISABLED == group->state) {
+                       MALI_DEBUG_ASSERT(MALI_FALSE == group->power_is_on);
+               } else {
+                       mali_group_power_off_group(group, do_power_change);
+               }
+               mali_group_unlock(group);
+       }
+       MALI_DEBUG_PRINT(4, ("Mali Group: power off\n"));
+}
+
/* Recover the group after a failed job or failed reset: stop the core's
 * bus, flush the MMU and clear any page fault, hard-reset the core(s)
 * (each member individually for a virtual group, with the broadcast unit
 * disabled while doing so), then reset the MMU and drop the session.
 * NOTE(review): the else branches assume a group without a PP core always
 * has a GP core -- presumably guaranteed at group creation; confirm. */
static void mali_group_recovery_reset(struct mali_group *group)
{
	_mali_osk_errcode_t err;

	MALI_ASSERT_GROUP_LOCKED(group);

	/* Stop cores, bus stop */
	if (NULL != group->pp_core) {
		mali_pp_stop_bus(group->pp_core);
	} else {
		mali_gp_stop_bus(group->gp_core);
	}

	/* Flush MMU and clear page fault (if any) */
	mali_mmu_activate_fault_flush_page_directory(group->mmu);
	mali_mmu_page_fault_done(group->mmu);

	/* Wait for cores to stop bus, then do a hard reset on them */
	if (NULL != group->pp_core) {
		if (mali_group_is_virtual(group)) {
			struct mali_group *child, *temp;

			/* Disable the broadcast unit while we do reset directly on the member cores. */
			mali_bcast_disable(group->bcast_core);

			_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
				mali_pp_stop_bus_wait(child->pp_core);
				mali_pp_hard_reset(child->pp_core);
			}

			mali_bcast_enable(group->bcast_core);
		} else {
			mali_pp_stop_bus_wait(group->pp_core);
			mali_pp_hard_reset(group->pp_core);
		}
	} else {
		mali_gp_stop_bus_wait(group->gp_core);
		mali_gp_hard_reset(group->gp_core);
	}

	/* Reset MMU */
	err = mali_mmu_reset(group->mmu);
	MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
	MALI_IGNORE(err);

	group->session = NULL;
}
+
+#if MALI_STATE_TRACKING
+/* Append a human-readable dump of the group's state (and that of its GP/PP
+ * core, if present) to @buf, bounded by @size.
+ * @return number of characters written. */
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size)
+{
+       int written = 0;
+
+       /* Group header: identity and scheduler state. */
+       written += _mali_osk_snprintf(buf + written, size - written, "Group: %p\n", group);
+       written += _mali_osk_snprintf(buf + written, size - written, "\tstate: %d\n", group->state);
+
+       if (NULL != group->gp_core) {
+               written += mali_gp_dump_state(group->gp_core, buf + written, size - written);
+               written += _mali_osk_snprintf(buf + written, size - written, "\tGP job: %p\n", group->gp_running_job);
+       }
+       if (NULL != group->pp_core) {
+               written += mali_pp_dump_state(group->pp_core, buf + written, size - written);
+               written += _mali_osk_snprintf(buf + written, size - written, "\tPP job: %p, subjob %d \n",
+                                             group->pp_running_job, group->pp_running_sub_job);
+       }
+
+       return written;
+}
+#endif
+
+/* Group must be locked when entering this function.  Will be unlocked before exiting. */
+/* Handle an MMU page fault on this group: read back job state, recovery-
+ * reset the group (which also clears the fault), then report the running
+ * job as failed to the relevant scheduler. The scheduler *_job_done call
+ * is responsible for releasing the group lock. */
+static void mali_group_mmu_page_fault_and_unlock(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_ASSERT_GROUP_LOCKED(group);
+
+       if (NULL != group->pp_core) {
+               struct mali_pp_job *pp_job_to_return;
+               u32 pp_sub_job_to_return;
+
+               MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
+
+               /* Collect counters/profiling data before the core is reset. */
+               mali_group_post_process_job_pp(group);
+
+               /* Snapshot job identity before clearing it from the group. */
+               pp_job_to_return = group->pp_running_job;
+               pp_sub_job_to_return = group->pp_running_sub_job;
+               group->state = MALI_GROUP_STATE_IDLE;
+               group->pp_running_job = NULL;
+
+               mali_group_recovery_reset(group); /* This will also clear the page fault itself */
+
+               /* Will unlock group. */
+               mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, MALI_FALSE, MALI_FALSE);
+       } else {
+               struct mali_gp_job *gp_job_to_return;
+
+               MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
+
+               /* Collect counters/profiling data before the core is reset. */
+               mali_group_post_process_job_gp(group, MALI_FALSE);
+
+               gp_job_to_return = group->gp_running_job;
+               group->state = MALI_GROUP_STATE_IDLE;
+               group->gp_running_job = NULL;
+
+               mali_group_recovery_reset(group); /* This will also clear the page fault itself */
+
+               /* Will unlock group. */
+               mali_gp_scheduler_job_done(group, gp_job_to_return, MALI_FALSE);
+       }
+}
+
+/* MMU interrupt upper half (runs in IRQ context).
+ * @param data the struct mali_group that owns the MMU.
+ * @return _MALI_OSK_ERR_OK if the interrupt was ours and a bottom half was
+ *         scheduled; _MALI_OSK_ERR_FAULT otherwise (the IRQ line may be
+ *         shared, so a foreign interrupt is not an error per se). */
+_mali_osk_errcode_t mali_group_upper_half_mmu(void * data)
+{
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+       struct mali_group *group = (struct mali_group *)data;
+       struct mali_mmu_core *mmu = group->mmu;
+       u32 int_stat;
+
+       MALI_DEBUG_ASSERT_POINTER(mmu);
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       /* Pin the PM domain so the core cannot be powered down while we
+        * touch its registers; bail out if it is already powered off. */
+       if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
+               goto out;
+       }
+#endif
+
+       /* Check if it was our device which caused the interrupt (we could be sharing the IRQ line) */
+       int_stat = mali_mmu_get_int_status(mmu);
+       if (0 != int_stat) {
+               struct mali_group *parent = group->parent_group;
+
+               /* page fault or bus error, we thread them both in the same way */
+               mali_mmu_mask_all_interrupts(mmu);
+               /* For a member of a virtual group, the parent owns the bottom half. */
+               if (NULL == parent) {
+                       _mali_osk_wq_schedule_work(group->bottom_half_work_mmu);
+               } else {
+                       _mali_osk_wq_schedule_work(parent->bottom_half_work_mmu);
+               }
+               err = _MALI_OSK_ERR_OK;
+               goto out;
+       }
+
+out:
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       mali_pm_domain_unlock_state(group->pm_domain);
+#endif
+
+       return err;
+}
+
+static void mali_group_bottom_half_mmu(void * data)
+{
+       struct mali_group *group = (struct mali_group *)data;
+       struct mali_mmu_core *mmu = group->mmu;
+       u32 rawstat;
+       MALI_DEBUG_CODE(u32 status);
+
+       MALI_DEBUG_ASSERT_POINTER(mmu);
+
+       mali_group_lock(group);
+
+       MALI_DEBUG_ASSERT(NULL == group->parent_group);
+
+       if ( MALI_FALSE == mali_group_power_is_on(group) ) {
+               MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mmu->hw_core.description));
+               mali_group_unlock(group);
+               return;
+       }
+
+       rawstat = mali_mmu_get_rawstat(mmu);
+       MALI_DEBUG_CODE(status = mali_mmu_get_status(mmu));
+
+       MALI_DEBUG_PRINT(4, ("Mali MMU: Bottom half, interrupt 0x%08X, status 0x%08X\n", rawstat, status));
+
+       if (rawstat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
+               /* An actual page fault has occurred. */
+#ifdef DEBUG
+               u32 fault_address = mali_mmu_get_page_fault_addr(mmu);
+               MALI_DEBUG_PRINT(2,("Mali MMU: Page fault detected at 0x%x from bus id %d of type %s on %s\n",
+                                   (void*)fault_address,
+                                   (status >> 6) & 0x1F,
+                                   (status & 32) ? "write" : "read",
+                                   mmu->hw_core.description));
+#endif
+
+               mali_group_mmu_page_fault_and_unlock(group);
+               return;
+       }
+
+       mali_group_unlock(group);
+}
+
+/* GP core interrupt upper half (runs in IRQ context).
+ * Masks the core's interrupts and defers all real processing to the GP
+ * bottom half work item.
+ * @return _MALI_OSK_ERR_OK if the interrupt came from this core;
+ *         _MALI_OSK_ERR_FAULT otherwise (possible shared IRQ line). */
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data)
+{
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+       struct mali_group *group = (struct mali_group *)data;
+       struct mali_gp_core *core = group->gp_core;
+       u32 irq_readout;
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       /* Keep the PM domain powered while reading core registers. */
+       if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
+               goto out;
+       }
+#endif
+
+       irq_readout = mali_gp_get_int_stat(core);
+
+       if (MALIGP2_REG_VAL_IRQ_MASK_NONE != irq_readout) {
+               /* Mask out all IRQs from this core until IRQ is handled */
+               mali_gp_mask_all_interrupts(core);
+
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0)|MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT, irq_readout, 0, 0, 0, 0);
+
+               /* We do need to handle this in a bottom half */
+               _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+
+               err = _MALI_OSK_ERR_OK;
+               goto out;
+       }
+
+out:
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       mali_pm_domain_unlock_state(group->pm_domain);
+#endif
+
+       return err;
+}
+
+/* Work-queue bottom half for GP core interrupts.
+ * Dispatches on the raw interrupt status: job completion, hardware error,
+ * software timeout, or PLBU out-of-memory. The *_and_unlock helpers and
+ * the OOM path release the group lock themselves; every other path unlocks
+ * explicitly before returning. Profiling start/stop events bracket the
+ * whole bottom half. */
+static void mali_group_bottom_half_gp(void *data)
+{
+       struct mali_group *group = (struct mali_group *)data;
+       u32 irq_readout;
+       u32 irq_errors;
+
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), 0, 0);
+
+       mali_group_lock(group);
+
+       if ( MALI_FALSE == mali_group_power_is_on(group) ) {
+               MALI_PRINT_ERROR(("Mali group: Interrupt bottom half of %s when core is OFF.", mali_gp_get_hw_core_desc(group->gp_core)));
+               mali_group_unlock(group);
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
+               return;
+       }
+
+       irq_readout = mali_gp_read_rawstat(group->gp_core);
+
+       MALI_DEBUG_PRINT(4, ("Mali group: GP bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
+
+       /* Completion requires an END_CMD_LST interrupt AND an inactive core;
+        * a GP job has both a VS and a PLBU part, so one END_CMD_LST alone
+        * may only mean half the job is done. */
+       if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST)) {
+               u32 core_status = mali_gp_read_core_status(group->gp_core);
+               if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE)) {
+                       MALI_DEBUG_PRINT(4, ("Mali group: GP job completed, calling group handler\n"));
+                       group->core_timed_out = MALI_FALSE;
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                                     0, _mali_osk_get_tid(), 0, 0, 0);
+
+                       /* Success path; releases the group lock. */
+                       mali_group_complete_gp_and_unlock(group, MALI_TRUE);
+                       return;
+               }
+       }
+
+       /*
+        * Now lets look at the possible error cases (IRQ indicating error or timeout)
+        * END_CMD_LST, HANG and PLBU_OOM interrupts are not considered error.
+        */
+       irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST|MALIGP2_REG_VAL_IRQ_HANG|MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
+       if (0 != irq_errors) {
+               MALI_PRINT_ERROR(("Mali group: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
+               group->core_timed_out = MALI_FALSE;
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), 0, 0, 0);
+
+               /* Failure path; releases the group lock. */
+               mali_group_complete_gp_and_unlock(group, MALI_FALSE);
+               return;
+       } else if (group->core_timed_out) { /* SW timeout */
+               group->core_timed_out = MALI_FALSE;
+               /* Only abort if the timer was not re-armed and a job is
+                * actually still running. */
+               if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->gp_running_job) {
+                       MALI_PRINT(("Mali group: Job %d timed out\n", mali_gp_job_get_id(group->gp_running_job)));
+
+                       mali_group_complete_gp_and_unlock(group, MALI_FALSE);
+                       return;
+               }
+       } else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) {
+               /* GP wants more memory in order to continue. */
+               MALI_DEBUG_PRINT(3, ("Mali group: PLBU needs more heap memory\n"));
+
+               group->state = MALI_GROUP_STATE_OOM;
+               mali_group_unlock(group); /* Nothing to do on the HW side, so just release group lock right away */
+               mali_gp_scheduler_oom(group, group->gp_running_job);
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
+               return;
+       }
+
+       /*
+        * The only way to get here is if we only got one of two needed END_CMD_LST
+        * interrupts. Enable all but not the complete interrupt that has been
+        * received and continue to run.
+        */
+       mali_gp_enable_interrupts(group->gp_core, irq_readout & (MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST|MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST));
+       mali_group_unlock(group);
+
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
+}
+
+/* Read back per-job GP state after the job stops running: cancel the
+ * timeout timer, read performance counters, emit profiling events and
+ * record the current PLBU heap pointer on the job.
+ * @param suspend MALI_TRUE when the job is being suspended (e.g. PLBU OOM)
+ *        rather than finished; selects the SUSPEND profiling event and is
+ *        forwarded to the counter readback. */
+static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend)
+{
+       /* Stop the timeout timer. */
+       _mali_osk_timer_del_async(group->timeout_timer);
+
+       if (NULL == group->gp_running_job) {
+               /* Nothing to do */
+               return;
+       }
+
+       mali_gp_update_performance_counters(group->gp_core, group->gp_running_job, suspend);
+
+#if defined(CONFIG_MALI400_PROFILING)
+       if (suspend) {
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+                                             mali_gp_job_get_perf_counter_value0(group->gp_running_job),
+                                             mali_gp_job_get_perf_counter_value1(group->gp_running_job),
+                                             mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
+                                             0, 0);
+       } else {
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+                                             mali_gp_job_get_perf_counter_value0(group->gp_running_job),
+                                             mali_gp_job_get_perf_counter_value1(group->gp_running_job),
+                                             mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
+                                             0, 0);
+
+               /* L2 counters are only reported when both sources are set. */
+               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                   (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+                       mali_group_report_l2_cache_counters_per_core(group, 0);
+       }
+#endif
+
+       /* Save where the PLBU heap allocation got to, so a resumed/OOM job
+        * can continue from the same heap address. */
+       mali_gp_job_set_current_heap_addr(group->gp_running_job,
+                                         mali_gp_read_plbu_alloc_start_addr(group->gp_core));
+}
+
+/* PP core interrupt upper half (runs in IRQ context).
+ * Normally masks the core's interrupts and defers to the PP bottom half.
+ * With MALI_UPPER_HALF_SCHEDULING, a clean end-of-frame completion is
+ * handled directly here for lower latency, re-checking the status under
+ * the group lock before committing.
+ * @return _MALI_OSK_ERR_OK if the interrupt was handled/deferred;
+ *         _MALI_OSK_ERR_FAULT otherwise (shared IRQ line, or a member of a
+ *         virtual group whose parent owns the interrupt). */
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data)
+{
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+       struct mali_group *group = (struct mali_group *)data;
+       struct mali_pp_core *core = group->pp_core;
+       u32 irq_readout;
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       /* Keep the PM domain powered while reading core registers. */
+       if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
+               goto out;
+       }
+#endif
+
+       /*
+        * For Mali-450 there is one particular case we need to watch out for:
+        *
+        * Criteria 1) this function call can be due to a shared interrupt,
+        * and not necessary because this core signaled an interrupt.
+        * Criteria 2) this core is a part of a virtual group, and thus it should
+        * not do any post processing.
+        * Criteria 3) this core has actually indicated that is has completed by
+        * having set raw_stat/int_stat registers to != 0
+        *
+        * If all this criteria is meet, then we could incorrectly start post
+        * processing on the wrong group object (this should only happen on the
+        * parent group)
+        */
+#if !defined(MALI_UPPER_HALF_SCHEDULING)
+       if (mali_group_is_in_virtual(group)) {
+               /*
+                * This check is done without the group lock held, which could lead to
+                * a potential race. This is however ok, since we will safely re-check
+                * this with the group lock held at a later stage. This is just an
+                * early out which will strongly benefit shared IRQ systems.
+                */
+               err = _MALI_OSK_ERR_OK;
+               goto out;
+       }
+#endif
+
+       irq_readout = mali_pp_get_int_stat(core);
+       if (MALI200_REG_VAL_IRQ_MASK_NONE != irq_readout) {
+               /* Mask out all IRQs from this core until IRQ is handled */
+               mali_pp_mask_all_interrupts(core);
+
+#if defined(CONFIG_MALI400_PROFILING)
+               /* Currently no support for this interrupt event for the virtual PP core */
+               if (!mali_group_is_virtual(group)) {
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id) |
+                                                     MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT,
+                                                     irq_readout, 0, 0, 0, 0);
+               }
+#endif
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+               /* Check if job is complete without errors */
+               if (MALI200_REG_VAL_IRQ_END_OF_FRAME == irq_readout) {
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                                     0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+
+                       MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler from upper half\n"));
+
+                       mali_group_lock(group);
+
+                       /* Check if job is complete without errors, again, after taking the group lock */
+                       irq_readout = mali_pp_read_rawstat(core);
+                       if (MALI200_REG_VAL_IRQ_END_OF_FRAME != irq_readout) {
+                               /* Status changed under us; let the bottom half deal with it later. */
+                               mali_pp_enable_interrupts(core);
+                               mali_group_unlock(group);
+                               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                                             0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+                               err = _MALI_OSK_ERR_OK;
+                               goto out;
+                       }
+
+                       if (mali_group_is_virtual(group)) {
+                               u32 status_readout = mali_pp_read_status(group->pp_core);
+                               if (status_readout & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE) {
+                                       MALI_DEBUG_PRINT(6, ("Mali PP: Not all cores in broadcast completed\n"));
+                                       mali_pp_enable_interrupts(core);
+                                       mali_group_unlock(group);
+                                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                                                     0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+                                       err = _MALI_OSK_ERR_OK;
+                                       goto out;
+                               }
+                       }
+
+                       if (mali_group_is_in_virtual(group)) {
+                               /* We're member of a virtual group, so interrupt should be handled by the virtual group */
+                               mali_pp_enable_interrupts(core);
+                               mali_group_unlock(group);
+                               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                                             0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+                               err =  _MALI_OSK_ERR_FAULT;
+                               goto out;
+                       }
+
+                       group->core_timed_out = MALI_FALSE;
+
+                       /* Releases the group lock. */
+                       mali_group_complete_pp_and_unlock(group, MALI_TRUE, MALI_TRUE);
+
+                       /* No need to enable interrupts again, since the core will be reset while completing the job */
+
+                       MALI_DEBUG_PRINT(6, ("Mali PP: Upper half job done\n"));
+
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                                     0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+
+                       err = _MALI_OSK_ERR_OK;
+                       goto out;
+               }
+#endif
+
+               /* We do need to handle this in a bottom half */
+               _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
+               err = _MALI_OSK_ERR_OK;
+               goto out;
+       }
+
+out:
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       mali_pm_domain_unlock_state(group->pm_domain);
+#endif
+
+       return err;
+}
+
+/* Work-queue bottom half for PP core interrupts.
+ * Dispatches on the raw interrupt status: clean end-of-frame completion,
+ * hardware error, or software timeout. The *_and_unlock completion helpers
+ * release the group lock themselves; every other path unlocks explicitly.
+ * Profiling start/stop events bracket the whole bottom half. */
+static void mali_group_bottom_half_pp(void *data)
+{
+       struct mali_group *group = (struct mali_group *)data;
+       struct mali_pp_core *core = group->pp_core;
+       u32 irq_readout;
+       u32 irq_errors;
+
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                     0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+
+       mali_group_lock(group);
+
+       if (mali_group_is_in_virtual(group)) {
+               /* We're member of a virtual group, so interrupt should be handled by the virtual group */
+               mali_pp_enable_interrupts(core);
+               mali_group_unlock(group);
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), 0, 0, 0);
+               return;
+       }
+
+       if ( MALI_FALSE == mali_group_power_is_on(group) ) {
+               MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mali_pp_get_hw_core_desc(core)));
+               mali_group_unlock(group);
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), 0, 0, 0);
+               return;
+       }
+
+       irq_readout = mali_pp_read_rawstat(group->pp_core);
+
+       MALI_DEBUG_PRINT(4, ("Mali PP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
+
+       /* Check if job is complete without errors */
+       if (MALI200_REG_VAL_IRQ_END_OF_FRAME == irq_readout) {
+               if (mali_group_is_virtual(group)) {
+                       u32 status_readout = mali_pp_read_status(group->pp_core);
+
+                       /* For a virtual (broadcast) group, end-of-frame only
+                        * counts once every member core has gone idle. */
+                       if (status_readout & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE && !group->core_timed_out) {
+                               MALI_DEBUG_PRINT(6, ("Mali PP: Not all cores in broadcast completed\n"));
+                               mali_pp_enable_interrupts(core);
+                               mali_group_unlock(group);
+
+                               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                                             0, _mali_osk_get_tid(), 0, 0, 0);
+                               return;
+                       }
+               }
+
+               if (!group->core_timed_out) {
+                       MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler\n"));
+                       group->core_timed_out = MALI_FALSE;
+
+                       /* Success path; releases the group lock. */
+                       mali_group_complete_pp_and_unlock(group, MALI_TRUE, MALI_FALSE);
+
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                                     0, _mali_osk_get_tid(), 0, 0, 0);
+                       return;
+               }
+       }
+
+       /*
+        * Now lets look at the possible error cases (IRQ indicating error or timeout)
+        * END_OF_FRAME and HANG interrupts are not considered error.
+        */
+       irq_errors = irq_readout & ~(MALI200_REG_VAL_IRQ_END_OF_FRAME|MALI200_REG_VAL_IRQ_HANG);
+       if (0 != irq_errors) {
+               MALI_PRINT_ERROR(("Mali PP: Unexpected interrupt 0x%08X from core %s, aborting job\n",
+                                 irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
+               group->core_timed_out = MALI_FALSE;
+
+               /* Failure path; releases the group lock. */
+               mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
+
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), 0, 0, 0);
+               return;
+       } else if (group->core_timed_out) { /* SW timeout */
+               group->core_timed_out = MALI_FALSE;
+               /* Only abort if the timer was not re-armed and a job is
+                * actually still running. */
+               if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->pp_running_job) {
+                       MALI_PRINT(("Mali PP: Job %d timed out on core %s\n",
+                                   mali_pp_job_get_id(group->pp_running_job), mali_pp_get_hw_core_desc(core)));
+
+                       mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
+               } else {
+                       mali_group_unlock(group);
+               }
+
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), 0, 0, 0);
+               return;
+       }
+
+       /*
+        * We should never get here, re-enable interrupts and continue
+        */
+       if (0 == irq_readout) {
+               MALI_DEBUG_PRINT(3, ("Mali group: No interrupt found on core %s\n",
+                                    mali_pp_get_hw_core_desc(group->pp_core)));
+       } else {
+               MALI_PRINT_ERROR(("Mali group: Unhandled PP interrupt 0x%08X on %s\n", irq_readout,
+                                 mali_pp_get_hw_core_desc(group->pp_core)));
+       }
+       mali_pp_enable_interrupts(core);
+       mali_group_unlock(group);
+
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                     0, _mali_osk_get_tid(), 0, 0, 0);
+}
+
+/* Read back per-job PP state after the job stops running: cancel the
+ * timeout timer, update performance counters and emit profiling events.
+ * For a virtual group this iterates over every physical member core; for a
+ * physical group only its own core is processed.
+ * Caller must hold the group lock. */
+static void mali_group_post_process_job_pp(struct mali_group *group)
+{
+       MALI_ASSERT_GROUP_LOCKED(group);
+
+       /* Stop the timeout timer. */
+       _mali_osk_timer_del_async(group->timeout_timer);
+
+       if (NULL != group->pp_running_job) {
+               if (MALI_TRUE == mali_group_is_virtual(group)) {
+                       struct mali_group *child;
+                       struct mali_group *temp;
+
+                       /* update performance counters from each physical pp core within this virtual group */
+                       _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+                               mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core));
+                       }
+
+#if defined(CONFIG_MALI400_PROFILING)
+                       /* send profiling data per physical core */
+                       _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+                               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|
+                                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
+                                                             MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+                                                             mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+                                                             mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+                                                             mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+                                                             0, 0);
+                       }
+                       /* Report L2 counters for each referenced L2 cache,
+                        * but only when both counter sources are enabled. */
+                       if (0 != group->l2_cache_core_ref_count[0]) {
+                               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                                   (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+                                       mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+                               }
+                       }
+                       if (0 != group->l2_cache_core_ref_count[1]) {
+                               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+                                   (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
+                                       mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+                               }
+                       }
+
+#endif
+               } else {
+                       /* update performance counters for a physical group's pp core */
+                       mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|
+                                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core))|
+                                                     MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+                                                     mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
+                                                     mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
+                                                     mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+                                                     0, 0);
+
+                       if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                           (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+                               mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+                       }
+#endif
+               }
+       }
+}
+
+/* Callback for the group's job timeout timer (see timeout_timer in
+ * struct mali_group). Marks the core as timed out and defers the actual
+ * recovery to the relevant bottom half work item.
+ *
+ * @param data Opaque pointer to the struct mali_group whose timer fired
+ */
+static void mali_group_timeout(void *data)
+{
+	struct mali_group *group = (struct mali_group *)data;
+
+	/* Let the bottom half distinguish a timeout from a normal IRQ */
+	group->core_timed_out = MALI_TRUE;
+
+	if (NULL != group->gp_core) {
+		MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_gp_get_hw_core_desc(group->gp_core)));
+		_mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+	} else {
+		/* No GP core attached, so the timeout must concern the PP core */
+		MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_pp_get_hw_core_desc(group->pp_core)));
+		_mali_osk_wq_schedule_work(group->bottom_half_work_pp);
+	}
+}
+
+/* Zap the MMU TLB of this group if \a session is the group's current
+ * session. Takes the group lock; if zapping fails, falls back to page
+ * fault handling, which releases the lock before returning.
+ *
+ * @param group   Group whose TLB should be zapped
+ * @param session Session the zap applies to
+ */
+void mali_group_zap_session(struct mali_group *group, struct mali_session_data *session)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_POINTER(session);
+
+	/* Early out - safe even if mutex is not held */
+	if (group->session != session) return;
+
+	mali_group_lock(group);
+
+	mali_group_remove_session_if_unused(group, session);
+
+	/* Re-check under the lock: the session may have been detached above
+	 * or replaced before the lock was taken */
+	if (group->session == session) {
+		/* The Zap also does the stall and disable_stall */
+		mali_bool zap_success = mali_mmu_zap_tlb(group->mmu);
+		if (MALI_TRUE != zap_success) {
+			MALI_DEBUG_PRINT(2, ("Mali memory unmap failed. Doing pagefault handling.\n"));
+
+			/* This call unlocks the group on our behalf */
+			mali_group_mmu_page_fault_and_unlock(group);
+			return;
+		}
+	}
+
+	mali_group_unlock(group);
+}
+
+#if defined(CONFIG_MALI400_PROFILING)
+/* Emit the current counter values of the L2 cache with the given core
+ * number as a single profiling event. Counter values default to zero if
+ * no attached L2 cache matches the requested core number. */
+static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num)
+{
+	u32 src0 = 0;
+	u32 val0 = 0;
+	u32 src1 = 0;
+	u32 val1 = 0;
+	u32 event_id;
+
+	/* Map the L2 core number onto its profiling event channel;
+	 * unexpected core numbers are reported on the L20 channel. */
+	switch (core_num) {
+	case 1:
+		event_id = MALI_PROFILING_EVENT_TYPE_SINGLE |
+			   MALI_PROFILING_EVENT_CHANNEL_GPU |
+			   MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS;
+		break;
+	case 2:
+		event_id = MALI_PROFILING_EVENT_TYPE_SINGLE |
+			   MALI_PROFILING_EVENT_CHANNEL_GPU |
+			   MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS;
+		break;
+	case 0:
+	default:
+		event_id = MALI_PROFILING_EVENT_TYPE_SINGLE |
+			   MALI_PROFILING_EVENT_CHANNEL_GPU |
+			   MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
+		break;
+	}
+
+	/* Fetch counter values from whichever attached L2 cache matches the
+	 * requested core number. Core 0 is always l2_cache_core[0]; cores 1
+	 * and 2 may sit in either slot. */
+	if (0 == core_num) {
+		mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &src0, &val0, &src1, &val1);
+	} else if (1 == core_num || 2 == core_num) {
+		if (core_num == mali_l2_cache_get_id(group->l2_cache_core[0])) {
+			mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &src0, &val0, &src1, &val1);
+		} else if (core_num == mali_l2_cache_get_id(group->l2_cache_core[1])) {
+			mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &src0, &val0, &src1, &val1);
+		}
+	}
+
+	_mali_osk_profiling_add_event(event_id, src1 << 8 | src0, val0, val1, 0, 0);
+}
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+/* Query whether the group is available to the scheduler.
+ * Reads the group state under the group lock. */
+mali_bool mali_group_is_enabled(struct mali_group *group)
+{
+	mali_bool result;
+
+	MALI_DEBUG_ASSERT_POINTER(group);
+
+	mali_group_lock(group);
+	result = (MALI_GROUP_STATE_DISABLED == group->state) ? MALI_FALSE : MALI_TRUE;
+	mali_group_unlock(group);
+
+	return result;
+}
+
+/* Hand the group back to the scheduler that owns its core type so it
+ * can be used for jobs again. */
+void mali_group_enable(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT(   NULL != mali_group_get_pp_core(group)
+			     || NULL != mali_group_get_gp_core(group));
+
+	/* A group without a PP core must hold a GP core (asserted above) */
+	if (NULL == mali_group_get_pp_core(group)) {
+		mali_gp_scheduler_enable_group(group);
+	} else {
+		mali_pp_scheduler_enable_group(group);
+	}
+}
+
+/* Withdraw the group from the scheduler that owns its core type so it
+ * is no longer handed jobs. */
+void mali_group_disable(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT(   NULL != mali_group_get_pp_core(group)
+			     || NULL != mali_group_get_gp_core(group));
+
+	/* A group without a PP core must hold a GP core (asserted above) */
+	if (NULL == mali_group_get_pp_core(group)) {
+		mali_gp_scheduler_disable_group(group);
+	} else {
+		mali_pp_scheduler_disable_group(group);
+	}
+}
+
+/* Return the PM domain of the L2 cache this group is attached to.
+ * Only valid for groups with a single attached L2 cache (asserted). */
+static struct mali_pm_domain* mali_group_get_l2_domain(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+
+	/* l2_cache_core[0] stores the related l2 domain */
+	return group->l2_cache_core[0]->pm_domain;
+}
+
+/* Take a reference on both PM domains the group depends on: the domain
+ * of its L2 cache and the domain of its own cores. Must be balanced by
+ * mali_group_put_pm_domain_ref(). */
+void mali_group_get_pm_domain_ref(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+
+	/* Get group used l2 domain ref */
+	mali_pm_domain_ref_get(mali_group_get_l2_domain(group));
+	/* Get group used core domain ref */
+	mali_pm_domain_ref_get(group->pm_domain);
+}
+
+/* Release the PM domain references taken by mali_group_get_pm_domain_ref().
+ * Drops the refs in reverse order of acquisition (core domain first). */
+void mali_group_put_pm_domain_ref(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+
+	/* Put group used core domain ref */
+	mali_pm_domain_ref_put(group->pm_domain);
+	/* Put group used l2 domain ref */
+	mali_pm_domain_ref_put(mali_group_get_l2_domain(group));
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_group.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_group.h
new file mode 100644 (file)
index 0000000..9ef58f1
--- /dev/null
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_GROUP_H__
+#define __MALI_GROUP_H__
+
+#include "linux/jiffies.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_mmu.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_session.h"
+
+/**
+ * @brief Default max runtime [ms] for a core job - used by timeout timers
+ */
+#define MALI_MAX_JOB_RUNTIME_DEFAULT 4000
+
+/** @brief A mali group object represents a MMU and a PP and/or a GP core.
+ *
+ */
+#define MALI_MAX_NUMBER_OF_GROUPS 10
+
+/* Scheduling state of a group's core(s). */
+enum mali_group_core_state {
+	MALI_GROUP_STATE_IDLE,
+	MALI_GROUP_STATE_WORKING,
+	MALI_GROUP_STATE_OOM,
+	/* States used while a physical group is (becoming or ceasing to be)
+	 * part of a virtual group; see mali_group_is_in_virtual() */
+	MALI_GROUP_STATE_IN_VIRTUAL,
+	MALI_GROUP_STATE_JOINING_VIRTUAL,
+	MALI_GROUP_STATE_LEAVING_VIRTUAL,
+	/* Group is withdrawn from the scheduler; see mali_group_is_enabled() */
+	MALI_GROUP_STATE_DISABLED,
+};
+
+/* Forward declaration from mali_pm_domain.h */
+struct mali_pm_domain;
+
+/**
+ * The structure represents a render group
+ * A render group is defined by all the cores that share the same Mali MMU
+ */
+
+struct mali_group {
+	struct mali_mmu_core        *mmu;
+	struct mali_session_data    *session;
+
+	/* Power and scheduler state of the group */
+	mali_bool                   power_is_on;
+	enum mali_group_core_state  state;
+
+	/* GP core and the job currently running on it (if any) */
+	struct mali_gp_core         *gp_core;
+	struct mali_gp_job          *gp_running_job;
+
+	/* PP core, the job currently running on it, and which sub job */
+	struct mali_pp_core         *pp_core;
+	struct mali_pp_job          *pp_running_job;
+	u32                         pp_running_sub_job;
+
+	/* L2 caches reachable from this group, with per-cache ref counts */
+	struct mali_l2_cache_core   *l2_cache_core[2];
+	u32                         l2_cache_core_ref_count[2];
+
+	/* A non-NULL dlbu_core identifies a virtual group on Mali-450
+	 * (see mali_group_is_virtual()) */
+	struct mali_dlbu_core       *dlbu_core;
+	struct mali_bcast_unit      *bcast_core;
+
+#ifdef MALI_UPPER_HALF_SCHEDULING
+	_mali_osk_spinlock_irq_t        *lock;
+#else
+	_mali_osk_spinlock_t            *lock;
+#endif
+
+	_mali_osk_list_t            pp_scheduler_list;
+
+	/* List used for virtual groups. For a virtual group, the list represents the
+	 * head element. */
+	_mali_osk_list_t            group_list;
+
+	struct mali_group           *pm_domain_list;
+	struct mali_pm_domain       *pm_domain;
+
+	/* Parent virtual group (if any) */
+	struct mali_group           *parent_group;
+
+	/* Bottom half (deferred) interrupt work, one item per core type */
+	_mali_osk_wq_work_t         *bottom_half_work_mmu;
+	_mali_osk_wq_work_t         *bottom_half_work_gp;
+	_mali_osk_wq_work_t         *bottom_half_work_pp;
+
+	/* Job runtime watchdog; core_timed_out is raised by the timeout
+	 * callback before the bottom half is scheduled */
+	_mali_osk_timer_t           *timeout_timer;
+	mali_bool                   core_timed_out;
+};
+
+/** @brief Create a new Mali group object
+ *
+ * @param cluster Pointer to the cluster to which the group is connected.
+ * @param mmu Pointer to the MMU that defines this group
+ * @return A pointer to a new group object
+ */
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
+                                     struct mali_dlbu_core *dlbu,
+                                     struct mali_bcast_unit *bcast);
+
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core* mmu_core);
+void mali_group_remove_mmu_core(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core* gp_core);
+void mali_group_remove_gp_core(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core* pp_core);
+void mali_group_remove_pp_core(struct mali_group *group);
+
+void mali_group_set_pm_domain(struct mali_group *group, struct mali_pm_domain *domain);
+
+void mali_group_delete(struct mali_group *group);
+
+/** @brief Virtual groups */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child, mali_bool update_hw);
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child);
+struct mali_group *mali_group_acquire_group(struct mali_group *parent);
+
+/* A virtual group exists only on Mali-450 and is identified by having a
+ * DLBU core attached; on other configurations no group is virtual. */
+MALI_STATIC_INLINE mali_bool mali_group_is_virtual(struct mali_group *group)
+{
+#if defined(CONFIG_MALI450)
+	return (NULL != group->dlbu_core);
+#else
+	return MALI_FALSE;
+#endif
+}
+
+/** @brief Check if a group is considered as part of a virtual group
+ *
+ * @note A group is considered to be "part of" a virtual group also during the transition
+ *       in to / out of the virtual group.
+ */
+MALI_STATIC_INLINE mali_bool mali_group_is_in_virtual(struct mali_group *group)
+{
+#if defined(CONFIG_MALI450)
+	/* Covers steady-state membership and both transition states */
+	return (MALI_GROUP_STATE_IN_VIRTUAL == group->state ||
+		MALI_GROUP_STATE_JOINING_VIRTUAL == group->state ||
+		MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state);
+#else
+	return MALI_FALSE;
+#endif
+}
+
+/** @brief Reset group
+ *
+ * This function will reset the entire group, including all the cores present in the group.
+ *
+ * @param group Pointer to the group to reset
+ */
+void mali_group_reset(struct mali_group *group);
+
+/** @brief Zap MMU TLB on all groups
+ *
+ * Zap TLB on group if \a session is active.
+ */
+void mali_group_zap_session(struct mali_group* group, struct mali_session_data *session);
+
+/** @brief Get pointer to GP core object
+ */
+struct mali_gp_core* mali_group_get_gp_core(struct mali_group *group);
+
+/** @brief Get pointer to PP core object
+ */
+struct mali_pp_core* mali_group_get_pp_core(struct mali_group *group);
+
+/** @brief Lock group object
+ *
+ * Most group functions will lock the group object themselves. The exception is
+ * the group_bottom_half which requires the group to be locked on entry.
+ *
+ * @param group Pointer to group to lock
+ */
+void mali_group_lock(struct mali_group *group);
+
+/** @brief Unlock group object
+ *
+ * @param group Pointer to group to unlock
+ */
+void mali_group_unlock(struct mali_group *group);
+#ifdef DEBUG
+void mali_group_assert_locked(struct mali_group *group);
+#define MALI_ASSERT_GROUP_LOCKED(group) mali_group_assert_locked(group)
+#else
+#define MALI_ASSERT_GROUP_LOCKED(group)
+#endif
+
+/** @brief Start GP job
+ */
+void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job);
+/** @brief Start fragment of PP job
+ */
+void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job);
+
+/** @brief Resume GP job that suspended waiting for more heap memory
+ */
+struct mali_gp_job *mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr);
+/** @brief Abort GP job
+ *
+ * Used to abort suspended OOM jobs when user space failed to allocate more memory.
+ */
+void mali_group_abort_gp_job(struct mali_group *group, u32 job_id);
+/** @brief Abort all GP jobs from \a session
+ *
+ * Used on session close when terminating all running and queued jobs from \a session.
+ */
+void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session);
+
+mali_bool mali_group_power_is_on(struct mali_group *group);
+void mali_group_power_on_group(struct mali_group *group);
+void mali_group_power_off_group(struct mali_group *group, mali_bool power_status);
+void mali_group_power_on(void);
+
+/** @brief Prepare group for power off
+ *
+ * Update the group's state and prepare for the group to be powered off.
+ *
+ * If do_power_change is MALI_FALSE group session will be set to NULL so that
+ * no more activity will happen to this group, but the power state flag will be
+ * left unchanged.
+ *
+ * @do_power_change MALI_TRUE if power status is to be updated
+ */
+void mali_group_power_off(mali_bool do_power_change);
+
+struct mali_group *mali_group_get_glob_group(u32 index);
+u32 mali_group_get_glob_num_groups(void);
+
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size);
+
+/* MMU-related functions */
+_mali_osk_errcode_t mali_group_upper_half_mmu(void * data);
+
+/* GP-related functions */
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data);
+
+/* PP-related functions */
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data);
+
+/** @brief Check if group is enabled
+ *
+ * @param group group to check
+ * @return MALI_TRUE if enabled, MALI_FALSE if not
+ */
+mali_bool mali_group_is_enabled(struct mali_group *group);
+
+/** @brief Enable group
+ *
+ * An enabled group is put on the idle scheduler list and can be used to handle jobs.  Does nothing if
+ * group is already enabled.
+ *
+ * @param group group to enable
+ */
+void mali_group_enable(struct mali_group *group);
+
+/** @brief Disable group
+ *
+ * A disabled group will no longer be used by the scheduler.  If part of a virtual group, the group
+ * will be removed before being disabled.  Cores part of a disabled group is safe to power down.
+ *
+ * @param group group to disable
+ */
+void mali_group_disable(struct mali_group *group);
+
+/* Disable the virtual group if it has no member groups left.
+ * Caller must hold the group lock.
+ *
+ * @return MALI_TRUE if the group was empty and has been disabled,
+ *         MALI_FALSE otherwise
+ */
+MALI_STATIC_INLINE mali_bool mali_group_virtual_disable_if_empty(struct mali_group *group)
+{
+	MALI_ASSERT_GROUP_LOCKED(group);
+	MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+
+	if (!_mali_osk_list_empty(&group->group_list)) {
+		return MALI_FALSE;
+	}
+
+	group->state = MALI_GROUP_STATE_DISABLED;
+	group->session = NULL;
+
+	return MALI_TRUE;
+}
+
+/* Re-enable the virtual group if it has no member groups.
+ * Caller must hold the group lock; the group is expected to be in the
+ * DISABLED state when empty (asserted).
+ *
+ * @return MALI_TRUE if the group was empty and has been set idle,
+ *         MALI_FALSE otherwise
+ */
+MALI_STATIC_INLINE mali_bool mali_group_virtual_enable_if_empty(struct mali_group *group)
+{
+	MALI_ASSERT_GROUP_LOCKED(group);
+	MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+
+	if (!_mali_osk_list_empty(&group->group_list)) {
+		return MALI_FALSE;
+	}
+
+	MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
+	group->state = MALI_GROUP_STATE_IDLE;
+
+	return MALI_TRUE;
+}
+
+/* Get group used l2 domain and core domain ref */
+void mali_group_get_pm_domain_ref(struct mali_group *group);
+/* Put group used l2 domain and core domain ref */
+void mali_group_put_pm_domain_ref(struct mali_group *group);
+
+#endif /* __MALI_GROUP_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_hw_core.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_hw_core.c
new file mode 100644 (file)
index 0000000..706de8a
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_hw_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_osk_mali.h"
+
+/* Initialize the common HW core bookkeeping, reserve the register
+ * region and map it into kernel virtual memory.
+ *
+ * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_FAULT if the
+ *         region could not be reserved or mapped (nothing left mapped)
+ */
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size)
+{
+	core->phys_addr = resource->base;
+	core->phys_offset = resource->base - _mali_osk_resource_base_address();
+	core->description = resource->description;
+	core->size = reg_size;
+
+	MALI_DEBUG_ASSERT(core->phys_offset < core->phys_addr);
+
+	if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(core->phys_addr, core->size, core->description)) {
+		MALI_PRINT_ERROR(("Failed to request memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	core->mapped_registers = _mali_osk_mem_mapioregion(core->phys_addr, core->size, core->description);
+	if (NULL != core->mapped_registers) {
+		return _MALI_OSK_ERR_OK;
+	}
+
+	/* Mapping failed: report and release the region reserved above */
+	MALI_PRINT_ERROR(("Failed to map memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+	_mali_osk_mem_unreqregion(core->phys_addr, core->size);
+
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/* Tear down a HW core created by mali_hw_core_create(): unmap the
+ * register window and release the reserved memory region. */
+void mali_hw_core_delete(struct mali_hw_core *core)
+{
+	_mali_osk_mem_unmapioregion(core->phys_addr, core->size, core->mapped_registers);
+	core->mapped_registers = NULL;
+	_mali_osk_mem_unreqregion(core->phys_addr, core->size);
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_hw_core.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_hw_core.h
new file mode 100644 (file)
index 0000000..3514d78
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_HW_CORE_H__
+#define __MALI_HW_CORE_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * The common parts for all Mali HW cores (GP, PP, MMU, L2 and PMU)
+ * This struct is embedded inside all core specific structs.
+ */
+struct mali_hw_core {
+       u32 phys_addr;                    /**< Physical address of the registers */
+       u32 phys_offset;                  /**< Offset from start of Mali to registers */
+       u32 size;                         /**< Size of registers */
+       mali_io_address mapped_registers; /**< Virtual mapping of the registers */
+       const char* description;          /**< Name of unit (as specified in device configuration) */
+};
+
+#define MALI_REG_POLL_COUNT_FAST 1000
+#define MALI_REG_POLL_COUNT_SLOW 1000000
+
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size);
+void mali_hw_core_delete(struct mali_hw_core *core);
+
+/* Read a 32-bit register at the given offset from the core's register base. */
+MALI_STATIC_INLINE u32 mali_hw_core_register_read(struct mali_hw_core *core, u32 relative_address)
+{
+	u32 read_val;
+	read_val = _mali_osk_mem_ioread32(core->mapped_registers, relative_address);
+	MALI_DEBUG_PRINT(6, ("register_read for core %s, relative addr=0x%04X, val=0x%08X\n",
+			     core->description, relative_address, read_val));
+	return read_val;
+}
+
+/* Write a 32-bit register using the relaxed (unordered) I/O variant. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+	MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n",
+			     core->description, relative_address, new_val));
+	_mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+}
+
+/* Conditionally write a register.
+ * The register will only be written if the new value is different from old_val.
+ * Note: old_val is passed by value and is NOT updated here; the caller is
+ * responsible for keeping its shadow copy of the register in sync. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 new_val, const u32 old_val)
+{
+	MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n",
+			     core->description, relative_address, new_val));
+	if(old_val != new_val) {
+		_mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+	}
+}
+
+
+/* Write a 32-bit register using the ordered I/O variant. */
+MALI_STATIC_INLINE void mali_hw_core_register_write(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+	MALI_DEBUG_PRINT(6, ("register_write for core %s, relative addr=0x%04X, val=0x%08X\n",
+			     core->description, relative_address, new_val));
+	_mali_osk_mem_iowrite32(core->mapped_registers, relative_address, new_val);
+}
+
+/* Write nr_of_regs consecutive 32-bit registers starting at
+ * relative_address, one relaxed write per register. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs)
+{
+	u32 i;
+	MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+			     core->description,relative_address, nr_of_regs));
+
+	/* Do not use burst writes against the registers */
+	for (i = 0; i< nr_of_regs; i++) {
+		mali_hw_core_register_write_relaxed(core, relative_address + i*4, write_array[i]);
+	}
+}
+
+/* Conditionally write a set of registers.
+ * Each register is written only if the new value differs from the value
+ * in old_array. Note: old_array is const and is NOT updated here; the
+ * caller is responsible for keeping its shadow copies in sync. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs, const u32* old_array)
+{
+	u32 i;
+	MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+			     core->description,relative_address, nr_of_regs));
+
+	/* Do not use burst writes against the registers */
+	for (i = 0; i< nr_of_regs; i++) {
+		if(old_array[i] != write_array[i]) {
+			mali_hw_core_register_write_relaxed(core, relative_address + i*4, write_array[i]);
+		}
+	}
+}
+
+#endif /* __MALI_HW_CORE_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_common.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_common.h
new file mode 100644 (file)
index 0000000..08a3a88
--- /dev/null
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_COMMON_H__
+#define __MALI_KERNEL_COMMON_H__
+
+#include "mali_osk.h"
+
+/* Make sure debug is defined when it should be */
+#ifndef DEBUG
+#if defined(_DEBUG)
+#define DEBUG
+#endif
+#endif
+
+/* MALI_SEC */
+/* Macro for generating a kernel panic.
+ * Turned on off by compile-time Makefile settings
+ */
+#if defined(USING_KERNEL_PANIC)
+#include <linux/kernel.h>
+       #define MALI_PANIC(fmt, args...) panic( fmt, ## args );
+#else
+       #define MALI_PANIC(fmt, args...) 
+#endif
+
+/* The file include several useful macros for error checking, debugging and printing.
+ * - MALI_PRINTF(...)           Do not use this function: Will be included in Release builds.
+ * - MALI_DEBUG_PRINT(nr, (X) ) Prints the second argument if nr<=MALI_DEBUG_LEVEL.
+ * - MALI_DEBUG_ERROR( (X) )    Prints an errortext, a source trace, and the given error message.
+ * - MALI_DEBUG_ASSERT(exp,(X)) If the asserted expr is false, the program will exit.
+ * - MALI_DEBUG_ASSERT_POINTER(pointer)  Triggers if the pointer is a zero pointer.
+ * - MALI_DEBUG_CODE( X )       The code inside the macro is only compiled in Debug builds.
+ *
+ * The (X) means that you must add an extra parenthesis around the argumentlist.
+ *
+ * The  printf function: MALI_PRINTF(...) is routed to _mali_osk_debugmsg
+ *
+ * Suggested range for the DEBUG-LEVEL is [1:6] where
+ * [1:2] Is messages with highest priority, indicate possible errors.
+ * [3:4] Is messages with medium priority, output important variables.
+ * [5:6] Is messages with low priority, used during extensive debugging.
+ */
+
+/**
+*  Fundamental error macro. Reports an error code. This is abstracted to allow us to
+*  easily switch to a different error reporting method if we want, and also to allow
+*  us to search for error returns easily.
+*
+*  Note no closing semicolon - this is supplied in typical usage:
+*
+*  MALI_ERROR(MALI_ERROR_OUT_OF_MEMORY);
+*/
+#define MALI_ERROR(error_code) return (error_code)
+
+/**
+ *  Basic error macro, to indicate success.
+ *  Note no closing semicolon - this is supplied in typical usage:
+ *
+ *  MALI_SUCCESS;
+ */
+#define MALI_SUCCESS MALI_ERROR(_MALI_OSK_ERR_OK)
+
+/**
+ *     Basic error macro. This checks whether the given condition is true, and if not returns
+ *     from this function with the supplied error code. This is a macro so that we can override it
+ *     for stress testing.
+ *
+ *     Note that this uses the do-while-0 wrapping to ensure that we don't get problems with dangling
+ *     else clauses. Note also no closing semicolon - this is supplied in typical usage:
+ *
+ *     MALI_CHECK((p!=NULL), ERROR_NO_OBJECT);
+ */
+#define MALI_CHECK(condition, error_code) do { if(!(condition)) MALI_ERROR(error_code); } while(0)
+
+/**
+ *     Error propagation macro. If the expression given is anything other than _MALI_OSK_NO_ERROR,
+ *     then the value is returned from the enclosing function as an error code. This effectively
+ *     acts as a guard clause, and propagates error values up the call stack. This uses a
+ *     temporary value to ensure that the error expression is not evaluated twice.
+ *  If the counter for forcing a failure has been set using _mali_force_error, this error will be
+ *  returned without evaluating the expression in MALI_CHECK_NO_ERROR
+ */
+#define MALI_CHECK_NO_ERROR(expression) \
+    do { _mali_osk_errcode_t _check_no_error_result=(expression); \
+         if(_check_no_error_result != _MALI_OSK_ERR_OK) \
+         MALI_ERROR(_check_no_error_result); \
+    } while(0)
+
+/**
+ *  Pointer check macro. Checks non-null pointer.
+ */
+#define MALI_CHECK_NON_NULL(pointer, error_code) MALI_CHECK( ((pointer)!=NULL), (error_code) )
+
+/**
+ *     Error macro with goto. This checks whether the given condition is true, and if not jumps
+ *     to the specified label using a goto. The label must therefore be local to the function in
+ *     which this macro appears. This is most usually used to execute some clean-up code before
+ *     exiting with a call to ERROR.
+ *
+ *     Like the other macros, this is a macro to allow us to override the condition if we wish,
+ *     e.g. to force an error during stress testing.
+ */
+#define MALI_CHECK_GOTO(condition, label) do { if(!(condition)) goto label; } while(0)
+
+/**
+ *  Explicitly ignore a parameter passed into a function, to suppress compiler warnings.
+ *  Should only be used with parameter names.
+ */
+#define MALI_IGNORE(x) x=x
+
+#define MALI_PRINTF(args) _mali_osk_dbgmsg args;
+
+#define MALI_PRINT_ERROR(args) do{ \
+       MALI_PRINTF(("Mali: ERR: %s\n" ,__FILE__)); \
+       MALI_PRINTF(("           %s()%4d\n           ", __FUNCTION__, __LINE__)) ; \
+       MALI_PRINTF(args); \
+       MALI_PRINTF(("\n")); \
+       } while(0)
+
+#define MALI_PRINT(args) do{ \
+       MALI_PRINTF(("Mali: ")); \
+       MALI_PRINTF(args); \
+       } while (0)
+
+#ifdef DEBUG
+#ifndef mali_debug_level
+extern int mali_debug_level;
+#endif
+
+#define MALI_DEBUG_CODE(code) code
+#define MALI_DEBUG_PRINT(level, args)  do { \
+	if((level) <=  mali_debug_level)\
+        {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } \
+	} while (0)
+
+#define MALI_DEBUG_PRINT_ERROR(args) MALI_PRINT_ERROR(args)
+
+#define MALI_DEBUG_PRINT_IF(level,condition,args)  \
+	if((condition)&&((level) <=  mali_debug_level))\
+        {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+#define MALI_DEBUG_PRINT_ELSE(level, args)\
+	else if((level) <=  mali_debug_level)\
+    { MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+/**
+ * @note these variants of DEBUG ASSERTS will cause a debugger breakpoint
+ * to be entered (see _mali_osk_break() ). An alternative would be to call
+ * _mali_osk_abort(), on OSs that support it.
+ */
+#define MALI_DEBUG_PRINT_ASSERT(condition, args) do  {if( !(condition)) { MALI_PRINT_ERROR(args); _mali_osk_break(); } } while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do  {if( (pointer)== NULL) {MALI_PRINT_ERROR(("NULL pointer " #pointer)); _mali_osk_break();} } while(0)
+#define MALI_DEBUG_ASSERT(condition) do  {if( !(condition)) {MALI_PRINT_ERROR(("ASSERT failed: " #condition )); _mali_osk_break();} } while(0)
+
+#else /* DEBUG */
+
+/* Release builds compile all debug output and asserts away.  The
+ * parameter lists below must stay in sync with the DEBUG variants
+ * above, or call sites only compile in one configuration. */
+#define MALI_DEBUG_CODE(code)
+#define MALI_DEBUG_PRINT(level,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ERROR(args) do {} while(0)
+#define MALI_DEBUG_PRINT_IF(level,condition,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ELSE(level,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ASSERT(condition,args) do {} while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#define MALI_DEBUG_ASSERT(condition) do {} while(0)
+
+#endif /* DEBUG */
+
+/**
+ * variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows the GCC compiler to generate a warning. Other compilers may
+ * not support this so we define it here as an empty macro if the compiler doesn't
+ * define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __MALI_KERNEL_COMMON_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_core.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_core.c
new file mode 100644 (file)
index 0000000..d4900b3
--- /dev/null
@@ -0,0 +1,1399 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_kernel_core.h"
+#include "mali_memory.h"
+#include "mali_mem_validation.h"
+#include "mali_mmu.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_dlbu.h"
+#include "mali_broadcast.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_pmu.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_utilization.h"
+#include "mali_l2_cache.h"
+#include "mali_dma.h"
+#include "mali_timeline.h"
+#include "mali_soft_job.h"
+#include "mali_pm_domain.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include "mali_profiling_internal.h"
+#endif
+
+
+/* Mali GPU memory. Real values come from module parameter or from device specific data */
+unsigned int mali_dedicated_mem_start = 0;
+unsigned int mali_dedicated_mem_size = 0;
+unsigned int mali_shared_mem_size = 0;
+
+/* Frame buffer memory to be accessible by Mali GPU */
+int mali_fb_start = 0;
+int mali_fb_size = 0;
+
+/* Mali max job runtime */
+extern int mali_max_job_runtime;
+
+/** Start profiling from module load? */
+int mali_boot_profiling = 0;
+
+/** Limits for the number of PP cores behind each L2 cache. */
+int mali_max_pp_cores_group_1 = 0xFF;
+int mali_max_pp_cores_group_2 = 0xFF;
+
+/* Number of PP cores actually brought up per group; updated during
+ * mali_parse_config_groups() and used to clamp the max limits above. */
+int mali_inited_pp_cores_group_1 = 0;
+int mali_inited_pp_cores_group_2 = 0;
+
+/* Product/version info discovered by mali_parse_product_info(); the base
+ * address is cached by mali_set_global_gpu_base_address() and used by all
+ * resource lookups in this file. */
+static _mali_product_id_t global_product_id = _MALI_PRODUCT_ID_UNKNOWN;
+static u32 global_gpu_base_address = 0;
+static u32 global_gpu_major_version = 0;
+static u32 global_gpu_minor_version = 0;
+
+mali_bool mali_gpu_class_is_mali450 = MALI_FALSE;
+
+/**
+ * Cache the GPU register base address reported by the OSK layer.
+ *
+ * @return _MALI_OSK_ERR_OK on success, or _MALI_OSK_ERR_ITEM_NOT_FOUND
+ *         when no base address is available.
+ */
+static _mali_osk_errcode_t mali_set_global_gpu_base_address(void)
+{
+       global_gpu_base_address = _mali_osk_resource_base_address();
+
+       return (0 != global_gpu_base_address) ?
+              _MALI_OSK_ERR_OK : _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
+/**
+ * Map a PP core's register offset (relative to the GPU base address) to
+ * its broadcast-unit ID bit.  Offsets 0x20000-0x26000 are aliased
+ * mappings of 0x08000-0x0E000 and yield the same bit.
+ *
+ * @return The broadcast ID bit, or 0 for an unrecognised offset.
+ */
+static u32 mali_get_bcast_id(_mali_osk_resource_t *resource_pp)
+{
+       u32 core_offset = resource_pp->base - global_gpu_base_address;
+
+       if (0x08000 == core_offset || 0x20000 == core_offset) return 0x01;
+       if (0x0A000 == core_offset || 0x22000 == core_offset) return 0x02;
+       if (0x0C000 == core_offset || 0x24000 == core_offset) return 0x04;
+       if (0x0E000 == core_offset || 0x26000 == core_offset) return 0x08;
+       if (0x28000 == core_offset) return 0x10;
+       if (0x2A000 == core_offset) return 0x20;
+       if (0x2C000 == core_offset) return 0x40;
+       if (0x2E000 == core_offset) return 0x80;
+
+       return 0;
+}
+
+/**
+ * Probe the first PP core's version register to determine the GPU
+ * product and fill in global_product_id and the global major/minor
+ * version numbers.
+ *
+ * @return _MALI_OSK_ERR_OK when a supported product was identified,
+ *         _MALI_OSK_ERR_FAULT otherwise.  Mali-200 is explicitly
+ *         rejected and aborts via _mali_osk_abort().
+ */
+static _mali_osk_errcode_t mali_parse_product_info(void)
+{
+       /*
+        * Mali-200 has the PP core first, while Mali-300, Mali-400 and Mali-450 have the GP core first.
+        * Look at the version register for the first PP core in order to determine the GPU HW revision.
+        */
+
+       u32 first_pp_offset;
+       _mali_osk_resource_t first_pp_resource;
+
+       /* Find out where the first PP core is located */
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x8000, NULL)) {
+               /* Mali-300/400/450 */
+               first_pp_offset = 0x8000;
+       } else {
+               /* Mali-200 */
+               first_pp_offset = 0x0000;
+       }
+
+       /* Find the first PP core resource (again) */
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + first_pp_offset, &first_pp_resource)) {
+               /* Create a dummy PP object for this core so that we can read the version register */
+               struct mali_group *group = mali_group_create(NULL, NULL, NULL);
+               if (NULL != group) {
+                       struct mali_pp_core *pp_core = mali_pp_create(&first_pp_resource, group, MALI_FALSE, mali_get_bcast_id(&first_pp_resource));
+                       if (NULL != pp_core) {
+                               u32 pp_version = mali_pp_core_get_version(pp_core);
+                               /* The dummy group (and its PP core) is only needed to read
+                                * the version register; release it before decoding. */
+                               mali_group_delete(group);
+
+                               /* Version register layout: [31:16] product id,
+                                * [15:8] major revision, [7:0] minor revision. */
+                               global_gpu_major_version = (pp_version >> 8) & 0xFF;
+                               global_gpu_minor_version = pp_version & 0xFF;
+
+                               switch (pp_version >> 16) {
+                               case MALI200_PP_PRODUCT_ID:
+                                       global_product_id = _MALI_PRODUCT_ID_MALI200;
+                                       MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-200 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+                                       MALI_PRINT_ERROR(("Mali-200 is not supported by this driver.\n"));
+                                       _mali_osk_abort();
+                                       break;
+                               case MALI300_PP_PRODUCT_ID:
+                                       global_product_id = _MALI_PRODUCT_ID_MALI300;
+                                       MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-300 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+                                       break;
+                               case MALI400_PP_PRODUCT_ID:
+                                       global_product_id = _MALI_PRODUCT_ID_MALI400;
+                                       MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-400 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+                                       break;
+                               case MALI450_PP_PRODUCT_ID:
+                                       global_product_id = _MALI_PRODUCT_ID_MALI450;
+                                       MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-450 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+                                       break;
+                               default:
+                                       MALI_DEBUG_PRINT(2, ("Found unknown Mali GPU (r%up%u)\n", global_gpu_major_version, global_gpu_minor_version));
+                                       return _MALI_OSK_ERR_FAULT;
+                               }
+
+                               return _MALI_OSK_ERR_OK;
+                       } else {
+                               MALI_PRINT_ERROR(("Failed to create initial PP object\n"));
+                       }
+               } else {
+                       MALI_PRINT_ERROR(("Failed to create initial group object\n"));
+               }
+       } else {
+               MALI_PRINT_ERROR(("First PP core not specified in config file\n"));
+       }
+
+       return _MALI_OSK_ERR_FAULT;
+}
+
+
+/**
+ * Count the PP core and L2 cache resources present at their fixed
+ * register offsets.  Both outputs are always written (zeroed first).
+ *
+ * @param pp_count Receives the number of PP core resources found.
+ * @param l2_count Receives the number of L2 cache resources found.
+ */
+static void mali_resource_count(u32 *pp_count, u32 *l2_count)
+{
+       /* Fixed offsets (relative to the GPU base) of the possible PP
+        * cores and L2 caches. */
+       static const u32 pp_offsets[] = {
+               0x08000, 0x0A000, 0x0C000, 0x0E000,
+               0x28000, 0x2A000, 0x2C000, 0x2E000
+       };
+       static const u32 l2_offsets[] = { 0x1000, 0x10000, 0x11000 };
+       u32 i;
+
+       *pp_count = 0;
+       *l2_count = 0;
+
+       for (i = 0; i < sizeof(pp_offsets) / sizeof(pp_offsets[0]); i++) {
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + pp_offsets[i], NULL)) {
+                       ++(*pp_count);
+               }
+       }
+
+       for (i = 0; i < sizeof(l2_offsets) / sizeof(l2_offsets[0]); i++) {
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + l2_offsets[i], NULL)) {
+                       ++(*l2_count);
+               }
+       }
+}
+
+/**
+ * Tear down every group object.  Slot 0 is fetched repeatedly because
+ * deleting a group removes it from the global list.
+ */
+static void mali_delete_groups(void)
+{
+       struct mali_group *group;
+
+       while (NULL != (group = mali_group_get_glob_group(0))) {
+               mali_group_delete(group);
+       }
+
+       MALI_DEBUG_ASSERT(0 == mali_group_get_glob_num_groups());
+}
+
+/**
+ * Tear down every L2 cache core object.  Slot 0 is fetched repeatedly
+ * because deletion removes the core from the global list.
+ */
+static void mali_delete_l2_cache_cores(void)
+{
+       struct mali_l2_cache_core *l2;
+
+       while (NULL != (l2 = mali_l2_cache_core_get_glob_l2_core(0))) {
+               mali_l2_cache_delete(l2);
+       }
+
+       MALI_DEBUG_ASSERT(0 == mali_l2_cache_core_get_glob_num_l2_cores());
+}
+
+/**
+ * Create an L2 cache core object from the given resource.
+ *
+ * @param resource L2 cache register resource, or NULL.
+ * @return The new L2 cache core, or NULL when @a resource is NULL or
+ *         creation failed (an error is logged in the latter case).
+ */
+static struct mali_l2_cache_core *mali_create_l2_cache_core(_mali_osk_resource_t *resource)
+{
+       struct mali_l2_cache_core *l2_cache = NULL;
+
+       if (NULL != resource) {
+
+               MALI_DEBUG_PRINT(3, ("Found L2 cache %s\n", resource->description));
+
+               l2_cache = mali_l2_cache_create(resource);
+               if (NULL == l2_cache) {
+                       MALI_PRINT_ERROR(("Failed to create L2 cache object\n"));
+                       return NULL;
+               }
+
+               /* BUGFIX: the success message used to be printed even when
+                * no resource was supplied and NULL was returned; only
+                * report creation when an object actually exists. */
+               MALI_DEBUG_PRINT(3, ("Created L2 cache core object\n"));
+       }
+
+       return l2_cache;
+}
+
+/**
+ * Create the L2 cache core objects for the detected GPU and register
+ * them with their power-management domains.
+ *
+ * Mali-400: a single mandatory L2 at offset 0x1000.
+ * Mali-450: mandatory L2 for GP (0x10000) and PP group 0 (0x1000),
+ *           plus an optional L2 for PP group 1 (0x11000).
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT when a
+ *         mandatory L2 is missing or creation fails.
+ */
+static _mali_osk_errcode_t mali_parse_config_l2_cache(void)
+{
+       struct mali_l2_cache_core *l2_cache = NULL;
+
+       if (mali_is_mali400()) {
+               _mali_osk_resource_t l2_resource;
+               if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(global_gpu_base_address + 0x1000, &l2_resource)) {
+                       MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache in config file\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
+
+               l2_cache = mali_create_l2_cache_core(&l2_resource);
+               if (NULL == l2_cache) {
+                       return _MALI_OSK_ERR_FAULT;
+               }
+               mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L20_DOMAIN_INDEX), l2_cache);
+       } else if (mali_is_mali450()) {
+               /*
+                * L2 for GP    at 0x10000
+                * L2 for PP0-3 at 0x01000
+                * L2 for PP4-7 at 0x11000 (optional)
+                */
+
+               _mali_osk_resource_t l2_gp_resource;
+               _mali_osk_resource_t l2_pp_grp0_resource;
+               _mali_osk_resource_t l2_pp_grp1_resource;
+
+               /* Make cluster for GP's L2 */
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, &l2_gp_resource)) {
+                       MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for GP\n"));
+                       l2_cache = mali_create_l2_cache_core(&l2_gp_resource);
+                       if (NULL == l2_cache) {
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+                       mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L20_DOMAIN_INDEX), l2_cache);
+               } else {
+                       MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for GP in config file\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
+
+               /* Find corresponding l2 domain */
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, &l2_pp_grp0_resource)) {
+                       MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 0\n"));
+                       l2_cache = mali_create_l2_cache_core(&l2_pp_grp0_resource);
+                       if (NULL == l2_cache) {
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+                       mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L21_DOMAIN_INDEX), l2_cache);
+               } else {
+                       MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for PP group 0 in config file\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
+
+               /* Second PP core group is optional, don't fail if we don't find it */
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, &l2_pp_grp1_resource)) {
+                       MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 1\n"));
+                       l2_cache = mali_create_l2_cache_core(&l2_pp_grp1_resource);
+                       if (NULL == l2_cache) {
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+                       mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L22_DOMAIN_INDEX), l2_cache);
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Create a physical group consisting of an MMU and, optionally, a GP
+ * and/or a PP core attached to the given L2 cache.
+ *
+ * @param cache        L2 cache the group belongs to.
+ * @param resource_mmu MMU register resource (mandatory).
+ * @param resource_gp  GP core resource, or NULL for a PP-only group.
+ * @param resource_pp  PP core resource, or NULL for a GP-only group.
+ * @return The new (reset) group, or NULL on failure.  On failure the
+ *         partially constructed group is released via mali_group_delete().
+ */
+static struct mali_group *mali_create_group(struct mali_l2_cache_core *cache,
+        _mali_osk_resource_t *resource_mmu,
+        _mali_osk_resource_t *resource_gp,
+        _mali_osk_resource_t *resource_pp)
+{
+       struct mali_mmu_core *mmu;
+       struct mali_group *group;
+
+       MALI_DEBUG_PRINT(3, ("Starting new group for MMU %s\n", resource_mmu->description));
+
+       /* Create the group object */
+       group = mali_group_create(cache, NULL, NULL);
+       if (NULL == group) {
+               MALI_PRINT_ERROR(("Failed to create group object for MMU %s\n", resource_mmu->description));
+               return NULL;
+       }
+
+       /* Create the MMU object inside group */
+       mmu = mali_mmu_create(resource_mmu, group, MALI_FALSE);
+       if (NULL == mmu) {
+               MALI_PRINT_ERROR(("Failed to create MMU object\n"));
+               mali_group_delete(group);
+               return NULL;
+       }
+
+       if (NULL != resource_gp) {
+               /* Create the GP core object inside this group */
+               struct mali_gp_core *gp_core = mali_gp_create(resource_gp, group);
+               if (NULL == gp_core) {
+                       /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+                       MALI_PRINT_ERROR(("Failed to create GP object\n"));
+                       mali_group_delete(group);
+                       return NULL;
+               }
+       }
+
+       if (NULL != resource_pp) {
+               struct mali_pp_core *pp_core;
+
+               /* Create the PP core object inside this group */
+               pp_core = mali_pp_create(resource_pp, group, MALI_FALSE, mali_get_bcast_id(resource_pp));
+               if (NULL == pp_core) {
+                       /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+                       MALI_PRINT_ERROR(("Failed to create PP object\n"));
+                       mali_group_delete(group);
+                       return NULL;
+               }
+       }
+
+       /* Reset group */
+       mali_group_lock(group);
+       mali_group_reset(group);
+       mali_group_unlock(group);
+
+       return group;
+}
+
+/**
+ * Create the Mali-450 virtual group: DLBU + broadcast unit + broadcast
+ * MMU + broadcast PP core.
+ *
+ * Before mali_group_create() succeeds, the DLBU and broadcast unit are
+ * freed explicitly on error; after that point only mali_group_delete()
+ * is called (NOTE(review): presumably the group then owns and releases
+ * the DLBU/broadcast unit — confirm against mali_group_delete()).
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT otherwise.
+ */
+static _mali_osk_errcode_t mali_create_virtual_group(_mali_osk_resource_t *resource_mmu_pp_bcast,
+        _mali_osk_resource_t *resource_pp_bcast,
+        _mali_osk_resource_t *resource_dlbu,
+        _mali_osk_resource_t *resource_bcast)
+{
+       struct mali_mmu_core *mmu_pp_bcast_core;
+       struct mali_pp_core *pp_bcast_core;
+       struct mali_dlbu_core *dlbu_core;
+       struct mali_bcast_unit *bcast_core;
+       struct mali_group *group;
+
+       MALI_DEBUG_PRINT(2, ("Starting new virtual group for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
+
+       /* Create the DLBU core object */
+       dlbu_core = mali_dlbu_create(resource_dlbu);
+       if (NULL == dlbu_core) {
+               MALI_PRINT_ERROR(("Failed to create DLBU object \n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Create the Broadcast unit core */
+       bcast_core = mali_bcast_unit_create(resource_bcast);
+       if (NULL == bcast_core) {
+               MALI_PRINT_ERROR(("Failed to create Broadcast unit object!\n"));
+               mali_dlbu_delete(dlbu_core);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Create the group object */
+       group = mali_group_create(NULL, dlbu_core, bcast_core);
+       if (NULL == group) {
+               MALI_PRINT_ERROR(("Failed to create group object for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
+               mali_bcast_unit_delete(bcast_core);
+               mali_dlbu_delete(dlbu_core);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Create the MMU object inside group */
+       mmu_pp_bcast_core = mali_mmu_create(resource_mmu_pp_bcast, group, MALI_TRUE);
+       if (NULL == mmu_pp_bcast_core) {
+               MALI_PRINT_ERROR(("Failed to create MMU PP broadcast object\n"));
+               mali_group_delete(group);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Create the PP core object inside this group */
+       pp_bcast_core = mali_pp_create(resource_pp_bcast, group, MALI_TRUE, 0);
+       if (NULL == pp_bcast_core) {
+               /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+               MALI_PRINT_ERROR(("Failed to create PP object\n"));
+               mali_group_delete(group);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Discover all GP/PP/MMU resources at their fixed offsets and build the
+ * corresponding groups.  At least one GP and one PP core (each with its
+ * own MMU) are mandatory; Mali-450 additionally requires the DLBU,
+ * broadcast unit and the virtual PP/MMU pair.  On success the global
+ * per-group PP core limits are clamped to what was actually initialized.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT when a
+ *         mandatory resource is missing or group creation fails.
+ */
+static _mali_osk_errcode_t mali_parse_config_groups(void)
+{
+       struct mali_group *group;
+       int cluster_id_gp = 0;
+       int cluster_id_pp_grp0 = 0;
+       int cluster_id_pp_grp1 = 0;
+       int i;
+
+       _mali_osk_resource_t resource_gp;
+       _mali_osk_resource_t resource_gp_mmu;
+       _mali_osk_resource_t resource_pp[8];
+       _mali_osk_resource_t resource_pp_mmu[8];
+       _mali_osk_resource_t resource_pp_mmu_bcast;
+       _mali_osk_resource_t resource_pp_bcast;
+       _mali_osk_resource_t resource_dlbu;
+       _mali_osk_resource_t resource_bcast;
+       _mali_osk_errcode_t resource_gp_found;
+       _mali_osk_errcode_t resource_gp_mmu_found;
+       _mali_osk_errcode_t resource_pp_found[8];
+       _mali_osk_errcode_t resource_pp_mmu_found[8];
+       _mali_osk_errcode_t resource_pp_mmu_bcast_found;
+       _mali_osk_errcode_t resource_pp_bcast_found;
+       _mali_osk_errcode_t resource_dlbu_found;
+       _mali_osk_errcode_t resource_bcast_found;
+
+       if (!(mali_is_mali400() || mali_is_mali450())) {
+               /* No known HW core */
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       if (MALI_MAX_JOB_RUNTIME_DEFAULT == mali_max_job_runtime) {
+               /* Group settings are not overridden by module parameters, so use device settings */
+               struct _mali_osk_device_data data = { 0, };
+
+               if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+                       /* Use device specific settings (if defined) */
+                       if (0 != data.max_job_runtime) {
+                               mali_max_job_runtime = data.max_job_runtime;
+                       }
+               }
+       }
+
+       if (mali_is_mali450()) {
+               /* Mali-450 have separate L2s for GP, and PP core group(s) */
+               cluster_id_pp_grp0 = 1;
+               cluster_id_pp_grp1 = 2;
+       }
+
+       /* Probe every possible core/MMU location; the _found flags record
+        * which ones exist and are consulted below. */
+       resource_gp_found = _mali_osk_resource_find(global_gpu_base_address + 0x00000, &resource_gp);
+       resource_gp_mmu_found = _mali_osk_resource_find(global_gpu_base_address + 0x03000, &resource_gp_mmu);
+       resource_pp_found[0] = _mali_osk_resource_find(global_gpu_base_address + 0x08000, &(resource_pp[0]));
+       resource_pp_found[1] = _mali_osk_resource_find(global_gpu_base_address + 0x0A000, &(resource_pp[1]));
+       resource_pp_found[2] = _mali_osk_resource_find(global_gpu_base_address + 0x0C000, &(resource_pp[2]));
+       resource_pp_found[3] = _mali_osk_resource_find(global_gpu_base_address + 0x0E000, &(resource_pp[3]));
+       resource_pp_found[4] = _mali_osk_resource_find(global_gpu_base_address + 0x28000, &(resource_pp[4]));
+       resource_pp_found[5] = _mali_osk_resource_find(global_gpu_base_address + 0x2A000, &(resource_pp[5]));
+       resource_pp_found[6] = _mali_osk_resource_find(global_gpu_base_address + 0x2C000, &(resource_pp[6]));
+       resource_pp_found[7] = _mali_osk_resource_find(global_gpu_base_address + 0x2E000, &(resource_pp[7]));
+       resource_pp_mmu_found[0] = _mali_osk_resource_find(global_gpu_base_address + 0x04000, &(resource_pp_mmu[0]));
+       resource_pp_mmu_found[1] = _mali_osk_resource_find(global_gpu_base_address + 0x05000, &(resource_pp_mmu[1]));
+       resource_pp_mmu_found[2] = _mali_osk_resource_find(global_gpu_base_address + 0x06000, &(resource_pp_mmu[2]));
+       resource_pp_mmu_found[3] = _mali_osk_resource_find(global_gpu_base_address + 0x07000, &(resource_pp_mmu[3]));
+       resource_pp_mmu_found[4] = _mali_osk_resource_find(global_gpu_base_address + 0x1C000, &(resource_pp_mmu[4]));
+       resource_pp_mmu_found[5] = _mali_osk_resource_find(global_gpu_base_address + 0x1D000, &(resource_pp_mmu[5]));
+       resource_pp_mmu_found[6] = _mali_osk_resource_find(global_gpu_base_address + 0x1E000, &(resource_pp_mmu[6]));
+       resource_pp_mmu_found[7] = _mali_osk_resource_find(global_gpu_base_address + 0x1F000, &(resource_pp_mmu[7]));
+
+
+       if (mali_is_mali450()) {
+               resource_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x13000, &resource_bcast);
+               resource_dlbu_found = _mali_osk_resource_find(global_gpu_base_address + 0x14000, &resource_dlbu);
+               resource_pp_mmu_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x15000, &resource_pp_mmu_bcast);
+               resource_pp_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x16000, &resource_pp_bcast);
+
+               if (_MALI_OSK_ERR_OK != resource_bcast_found ||
+                   _MALI_OSK_ERR_OK != resource_dlbu_found ||
+                   _MALI_OSK_ERR_OK != resource_pp_mmu_bcast_found ||
+                   _MALI_OSK_ERR_OK != resource_pp_bcast_found) {
+                       /* Missing mandatory core(s) for Mali-450 */
+                       MALI_DEBUG_PRINT(2, ("Missing mandatory resources, Mali-450 needs DLBU, Broadcast unit, virtual PP core and virtual MMU\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+
+       if (_MALI_OSK_ERR_OK != resource_gp_found ||
+           _MALI_OSK_ERR_OK != resource_gp_mmu_found ||
+           _MALI_OSK_ERR_OK != resource_pp_found[0] ||
+           _MALI_OSK_ERR_OK != resource_pp_mmu_found[0]) {
+               /* Missing mandatory core(s) */
+               MALI_DEBUG_PRINT(2, ("Missing mandatory resource, need at least one GP and one PP, both with a separate MMU\n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       MALI_DEBUG_ASSERT(1 <= mali_l2_cache_core_get_glob_num_l2_cores());
+       group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_gp), &resource_gp_mmu, &resource_gp, NULL);
+       if (NULL == group) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Add GP in group, for PMU ref count */
+       mali_pm_domain_add_group(mali_pmu_get_domain_mask(MALI_GP_DOMAIN_INDEX), group);
+
+       /* Create group for first (and mandatory) PP core */
+       MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= (cluster_id_pp_grp0 + 1)); /* >= 1 on Mali-300 and Mali-400, >= 2 on Mali-450 */
+       group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[0], NULL, &resource_pp[0]);
+       if (NULL == group) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Find corresponding pp domain */
+       mali_pm_domain_add_group(mali_pmu_get_domain_mask(MALI_PP0_DOMAIN_INDEX), group);
+
+       mali_inited_pp_cores_group_1++;
+
+       /* Create groups for rest of the cores in the first PP core group */
+       for (i = 1; i < 4; i++) { /* First half of the PP cores belong to first core group */
+               if (mali_inited_pp_cores_group_1 < mali_max_pp_cores_group_1) {
+                       if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) {
+                               group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[i], NULL, &resource_pp[i]);
+                               if (NULL == group) {
+                                       return _MALI_OSK_ERR_FAULT;
+                               }
+
+                               mali_pm_domain_add_group(mali_pmu_get_domain_mask(i + MALI_PP0_DOMAIN_INDEX), group);
+
+                               mali_inited_pp_cores_group_1++;
+                       }
+               }
+       }
+
+       /* Create groups for cores in the second PP core group */
+       for (i = 4; i < 8; i++) { /* Second half of the PP cores belong to second core group */
+               if (mali_inited_pp_cores_group_2 < mali_max_pp_cores_group_2) {
+                       if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) {
+                               MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= 2); /* Only Mali-450 have a second core group */
+                               group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp1), &resource_pp_mmu[i], NULL, &resource_pp[i]);
+                               if (NULL == group) {
+                                       return _MALI_OSK_ERR_FAULT;
+                               }
+                               mali_pm_domain_add_group(mali_pmu_get_domain_mask(i + MALI_PP0_DOMAIN_INDEX), group);
+                               mali_inited_pp_cores_group_2++;
+                       }
+               }
+       }
+
+       if(mali_is_mali450()) {
+               _mali_osk_errcode_t err = mali_create_virtual_group(&resource_pp_mmu_bcast, &resource_pp_bcast, &resource_dlbu, &resource_bcast);
+               if (_MALI_OSK_ERR_OK != err) {
+                       return err;
+               }
+       }
+
+       /* Clamp the configured limits to what was actually brought up. */
+       mali_max_pp_cores_group_1 = mali_inited_pp_cores_group_1;
+       mali_max_pp_cores_group_2 = mali_inited_pp_cores_group_2;
+       MALI_DEBUG_PRINT(2, ("%d+%d PP cores initialized\n", mali_inited_pp_cores_group_1, mali_inited_pp_cores_group_2));
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Fail early when shared interrupt lines are in use but the driver was
+ * built without CONFIG_MALI_SHARED_INTERRUPTS.
+ *
+ * @return _MALI_OSK_ERR_OK when the configuration is usable,
+ *         _MALI_OSK_ERR_FAULT otherwise.
+ */
+static _mali_osk_errcode_t mali_check_shared_interrupts(void)
+{
+#if !defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       if (MALI_TRUE == _mali_osk_shared_interrupts()) {
+               MALI_PRINT_ERROR(("Shared interrupts detected, but driver support is not enabled\n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+#endif /* !defined(CONFIG_MALI_SHARED_INTERRUPTS) */
+
+       /* It is OK to compile support for shared interrupts even if Mali is not using it. */
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Create one power-management domain for every PMU domain index whose
+ * mask is non-zero.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM when a
+ *         domain could not be created.
+ */
+static _mali_osk_errcode_t mali_create_pm_domains(void)
+{
+       int idx;
+
+       for (idx = 0; idx < MALI_MAX_NUMBER_OF_DOMAINS; idx++) {
+               u32 mask = mali_pmu_get_domain_mask(idx);
+
+               if (0x0 == mask) {
+                       continue;
+               }
+
+               if (NULL == mali_pm_domain_create(mask)) {
+                       return _MALI_OSK_ERR_NOMEM;
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Populate the global PMU domain masks with the default mapping for
+ * whichever cores are present, counting cores per group as it goes.
+ * Mask bit positions differ between the Mali-400 and Mali-450 PMU
+ * layouts, hence the per-product branches.
+ */
+static void mali_use_default_pm_domain_config(void)
+{
+       u32 pp_count_gr1 = 0;
+       u32 pp_count_gr2 = 0;
+       u32 l2_count = 0;
+
+       MALI_DEBUG_ASSERT(0 != global_gpu_base_address);
+
+       /* GP core */
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x00000, NULL)) {
+               mali_pmu_set_domain_mask(MALI_GP_DOMAIN_INDEX, 0x01);
+       }
+
+       /* PP0 - PP3 core */
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x08000, NULL)) {
+               ++pp_count_gr1;
+
+               if (mali_is_mali400()) {
+                       mali_pmu_set_domain_mask(MALI_PP0_DOMAIN_INDEX, 0x01<<2);
+               } else if (mali_is_mali450()) {
+                       mali_pmu_set_domain_mask(MALI_PP0_DOMAIN_INDEX, 0x01<<1);
+               }
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0A000, NULL)) {
+               ++pp_count_gr1;
+
+               if (mali_is_mali400()) {
+                       mali_pmu_set_domain_mask(MALI_PP1_DOMAIN_INDEX, 0x01<<3);
+               } else if (mali_is_mali450()) {
+                       /* NOTE(review): PP1-PP3 all map to bit 2 on Mali-450
+                        * below — looks like PP1-3 share one PMU domain on
+                        * that part; verify against the PMU layout. */
+                       mali_pmu_set_domain_mask(MALI_PP1_DOMAIN_INDEX, 0x01<<2);
+               }
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0C000, NULL)) {
+               ++pp_count_gr1;
+
+               if (mali_is_mali400()) {
+                       mali_pmu_set_domain_mask(MALI_PP2_DOMAIN_INDEX, 0x01<<4);
+               } else if (mali_is_mali450()) {
+                       mali_pmu_set_domain_mask(MALI_PP2_DOMAIN_INDEX, 0x01<<2);
+               }
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0E000, NULL)) {
+               ++pp_count_gr1;
+
+               if (mali_is_mali400()) {
+                       mali_pmu_set_domain_mask(MALI_PP3_DOMAIN_INDEX, 0x01<<5);
+               } else if (mali_is_mali450()) {
+                       mali_pmu_set_domain_mask(MALI_PP3_DOMAIN_INDEX, 0x01<<2);
+               }
+       }
+
+       /* PP4 - PP7 (Mali-450 only offsets; masks share bit 3) */
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x28000, NULL)) {
+               ++pp_count_gr2;
+
+               mali_pmu_set_domain_mask(MALI_PP4_DOMAIN_INDEX, 0x01<<3);
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2A000, NULL)) {
+               ++pp_count_gr2;
+
+               mali_pmu_set_domain_mask(MALI_PP5_DOMAIN_INDEX, 0x01<<3);
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2C000, NULL)) {
+               ++pp_count_gr2;
+
+               mali_pmu_set_domain_mask(MALI_PP6_DOMAIN_INDEX, 0x01<<3);
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2E000, NULL)) {
+               ++pp_count_gr2;
+
+               mali_pmu_set_domain_mask(MALI_PP7_DOMAIN_INDEX, 0x01<<3);
+       }
+
+       /* L2gp/L2PP0/L2PP4 */
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, NULL)) {
+               ++l2_count;
+
+               if (mali_is_mali400()) {
+                       mali_pmu_set_domain_mask(MALI_L20_DOMAIN_INDEX, 0x01<<1);
+               } else if (mali_is_mali450()) {
+                       mali_pmu_set_domain_mask(MALI_L20_DOMAIN_INDEX, 0x01<<0);
+               }
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, NULL)) {
+               ++l2_count;
+
+               mali_pmu_set_domain_mask(MALI_L21_DOMAIN_INDEX, 0x01<<1);
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, NULL)) {
+               ++l2_count;
+
+               mali_pmu_set_domain_mask(MALI_L22_DOMAIN_INDEX, 0x01<<3);
+       }
+
+       MALI_DEBUG_PRINT(2, ("Using default PMU domain config: (%d) gr1_pp_cores, (%d) gr2_pp_cores, (%d) l2_count. \n", pp_count_gr1, pp_count_gr2, l2_count));
+}
+
+/**
+ * Select the PMU power-domain configuration.
+ *
+ * If the platform device data supplies at least one non-zero entry in
+ * pmu_domain_config, that customized table is copied to the global
+ * domain masks; otherwise the built-in defaults are used.
+ */
+static void mali_set_pmu_global_domain_config(void)
+{
+       struct _mali_osk_device_data data = { 0, };
+       mali_bool has_custom_config = MALI_FALSE;
+       int idx;
+
+       if (_MALI_OSK_ERR_OK != _mali_osk_device_data_get(&data)) {
+               return;
+       }
+
+       /* Any non-zero entry means the platform provided its own setup. */
+       for (idx = 0; idx < MALI_MAX_NUMBER_OF_DOMAINS; idx++) {
+               if (0 != data.pmu_domain_config[idx]) {
+                       has_custom_config = MALI_TRUE;
+                       break;
+               }
+       }
+
+       if (has_custom_config) {
+               /* Copy the customer config to global config */
+               mali_pmu_copy_domain_mask(data.pmu_domain_config, sizeof(data.pmu_domain_config));
+       } else {
+               mali_use_default_pm_domain_config();
+       }
+}
+
+/**
+ * Create the PMU core if the hardware exposes one.
+ *
+ * The PMU is optional; its absence is not an error. When present, the
+ * global domain configuration is established before the core is created.
+ *
+ * @return _MALI_OSK_ERR_OK when the PMU is absent or created successfully,
+ *         _MALI_OSK_ERR_FAULT if creation of an existing PMU fails.
+ */
+static _mali_osk_errcode_t mali_parse_config_pmu(void)
+{
+       _mali_osk_resource_t resource_pmu;
+
+       MALI_DEBUG_ASSERT(0 != global_gpu_base_address);
+
+       /* No PMU resource registered at this offset: nothing to do. */
+       if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(global_gpu_base_address + 0x02000, &resource_pmu)) {
+               return _MALI_OSK_ERR_OK;
+       }
+
+       mali_set_pmu_global_domain_config();
+
+       if (NULL == mali_pmu_create(&resource_pmu)) {
+               MALI_PRINT_ERROR(("Failed to create PMU\n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Locate and create the DMA unit (Mali-450 only hardware).
+ *
+ * @return _MALI_OSK_ERR_OK on success,
+ *         _MALI_OSK_ERR_ITEM_NOT_FOUND when no DMA resource exists,
+ *         _MALI_OSK_ERR_FAULT if creation fails.
+ */
+static _mali_osk_errcode_t mali_parse_config_dma(void)
+{
+       _mali_osk_resource_t resource_dma;
+
+       if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(global_gpu_base_address + 0x12000, &resource_dma)) {
+               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       }
+
+       return (NULL != mali_dma_create(&resource_dma)) ? _MALI_OSK_ERR_OK : _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Configure the memory subsystem: dedicated memory, shared OS memory and
+ * the frame buffer validation range.
+ *
+ * Module parameters (mali_dedicated_mem_start/size, mali_shared_mem_size,
+ * mali_fb_start/size) take precedence; platform device data is consulted
+ * only when the corresponding module parameters are all zero.
+ *
+ * @return _MALI_OSK_ERR_OK on success,
+ *         _MALI_OSK_ERR_INVALID_ARGS if no GPU memory is configured at all,
+ *         otherwise the error from the failing registration step (the
+ *         memory subsystem is torn down before returning the error).
+ */
+static _mali_osk_errcode_t mali_parse_config_memory(void)
+{
+       _mali_osk_errcode_t ret;
+
+       if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size && 0 == mali_shared_mem_size) {
+               /* Memory settings are not overridden by module parameters, so use device settings */
+               struct _mali_osk_device_data data = { 0, };
+
+               if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+                       /* Use device specific settings (if defined) */
+                       mali_dedicated_mem_start = data.dedicated_mem_start;
+                       mali_dedicated_mem_size = data.dedicated_mem_size;
+                       mali_shared_mem_size = data.shared_mem_size;
+               }
+
+               if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size && 0 == mali_shared_mem_size) {
+                       /* No GPU memory specified */
+                       return _MALI_OSK_ERR_INVALID_ARGS;
+               }
+
+               MALI_DEBUG_PRINT(2, ("Using device defined memory settings (dedicated: 0x%08X@0x%08X, shared: 0x%08X)\n",
+                                    mali_dedicated_mem_size, mali_dedicated_mem_start, mali_shared_mem_size));
+       } else {
+               MALI_DEBUG_PRINT(2, ("Using module defined memory settings (dedicated: 0x%08X@0x%08X, shared: 0x%08X)\n",
+                                    mali_dedicated_mem_size, mali_dedicated_mem_start, mali_shared_mem_size));
+       }
+
+       /* Dedicated and shared memory are independent; either (or both) may be configured. */
+       if (0 < mali_dedicated_mem_size && 0 != mali_dedicated_mem_start) {
+               /* Dedicated memory */
+               ret = mali_memory_core_resource_dedicated_memory(mali_dedicated_mem_start, mali_dedicated_mem_size);
+               if (_MALI_OSK_ERR_OK != ret) {
+                       MALI_PRINT_ERROR(("Failed to register dedicated memory\n"));
+                       /* Undo any partial memory setup before propagating the error. */
+                       mali_memory_terminate();
+                       return ret;
+               }
+       }
+
+       if (0 < mali_shared_mem_size) {
+               /* Shared OS memory */
+               ret = mali_memory_core_resource_os_memory(mali_shared_mem_size);
+               if (_MALI_OSK_ERR_OK != ret) {
+                       MALI_PRINT_ERROR(("Failed to register shared OS memory\n"));
+                       mali_memory_terminate();
+                       return ret;
+               }
+       }
+
+       if (0 == mali_fb_start && 0 == mali_fb_size) {
+               /* Frame buffer settings are not overridden by module parameters, so use device settings */
+               struct _mali_osk_device_data data = { 0, };
+
+               if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+                       /* Use device specific settings (if defined) */
+                       mali_fb_start = data.fb_start;
+                       mali_fb_size = data.fb_size;
+               }
+
+               MALI_DEBUG_PRINT(2, ("Using device defined frame buffer settings (0x%08X@0x%08X)\n",
+                                    mali_fb_size, mali_fb_start));
+       } else {
+               MALI_DEBUG_PRINT(2, ("Using module defined frame buffer settings (0x%08X@0x%08X)\n",
+                                    mali_fb_size, mali_fb_start));
+       }
+
+       if (0 != mali_fb_size) {
+               /* Register frame buffer */
+               ret = mali_mem_validation_add_range(mali_fb_start, mali_fb_size);
+               if (_MALI_OSK_ERR_OK != ret) {
+                       MALI_PRINT_ERROR(("Failed to register frame buffer memory region\n"));
+                       mali_memory_terminate();
+                       return ret;
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Classify the GPU as Mali-450 when more than one L2 cache is present;
+ * in this family only Mali-450 configurations carry multiple L2s.
+ */
+static void mali_detect_gpu_class(void)
+{
+       u32 pp_cores = 0;
+       u32 l2_caches = 0;
+
+       mali_resource_count(&pp_cores, &l2_caches);
+       if (1 < l2_caches) {
+               mali_gpu_class_is_mali450 = MALI_TRUE;
+       }
+}
+
+/**
+ * Bring up all Mali kernel subsystems in dependency order.
+ *
+ * The power reference taken via _mali_osk_pm_dev_ref_add() keeps the GPU
+ * powered for the remainder of the function and is released (on both the
+ * success and error paths) with _mali_osk_pm_dev_ref_dec().
+ *
+ * On failure, the goto ladder at the bottom unwinds exactly the steps
+ * that completed before the error, in reverse order of initialization.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise the first error encountered.
+ */
+_mali_osk_errcode_t mali_initialize_subsystems(void)
+{
+       _mali_osk_errcode_t err;
+       struct mali_pmu_core *pmu;
+
+       mali_pp_job_initialize();
+
+       err = mali_session_initialize();
+       if (_MALI_OSK_ERR_OK != err) goto session_init_failed;
+
+#if defined(CONFIG_MALI400_PROFILING)
+       err = _mali_osk_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
+       if (_MALI_OSK_ERR_OK != err) {
+               /* No biggie if we weren't able to initialize the profiling */
+               MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n"));
+       }
+#endif
+
+       err = mali_memory_initialize();
+       if (_MALI_OSK_ERR_OK != err) goto memory_init_failed;
+
+       /* Configure memory early. Memory allocation needed for mali_mmu_initialize. */
+       err = mali_parse_config_memory();
+       if (_MALI_OSK_ERR_OK != err) goto parse_memory_config_failed;
+
+       err = mali_set_global_gpu_base_address();
+       if (_MALI_OSK_ERR_OK != err) goto set_global_gpu_base_address_failed;
+
+       /* Detect gpu class according to l2 cache number */
+       mali_detect_gpu_class();
+
+       err = mali_check_shared_interrupts();
+       if (_MALI_OSK_ERR_OK != err) goto check_shared_interrupts_failed;
+
+       err = mali_pp_scheduler_initialize();
+       if (_MALI_OSK_ERR_OK != err) goto pp_scheduler_init_failed;
+
+       /* Initialize the power management module */
+       err = mali_pm_initialize();
+       if (_MALI_OSK_ERR_OK != err) goto pm_init_failed;
+
+       /* Initialize the MALI PMU */
+       err = mali_parse_config_pmu();
+       if (_MALI_OSK_ERR_OK != err) goto parse_pmu_config_failed;
+
+       /* Make sure the power stays on for the rest of this function */
+       err = _mali_osk_pm_dev_ref_add();
+       if (_MALI_OSK_ERR_OK != err) goto pm_always_on_failed;
+
+       /*
+        * If run-time PM is used, then the mali_pm module has now already been
+        * notified that the power now is on (through the resume callback functions).
+        * However, if run-time PM is not used, then there will probably not be any
+        * calls to the resume callback functions, so we need to explicitly tell it
+        * that the power is on.
+        */
+       mali_pm_set_power_is_on();
+
+       /* Reset PMU HW and ensure all Mali power domains are on */
+       pmu = mali_pmu_get_global_pmu_core();
+       if (NULL != pmu) {
+               err = mali_pmu_reset(pmu);
+               if (_MALI_OSK_ERR_OK != err) goto pmu_reset_failed;
+       }
+
+       /* Detect which Mali GPU we are dealing with */
+       err = mali_parse_product_info();
+       if (_MALI_OSK_ERR_OK != err) goto product_info_parsing_failed;
+
+       /* The global_product_id is now populated with the correct Mali GPU */
+
+       /* Create PM domains only if PMU exists */
+       if (NULL != pmu) {
+               err = mali_create_pm_domains();
+               if (_MALI_OSK_ERR_OK != err) goto pm_domain_failed;
+       }
+
+       /* Initialize MMU module */
+       err = mali_mmu_initialize();
+       if (_MALI_OSK_ERR_OK != err) goto mmu_init_failed;
+
+       /* DLBU and DMA are Mali-450 only hardware blocks. */
+       if (mali_is_mali450()) {
+               err = mali_dlbu_initialize();
+               if (_MALI_OSK_ERR_OK != err) goto dlbu_init_failed;
+
+               err = mali_parse_config_dma();
+               if (_MALI_OSK_ERR_OK != err) goto dma_parsing_failed;
+       }
+
+       /* Start configuring the actual Mali hardware. */
+       err = mali_parse_config_l2_cache();
+       if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+       err = mali_parse_config_groups();
+       if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+
+       /* Initialize the schedulers */
+       err = mali_scheduler_initialize();
+       if (_MALI_OSK_ERR_OK != err) goto scheduler_init_failed;
+       err = mali_gp_scheduler_initialize();
+       if (_MALI_OSK_ERR_OK != err) goto gp_scheduler_init_failed;
+
+       /* PP scheduler population can't fail */
+       mali_pp_scheduler_populate();
+
+       /* Initialize the GPU utilization tracking */
+       err = mali_utilization_init();
+       if (_MALI_OSK_ERR_OK != err) goto utilization_init_failed;
+
+       /* Allowing the system to be turned off */
+       _mali_osk_pm_dev_ref_dec();
+
+       MALI_SUCCESS; /* all ok */
+
+       /* Error handling */
+
+utilization_init_failed:
+       mali_pp_scheduler_depopulate();
+       mali_gp_scheduler_terminate();
+gp_scheduler_init_failed:
+       mali_scheduler_terminate();
+scheduler_init_failed:
+config_parsing_failed:
+       mali_delete_groups(); /* Delete any groups not (yet) owned by a scheduler */
+       mali_delete_l2_cache_cores(); /* Delete L2 cache cores even if config parsing failed. */
+       {
+               struct mali_dma_core *dma = mali_dma_get_global_dma_core();
+               if (NULL != dma) mali_dma_delete(dma);
+       }
+dma_parsing_failed:
+       mali_dlbu_terminate();
+dlbu_init_failed:
+       mali_mmu_terminate();
+mmu_init_failed:
+       mali_pm_domain_terminate();
+pm_domain_failed:
+       /* Nothing to roll back */
+product_info_parsing_failed:
+       /* Nothing to roll back */
+pmu_reset_failed:
+       /* Allowing the system to be turned off */
+       _mali_osk_pm_dev_ref_dec();
+pm_always_on_failed:
+       pmu = mali_pmu_get_global_pmu_core();
+       if (NULL != pmu) {
+               mali_pmu_delete(pmu);
+       }
+parse_pmu_config_failed:
+       mali_pm_terminate();
+pm_init_failed:
+       mali_pp_scheduler_terminate();
+pp_scheduler_init_failed:
+check_shared_interrupts_failed:
+       global_gpu_base_address = 0;
+set_global_gpu_base_address_failed:
+       /* undoing mali_parse_config_memory() is done by mali_memory_terminate() */
+parse_memory_config_failed:
+       mali_memory_terminate();
+memory_init_failed:
+#if defined(CONFIG_MALI400_PROFILING)
+       _mali_osk_profiling_term();
+#endif
+       mali_session_terminate();
+session_init_failed:
+       mali_pp_job_terminate();
+       return err;
+}
+
+/**
+ * Shut down all Mali kernel subsystems.
+ *
+ * A power reference is held across the whole sequence because the
+ * terminate steps touch GPU hardware. Subsystems are taken down in the
+ * reverse order of mali_initialize_subsystems().
+ */
+void mali_terminate_subsystems(void)
+{
+       struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+       struct mali_dma_core *dma = mali_dma_get_global_dma_core();
+
+       MALI_DEBUG_PRINT(2, ("terminate_subsystems() called\n"));
+
+       /* shut down subsystems in reverse order from startup */
+
+       /* We need the GPU to be powered up for the terminate sequence */
+       _mali_osk_pm_dev_ref_add();
+
+       mali_utilization_term();
+       mali_pp_scheduler_depopulate();
+       mali_gp_scheduler_terminate();
+       mali_scheduler_terminate();
+       mali_delete_l2_cache_cores();
+       /* DLBU only exists on Mali-450 class hardware. */
+       if (mali_is_mali450()) {
+               mali_dlbu_terminate();
+       }
+       mali_mmu_terminate();
+       if (NULL != pmu) {
+               mali_pmu_delete(pmu);
+       }
+       if (NULL != dma) {
+               mali_dma_delete(dma);
+       }
+       mali_pm_terminate();
+       mali_memory_terminate();
+#if defined(CONFIG_MALI400_PROFILING)
+       _mali_osk_profiling_term();
+#endif
+
+       /* Allowing the system to be turned off */
+       _mali_osk_pm_dev_ref_dec();
+
+       mali_pp_scheduler_terminate();
+       mali_session_terminate();
+
+       mali_pp_job_terminate();
+}
+
+/** @return The product ID detected by mali_parse_product_info(). */
+_mali_product_id_t mali_kernel_core_get_product_id(void)
+{
+       return global_product_id;
+}
+
+/** @return The GPU hardware major version detected at probe time. */
+u32 mali_kernel_core_get_gpu_major_version(void)
+{
+       return global_gpu_major_version;
+}
+
+/** @return The GPU hardware minor version detected at probe time. */
+u32 mali_kernel_core_get_gpu_minor_version(void)
+{
+       return global_gpu_minor_version;
+}
+
+/**
+ * UK call: report the kernel-side UK API version and whether it matches
+ * the version user space was built against.
+ *
+ * Always succeeds; args->compatible tells the caller the outcome.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args )
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+       /* Compatibility check: user space and kernel must agree on the UK API. */
+       args->compatible = (_MALI_UK_API_VERSION == args->version) ? 1 : 0;
+
+       args->version = _MALI_UK_API_VERSION; /* report our version */
+
+       /* success regardless of being compatible or not */
+       MALI_SUCCESS;
+}
+
+/**
+ * UK call: block until a notification is available on the session's
+ * ioctl queue, then copy it to user-visible args and delete it.
+ *
+ * If the session's queue is gone (driver shutting down), returns
+ * success with type _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS so
+ * user space stops polling.
+ */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args )
+{
+       _mali_osk_errcode_t err;
+       _mali_osk_notification_t * notification;
+       _mali_osk_notification_queue_t *queue;
+
+       /* check input */
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+       queue = ((struct mali_session_data *)args->ctx)->ioctl_queue;
+
+       /* if the queue does not exist we're currently shutting down */
+       if (NULL == queue) {
+               MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+               args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
+               MALI_SUCCESS;
+       }
+
+       /* receive a notification, might sleep */
+       err = _mali_osk_notification_queue_receive(queue, &notification);
+       if (_MALI_OSK_ERR_OK != err) {
+               MALI_ERROR(err); /* errcode returned, pass on to caller */
+       }
+
+       /* copy the buffer to the user */
+       /* NOTE(review): assumes result_buffer_size never exceeds
+        * sizeof(args->data) — verify against notification producers. */
+       args->type = (_mali_uk_notification_type)notification->notification_type;
+       _mali_osk_memcpy(&args->data, notification->result_buffer, notification->result_buffer_size);
+
+       /* finished with the notification */
+       _mali_osk_notification_delete( notification );
+
+       MALI_SUCCESS; /* all ok */
+}
+
+/**
+ * UK call: post a notification of the given type (with no payload) onto
+ * the session's ioctl queue, waking any waiter in
+ * _mali_ukk_wait_for_notification().
+ *
+ * If the queue is gone the post is silently dropped and success is
+ * returned (session is shutting down).
+ */
+_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args )
+{
+       _mali_osk_notification_t * notification;
+       _mali_osk_notification_queue_t *queue;
+
+       /* check input */
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+       queue = ((struct mali_session_data *)args->ctx)->ioctl_queue;
+
+       /* if the queue does not exist we're currently shutting down */
+       if (NULL == queue) {
+               /* NOTE(review): debug message mentions "stop querying" but this
+                * path merely drops the post — wording looks copy-pasted from
+                * the wait path; behavior (success, no-op) is intentional. */
+               MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+               MALI_SUCCESS;
+       }
+
+       notification = _mali_osk_notification_create(args->type, 0);
+       if (NULL == notification) {
+               MALI_PRINT_ERROR( ("Failed to create notification object\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       /* queue takes ownership of the notification object */
+       _mali_osk_notification_queue_send(queue, notification);
+
+       MALI_SUCCESS; /* all ok */
+}
+
+/**
+ * UK call: promote a session to the high-priority job queue.
+ *
+ * The promotion is one-way and idempotent; the debug message is only
+ * emitted on the first request.
+ */
+_mali_osk_errcode_t _mali_ukk_request_high_priority( _mali_uk_request_high_priority_s *args )
+{
+       struct mali_session_data *session;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+       session = (struct mali_session_data *) args->ctx;
+
+       if (MALI_FALSE == session->use_high_priority_job_queue) {
+               session->use_high_priority_job_queue = MALI_TRUE;
+               MALI_DEBUG_PRINT(2, ("Session 0x%08X with pid %d was granted higher priority.\n", session, _mali_osk_get_pid()));
+       }
+
+       MALI_SUCCESS;
+}
+
+/**
+ * UK call: open a new driver session.
+ *
+ * Allocates and wires up per-session state — notification queue, MMU page
+ * directory (with DLBU page mapping), memory session, soft job system and
+ * timeline system — then registers the session in the global session list
+ * and returns it through *context.
+ *
+ * Each failure path below releases, inline and in reverse order, exactly
+ * the resources acquired so far.
+ *
+ * @param context Out: receives the new struct mali_session_data pointer.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM (or
+ *         _MALI_OSK_ERR_FAULT) on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_open(void **context)
+{
+       u32 i;
+       struct mali_session_data *session;
+
+       /* allocated struct to track this session */
+       session = (struct mali_session_data *)_mali_osk_calloc(1, sizeof(struct mali_session_data));
+       MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_NOMEM);
+
+       MALI_DEBUG_PRINT(3, ("Session starting\n"));
+
+       /* create a response queue for this session */
+       session->ioctl_queue = _mali_osk_notification_queue_init();
+       if (NULL == session->ioctl_queue) {
+               _mali_osk_free(session);
+               MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+       }
+
+       session->page_directory = mali_mmu_pagedir_alloc();
+       if (NULL == session->page_directory) {
+               _mali_osk_notification_queue_term(session->ioctl_queue);
+               _mali_osk_free(session);
+               MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+       }
+
+       /* Reserve the fixed DLBU virtual address in every session's address space. */
+       if (_MALI_OSK_ERR_OK != mali_mmu_pagedir_map(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE)) {
+               MALI_PRINT_ERROR(("Failed to map DLBU page into session\n"));
+               _mali_osk_notification_queue_term(session->ioctl_queue);
+               _mali_osk_free(session);
+               MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+       }
+
+       /* Point the reserved address at the real DLBU page when one exists
+        * (mali_dlbu_phys_addr is 0 on non-450 hardware). */
+       if (0 != mali_dlbu_phys_addr) {
+               mali_mmu_pagedir_update(session->page_directory, MALI_DLBU_VIRT_ADDR, mali_dlbu_phys_addr,
+                                       _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+       }
+
+       if (_MALI_OSK_ERR_OK != mali_memory_session_begin(session)) {
+               mali_mmu_pagedir_free(session->page_directory);
+               _mali_osk_notification_queue_term(session->ioctl_queue);
+               _mali_osk_free(session);
+               MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+       }
+
+       /* Create soft system. */
+       session->soft_job_system = mali_soft_job_system_create(session);
+       if (NULL == session->soft_job_system) {
+               mali_memory_session_end(session);
+               mali_mmu_pagedir_free(session->page_directory);
+               _mali_osk_notification_queue_term(session->ioctl_queue);
+               _mali_osk_free(session);
+               MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+       }
+
+       /* Create timeline system. */
+       session->timeline_system = mali_timeline_system_create(session);
+       if (NULL == session->timeline_system) {
+               mali_soft_job_system_destroy(session->soft_job_system);
+               mali_memory_session_end(session);
+               mali_mmu_pagedir_free(session->page_directory);
+               _mali_osk_notification_queue_term(session->ioctl_queue);
+               _mali_osk_free(session);
+               MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+       }
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+       if ( _MALI_OSK_ERR_OK != _mali_osk_atomic_init(&session->number_of_window_jobs, 0)) {
+               MALI_DEBUG_PRINT_ERROR(("Initialization of atomic number_of_window_jobs failed.\n"));
+               mali_timeline_system_destroy(session->timeline_system);
+               mali_soft_job_system_destroy(session->soft_job_system);
+               mali_memory_session_end(session);
+               mali_mmu_pagedir_free(session->page_directory);
+               _mali_osk_notification_queue_term(session->ioctl_queue);
+               _mali_osk_free(session);
+               return _MALI_OSK_ERR_FAULT;
+       }
+#endif
+
+       session->use_high_priority_job_queue = MALI_FALSE;
+
+       /* Initialize list of PP jobs on this session. */
+       _MALI_OSK_INIT_LIST_HEAD(&session->pp_job_list);
+
+       /* Initialize the pp_job_fb_lookup_list array used to quickly lookup jobs from a given frame builder */
+       for (i = 0; i < MALI_PP_JOB_FB_LOOKUP_LIST_SIZE; ++i) {
+               _MALI_OSK_INIT_LIST_HEAD(&session->pp_job_fb_lookup_list[i]);
+       }
+
+       *context = (void*)session;
+
+       /* Add session to the list of all sessions. */
+       mali_session_add(session);
+
+       MALI_DEBUG_PRINT(2, ("Session started\n"));
+       MALI_SUCCESS;
+}
+
+/**
+ * UK call: close a driver session.
+ *
+ * Unregisters the session, aborts all of its queued/running GP, PP and
+ * soft jobs, drains pending bottom-half work, then tears down the
+ * per-session subsystems in reverse order of _mali_ukk_open() and frees
+ * the session. *context is cleared on return.
+ *
+ * May sleep (waits for job aborts and timeline drain to complete).
+ */
+_mali_osk_errcode_t _mali_ukk_close(void **context)
+{
+       struct mali_session_data *session;
+       MALI_CHECK_NON_NULL(context, _MALI_OSK_ERR_INVALID_ARGS);
+       session = (struct mali_session_data *)*context;
+
+       MALI_DEBUG_PRINT(3, ("Session ending\n"));
+
+       MALI_DEBUG_ASSERT_POINTER(session->soft_job_system);
+       MALI_DEBUG_ASSERT_POINTER(session->timeline_system);
+
+       /* Remove session from list of all sessions. */
+       mali_session_remove(session);
+
+       /* This flag is used to prevent queueing of jobs due to activation. */
+       session->is_aborting = MALI_TRUE;
+
+       /* Stop the soft job timer. */
+       mali_timeline_system_stop_timer(session->timeline_system);
+
+       /* Abort queued and running GP and PP jobs. */
+       mali_gp_scheduler_abort_session(session);
+       mali_pp_scheduler_abort_session(session);
+
+       /* Abort the soft job system. */
+       mali_soft_job_system_abort(session->soft_job_system);
+
+       /* Force execution of all pending bottom half processing for GP and PP. */
+       _mali_osk_wq_flush();
+
+       /* The session PP list should now be empty. */
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&session->pp_job_list));
+
+       /* At this point the GP and PP scheduler no longer has any jobs queued or running from this
+        * session, and all soft jobs in the soft job system has been destroyed. */
+
+       /* Any trackers left in the timeline system are directly or indirectly waiting on external
+        * sync fences.  Cancel all sync fence waiters to trigger activation of all remaining
+        * trackers.  This call will sleep until all timelines are empty. */
+       mali_timeline_system_abort(session->timeline_system);
+
+       /* Flush pending work.
+        * Needed to make sure all bottom half processing related to this
+        * session has been completed, before we free internal data structures.
+        */
+       _mali_osk_wq_flush();
+
+       /* Destroy timeline system. */
+       mali_timeline_system_destroy(session->timeline_system);
+       session->timeline_system = NULL;
+
+       /* Destroy soft system. */
+       mali_soft_job_system_destroy(session->soft_job_system);
+       session->soft_job_system = NULL;
+
+       MALI_DEBUG_CODE( {
+               /* Check that the pp_job_fb_lookup_list array is empty. */
+               u32 i;
+               for (i = 0; i < MALI_PP_JOB_FB_LOOKUP_LIST_SIZE; ++i)
+               {
+                       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&session->pp_job_fb_lookup_list[i]));
+               }
+       });
+
+       /* Free remaining memory allocated to this session */
+       mali_memory_session_end(session);
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+       _mali_osk_atomic_term(&session->number_of_window_jobs);
+#endif
+
+       /* Free session data structures */
+       mali_mmu_pagedir_free(session->page_directory);
+       _mali_osk_notification_queue_term(session->ioctl_queue);
+       _mali_osk_free(session);
+
+       *context = NULL;
+
+       MALI_DEBUG_PRINT(2, ("Session has ended\n"));
+
+       MALI_SUCCESS;
+}
+
+#if MALI_STATE_TRACKING
+/**
+ * Dump GP and PP scheduler state as text into buf.
+ *
+ * @param buf  Destination buffer.
+ * @param size Capacity of buf in bytes.
+ * @return Number of bytes written.
+ *
+ * NOTE(review): size - n is not clamped here; assumes each dump_state
+ * implementation respects the remaining size so n never exceeds size —
+ * verify against the scheduler dump implementations.
+ */
+u32 _mali_kernel_core_dump_state(char* buf, u32 size)
+{
+       int n = 0; /* Number of bytes written to buf */
+
+       n += mali_gp_scheduler_dump_state(buf + n, size - n);
+       n += mali_pp_scheduler_dump_state(buf + n, size - n);
+
+       return n;
+}
+#endif
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_core.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_core.h
new file mode 100644 (file)
index 0000000..cb42337
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_CORE_H__
+#define __MALI_KERNEL_CORE_H__
+
+#include "mali_osk.h"
+
+typedef enum {
+       _MALI_PRODUCT_ID_UNKNOWN,
+       _MALI_PRODUCT_ID_MALI200,
+       _MALI_PRODUCT_ID_MALI300,
+       _MALI_PRODUCT_ID_MALI400,
+       _MALI_PRODUCT_ID_MALI450,
+} _mali_product_id_t;
+
+extern mali_bool mali_gpu_class_is_mali450;
+
+_mali_osk_errcode_t mali_initialize_subsystems(void);
+
+void mali_terminate_subsystems(void);
+
+_mali_product_id_t mali_kernel_core_get_product_id(void);
+
+u32 mali_kernel_core_get_gpu_major_version(void);
+
+u32 mali_kernel_core_get_gpu_minor_version(void);
+
+u32 _mali_kernel_core_dump_state(char* buf, u32 size);
+
+/** @return MALI_TRUE when the detected GPU class is Mali-450.
+ *  Constant MALI_FALSE when the driver is built without CONFIG_MALI450. */
+MALI_STATIC_INLINE mali_bool mali_is_mali450(void)
+{
+#if defined(CONFIG_MALI450)
+       return mali_gpu_class_is_mali450;
+#else
+       return MALI_FALSE;
+#endif
+}
+
+/** @return MALI_TRUE when the detected GPU class is Mali-400.
+ *  Constant MALI_TRUE when the driver is built without CONFIG_MALI450. */
+MALI_STATIC_INLINE mali_bool mali_is_mali400(void)
+{
+#if !defined(CONFIG_MALI450)
+       return MALI_TRUE;
+#else
+       return !mali_gpu_class_is_mali450;
+#endif
+}
+
+#endif /* __MALI_KERNEL_CORE_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_descriptor_mapping.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_descriptor_mapping.c
new file mode 100644 (file)
index 0000000..e365fab
--- /dev/null
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static mali_descriptor_table * descriptor_table_alloc(int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(mali_descriptor_table * table);
+
+/**
+ * Create a descriptor mapping with room for init_entries descriptors,
+ * growable up to max_entries. Both counts are rounded up to a multiple
+ * of BITS_PER_LONG by MALI_PAD_INT.
+ *
+ * Descriptor 0 is reserved (usage bit pre-set) so that a descriptor of
+ * zero can never be handed out, avoiding NULL/zero ambiguity for callers.
+ *
+ * @return New mapping object, or NULL on allocation/lock failure.
+ */
+mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries)
+{
+       mali_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(mali_descriptor_mapping));
+
+       init_entries = MALI_PAD_INT(init_entries);
+       max_entries = MALI_PAD_INT(max_entries);
+
+       if (NULL != map) {
+               map->table = descriptor_table_alloc(init_entries);
+               if (NULL != map->table) {
+                       map->lock = _mali_osk_mutex_rw_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP);
+                       if (NULL != map->lock) {
+                               _mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic to kick in */
+                               map->max_nr_mappings_allowed = max_entries;
+                               map->current_nr_mappings = init_entries;
+                               return map;
+                       }
+                       /* Lock creation failed: unwind table, then map below. */
+                       descriptor_table_free(map->table);
+               }
+               _mali_osk_free(map);
+       }
+       return NULL;
+}
+
+/**
+ * Destroy a descriptor mapping, releasing its lock, backing table and
+ * the mapping object itself. Caller must guarantee no concurrent users.
+ */
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map)
+{
+       _mali_osk_mutex_rw_term(map->lock);
+       descriptor_table_free(map->table);
+       _mali_osk_free(map);
+}
+
+/**
+ * Allocate a descriptor for target, growing the table by BITS_PER_LONG
+ * entries (up to max_nr_mappings_allowed) when it is full.
+ *
+ * @param map         The mapping to allocate from.
+ * @param target      Object the new descriptor will map to.
+ * @param odescriptor Out: receives the allocated descriptor on success.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT when the table
+ *         is at its limit or expansion fails.
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *odescriptor)
+{
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+       int new_descriptor;
+
+       MALI_DEBUG_ASSERT_POINTER(map);
+       MALI_DEBUG_ASSERT_POINTER(odescriptor);
+
+       _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+       new_descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+       if (new_descriptor == map->current_nr_mappings) {
+               /* no free descriptor, try to expand the table */
+               mali_descriptor_table * new_table, * old_table;
+               int old_nr_mappings = map->current_nr_mappings;
+
+               if (old_nr_mappings >= map->max_nr_mappings_allowed) goto unlock_and_exit;
+
+               new_table = descriptor_table_alloc(old_nr_mappings + BITS_PER_LONG);
+               if (NULL == new_table) goto unlock_and_exit;
+
+               /* Only grow the count once the new table exists, so a failed
+                * allocation cannot leave current_nr_mappings larger than the
+                * table actually backing it. */
+               map->current_nr_mappings = old_nr_mappings + BITS_PER_LONG;
+
+               /* Copy using the OLD sizes: copying with the new (larger) count
+                * read past the end of the old allocation and polluted the new
+                * usage bitmap with bytes from the old mappings array. The new
+                * table is calloc'ed, so the grown region is already zeroed. */
+               old_table = map->table;
+               _mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long) * old_nr_mappings) / BITS_PER_LONG);
+               _mali_osk_memcpy(new_table->mappings, old_table->mappings, old_nr_mappings * sizeof(void*));
+               map->table = new_table;
+               descriptor_table_free(old_table);
+       }
+
+       /* we have found a valid descriptor, set the value and usage bit */
+       _mali_osk_set_nonatomic_bit(new_descriptor, map->table->usage);
+       map->table->mappings[new_descriptor] = target;
+       *odescriptor = new_descriptor;
+       err = _MALI_OSK_ERR_OK;
+
+unlock_and_exit:
+       _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+       MALI_ERROR(err);
+}
+
+/**
+ * Invoke callback(descriptor, target) for every live descriptor in the
+ * mapping, holding the lock in read mode for the whole iteration.
+ */
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*))
+{
+       int descriptor;
+
+       MALI_DEBUG_ASSERT_POINTER(map);
+       MALI_DEBUG_ASSERT_POINTER(callback);
+
+       _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+       /* Descriptor 0 is a reserved ID that never maps to anything, so start at 1. */
+       for (descriptor = 1; descriptor < map->current_nr_mappings; ++descriptor) {
+               if (0 == _mali_osk_test_bit(descriptor, map->table->usage)) {
+                       continue;
+               }
+               callback(descriptor, map->table->mappings[descriptor]);
+       }
+       _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+}
+
+/**
+ * Look up the object a descriptor maps to.
+ *
+ * @param map        The mapping to query.
+ * @param descriptor Descriptor to look up.
+ * @param target     Out: receives the mapped pointer, or NULL on failure.
+ * @return _MALI_OSK_ERR_OK if the descriptor is live,
+ *         _MALI_OSK_ERR_FAULT otherwise (with *target set to NULL).
+ *
+ * NOTE(review): target itself is dereferenced without a pointer assert,
+ * unlike map — callers are assumed to pass a valid out pointer.
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target)
+{
+       _mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+       MALI_DEBUG_ASSERT_POINTER(map);
+       _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+       if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) ) {
+               *target = map->table->mappings[descriptor];
+               result = _MALI_OSK_ERR_OK;
+       } else *target = NULL;
+       _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+       MALI_ERROR(result);
+}
+
+/**
+ * Replace the object a live descriptor maps to.
+ *
+ * @param map        The mapping to update.
+ * @param descriptor Descriptor whose target is replaced.
+ * @param target     New object for the descriptor.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if the
+ *         descriptor is out of range or not currently allocated.
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target)
+{
+       _mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+       /* This function writes to the table, so the lock must be taken in RW
+        * (writer) mode; the previous RO mode let the store to the mapping
+        * slot race against concurrent readers holding the read lock. */
+       _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+       if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) ) {
+               map->table->mappings[descriptor] = target;
+               result = _MALI_OSK_ERR_OK;
+       }
+       _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+       MALI_ERROR(result);
+}
+
+/**
+ * Release a descriptor, clearing its usage bit and mapping slot.
+ *
+ * @return The object the descriptor previously mapped to, or NULL if the
+ *         descriptor was out of range or not allocated.
+ */
+void *mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor)
+{
+       void *previous = NULL;
+
+       _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+       if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) ) {
+               /* Hand the old target back to the caller and recycle the slot. */
+               previous = map->table->mappings[descriptor];
+               map->table->mappings[descriptor] = NULL;
+               _mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
+       }
+       _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+
+       return previous;
+}
+
+/**
+ * Allocate a descriptor table for count entries as one zeroed block laid
+ * out as: [struct header | usage bitmap (count bits) | mappings array
+ * (count pointers)], with usage and mappings pointing into the block.
+ *
+ * @return New table, or NULL on allocation failure.
+ */
+static mali_descriptor_table * descriptor_table_alloc(int count)
+{
+       mali_descriptor_table * table;
+
+       table = _mali_osk_calloc(1, sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count));
+
+       if (NULL != table) {
+               table->usage = (u32*)((u8*)table + sizeof(mali_descriptor_table));
+               table->mappings = (void**)((u8*)table + sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG));
+       }
+
+       return table;
+}
+
+/* Free a table from descriptor_table_alloc(); the usage bitmap and
+ * mappings array live inside the same allocation, so one free suffices. */
+static void descriptor_table_free(mali_descriptor_table * table)
+{
+       _mali_osk_free(table);
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_descriptor_mapping.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_descriptor_mapping.h
new file mode 100644 (file)
index 0000000..e0ec55c
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_descriptor_mapping.h
+ */
+
+#ifndef __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+#define __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+
+#include "mali_osk.h"
+
+/**
+ * The actual descriptor mapping table, never directly accessed by clients
+ */
+typedef struct mali_descriptor_table {
+       u32 * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+       void** mappings; /**< Array of the pointers the descriptors map to */
+} mali_descriptor_table;
+
+/**
+ * The descriptor mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct mali_descriptor_mapping {
+       _mali_osk_mutex_rw_t *lock; /**< Lock protecting access to the mapping object */
+       int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+       int current_nr_mappings; /**< Current number of possible mappings */
+       mali_descriptor_table * table; /**< Pointer to the current mapping table */
+} mali_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object
+ * Create a descriptor mapping capable of holding init_entries growable to max_entries
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Number of entries to max support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param map The map to free
+ */
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param target The value to map to
+ * @return The descriptor allocated, a negative value on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *descriptor);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to a pointer which will receive the stored value
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target);
+
+/**
+ * Set the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to replace the current value with
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target);
+
+/**
+ * Call the specified callback function for each descriptor in map.
+ * Entire function is mutex protected.
+ * @param map The map to do callbacks for
+ * @param callback A callback function which will be called for each entry in map
+ */
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*));
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ *
+ * @return old value of descriptor mapping
+ */
+void *mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor);
+
+#endif /* __MALI_KERNEL_DESCRIPTOR_MAPPING_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_utilization.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_utilization.c
new file mode 100644 (file)
index 0000000..bc1ac87
--- /dev/null
@@ -0,0 +1,439 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_scheduler.h"
+
+/* Thresholds for GP bound detection. */
+#define MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD 240
+#define MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD 250
+
+/* Define how often to calculate and report GPU utilization, in milliseconds */
+static _mali_osk_spinlock_irq_t *time_data_lock;
+
+static u32 num_running_gp_cores;
+static u32 num_running_pp_cores;
+
+static u64 work_start_time_gpu = 0;
+static u64 work_start_time_gp = 0;
+static u64 work_start_time_pp = 0;
+static u64 accumulated_work_time_gpu = 0;
+static u64 accumulated_work_time_gp = 0;
+static u64 accumulated_work_time_pp = 0;
+
+static u64 period_start_time = 0;
+static _mali_osk_timer_t *utilization_timer = NULL;
+static mali_bool timer_running = MALI_FALSE;
+
+static u32 last_utilization_gpu = 0 ;
+static u32 last_utilization_gp = 0 ;
+static u32 last_utilization_pp = 0 ;
+
+static u32 mali_utilization_timeout = 100;
+void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data) = NULL;
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+extern void mali_power_performance_policy_callback(struct mali_gpu_utilization_data *data);
+#define NUMBER_OF_NANOSECONDS_PER_SECOND  1000000000ULL
+
+/**
+ * Compute the window-render rate for the elapsed period.
+ * Fetches (and clears) the per-session window-job counter via
+ * mali_session_max_window_num(), scales it to nanoseconds to avoid float
+ * division, then shifts both operands down equally so a 32-bit divide can
+ * be used.
+ * @param time_period Length of the measurement period in nanoseconds
+ * @return Window jobs per second (integer)
+ *
+ * NOTE(review): if shift_val exceeds log2(time_period), time_period_shift
+ * becomes 0 and the division faults; presumably unreachable with realistic
+ * window counts and a ~100 ms period -- confirm.
+ */
+static u32 calculate_window_render_fps(u64 time_period)
+{
+       u32 max_window_number;
+       u64 tmp;
+       u64 max = time_period;
+       u32 leading_zeroes;
+       u32 shift_val;
+       u32 time_period_shift;
+       u32 max_window_number_shift;
+       u32 ret_val;
+
+       max_window_number = mali_session_max_window_num();
+       /* To avoid float division, extend the dividend to ns unit */
+       tmp = (u64)max_window_number * NUMBER_OF_NANOSECONDS_PER_SECOND;
+       if (tmp > time_period) {
+               max = tmp;
+       }
+
+       /*
+        * We may have 64-bit values, a dividend or a divisor or both
+        * To avoid dependencies to a 64-bit divider, we shift down the two values
+        * equally first.
+        */
+       leading_zeroes = _mali_osk_clz((u32)(max >> 32));
+       shift_val = 32 - leading_zeroes;
+
+       time_period_shift = (u32)(time_period >> shift_val);
+       max_window_number_shift = (u32)(tmp >> shift_val);
+
+       ret_val = max_window_number_shift / time_period_shift;
+
+       return ret_val;
+}
+#endif  /* defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY) */
+
+/**
+ * Timer callback: compute GPU/GP/PP utilization for the elapsed period.
+ * Utilization is expressed in parts of 256 (256 == fully busy for the whole
+ * period).  If no work was recorded during the period the timer is left
+ * stopped and zero usage is reported; otherwise in-flight work is accounted
+ * up to "now", a new period is started, the timer is re-armed, and the
+ * installed utilization callback (if any) is invoked with the new numbers.
+ * Also toggles the GP-bound scheduler hint based on the GP/PP thresholds.
+ * @param arg Unused timer-callback argument
+ */
+static void calculate_gpu_utilization(void* arg)
+{
+       u64 time_now;
+       u64 time_period;
+       u32 leading_zeroes;
+       u32 shift_val;
+       u32 work_normalized_gpu;
+       u32 work_normalized_gp;
+       u32 work_normalized_pp;
+       u32 period_normalized;
+       u32 utilization_gpu;
+       u32 utilization_gp;
+       u32 utilization_pp;
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+       u32 window_render_fps;
+#endif
+
+       _mali_osk_spinlock_irq_lock(time_data_lock);
+
+       if (accumulated_work_time_gpu == 0 && work_start_time_gpu == 0) {
+               /*
+                * No work done for this period
+                * - No need to reschedule timer
+                * - Report zero usage
+                */
+               timer_running = MALI_FALSE;
+
+               last_utilization_gpu = 0;
+               last_utilization_gp = 0;
+               last_utilization_pp = 0;
+
+               _mali_osk_spinlock_irq_unlock(time_data_lock);
+
+               /* Callback runs without the spinlock held */
+               if (NULL != mali_utilization_callback) {
+                       struct mali_gpu_utilization_data data = { 0, };
+                       mali_utilization_callback(&data);
+               }
+
+               mali_scheduler_hint_disable(MALI_SCHEDULER_HINT_GP_BOUND);
+
+               return;
+       }
+
+       time_now = _mali_osk_time_get_ns();
+
+       time_period = time_now - period_start_time;
+
+       /* If we are currently busy, update working period up to now */
+       if (work_start_time_gpu != 0) {
+               accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+               work_start_time_gpu = time_now;
+
+               /* GP and/or PP will also be busy if the GPU is busy at this point */
+
+               if (work_start_time_gp != 0) {
+                       accumulated_work_time_gp += (time_now - work_start_time_gp);
+                       work_start_time_gp = time_now;
+               }
+
+               if (work_start_time_pp != 0) {
+                       accumulated_work_time_pp += (time_now - work_start_time_pp);
+                       work_start_time_pp = time_now;
+               }
+       }
+
+       /*
+        * We have two 64-bit values, a dividend and a divisor.
+        * To avoid dependencies to a 64-bit divider, we shift down the two values
+        * equally first.
+        * We shift the dividend up and possibly the divisor down, making the result X in 256.
+        */
+
+       /* Shift the 64-bit values down so they fit inside a 32-bit integer */
+       leading_zeroes = _mali_osk_clz((u32)(time_period >> 32));
+       shift_val = 32 - leading_zeroes;
+       work_normalized_gpu = (u32)(accumulated_work_time_gpu >> shift_val);
+       work_normalized_gp = (u32)(accumulated_work_time_gp >> shift_val);
+       work_normalized_pp = (u32)(accumulated_work_time_pp >> shift_val);
+       period_normalized = (u32)(time_period >> shift_val);
+
+       /*
+        * Now, we should report the usage in parts of 256
+        * this means we must shift up the dividend or down the divisor by 8
+        * (we could do a combination, but we just use one for simplicity,
+        * but the end result should be good enough anyway)
+        */
+       if (period_normalized > 0x00FFFFFF) {
+               /* The divisor is so big that it is safe to shift it down */
+               period_normalized >>= 8;
+       } else {
+               /*
+                * The divisor is so small that we can shift up the dividend, without losing any data.
+                * (dividend is always smaller than the divisor)
+                */
+               work_normalized_gpu <<= 8;
+               work_normalized_gp <<= 8;
+               work_normalized_pp <<= 8;
+       }
+
+       utilization_gpu = work_normalized_gpu / period_normalized;
+       utilization_gp = work_normalized_gp / period_normalized;
+       utilization_pp = work_normalized_pp / period_normalized;
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+       window_render_fps = calculate_window_render_fps(time_period);
+#endif
+
+       last_utilization_gpu = utilization_gpu;
+       last_utilization_gp = utilization_gp;
+       last_utilization_pp = utilization_pp;
+
+       /* GP-bound: GP nearly saturated while PP has headroom */
+       if ((MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD < last_utilization_gp) &&
+           (MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD > last_utilization_pp)) {
+               mali_scheduler_hint_enable(MALI_SCHEDULER_HINT_GP_BOUND);
+       } else {
+               mali_scheduler_hint_disable(MALI_SCHEDULER_HINT_GP_BOUND);
+       }
+
+       /* starting a new period */
+       accumulated_work_time_gpu = 0;
+       accumulated_work_time_gp = 0;
+       accumulated_work_time_pp = 0;
+       period_start_time = time_now;
+
+       _mali_osk_spinlock_irq_unlock(time_data_lock);
+
+       /* Re-arm for the next period; timer_running stays MALI_TRUE */
+       _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+
+       if (NULL != mali_utilization_callback) {
+               struct mali_gpu_utilization_data data = {
+                       utilization_gpu, utilization_gp, utilization_pp,
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+                       window_render_fps, window_render_fps
+#endif
+               };
+               mali_utilization_callback(&data);
+       }
+}
+
+/**
+ * Initialize GPU utilization tracking.
+ * Picks up a platform-specific reporting interval and callback from the OSK
+ * device data (when USING_GPU_UTILIZATION), falls back to the
+ * power-performance-policy callback when that config is enabled, then
+ * creates the IRQ spinlock and the (not yet started) utilization timer.
+ * The timer is first armed from mali_utilization_gp_start()/pp_start().
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if lock or
+ *         timer allocation fails
+ */
+_mali_osk_errcode_t mali_utilization_init(void)
+{
+#if USING_GPU_UTILIZATION
+       struct _mali_osk_device_data data;
+       if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+               /* Use device specific settings (if defined) */
+               if (0 != data.utilization_interval) {
+                       mali_utilization_timeout = data.utilization_interval;
+               }
+               if (NULL != data.utilization_callback) {
+                       mali_utilization_callback = data.utilization_callback;
+                       MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Platform has it's own policy \n"));
+                       MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Utilization handler installed with interval %u\n", mali_utilization_timeout));
+               }
+       }
+#endif
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+       /* No platform callback: fall back to the built-in policy algorithm */
+       if (mali_utilization_callback == NULL) {
+               MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: MALI Power Performance Policy Algorithm \n"));
+               mali_utilization_callback = mali_power_performance_policy_callback;
+       }
+#endif
+
+       if (NULL == mali_utilization_callback) {
+               MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: No utilization handler installed\n"));
+       }
+
+       time_data_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_UTILIZATION);
+
+       if (NULL == time_data_lock) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       num_running_gp_cores = 0;
+       num_running_pp_cores = 0;
+
+       utilization_timer = _mali_osk_timer_init();
+       if (NULL == utilization_timer) {
+               /* Undo the lock creation on failure */
+               _mali_osk_spinlock_irq_term(time_data_lock);
+               return _MALI_OSK_ERR_FAULT;
+       }
+       _mali_osk_timer_setcallback(utilization_timer, calculate_gpu_utilization, NULL);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Stop the utilization timer (used around system suspend).
+ * Clears timer_running under the lock, then deletes the timer after
+ * dropping the lock -- presumably _mali_osk_timer_del may wait for a
+ * running callback that also takes time_data_lock; confirm against the
+ * OSK timer semantics.  No-op if the timer was not running.
+ */
+void mali_utilization_suspend(void)
+{
+       _mali_osk_spinlock_irq_lock(time_data_lock);
+
+       if (timer_running == MALI_TRUE) {
+               timer_running = MALI_FALSE;
+               _mali_osk_spinlock_irq_unlock(time_data_lock);
+               _mali_osk_timer_del(utilization_timer);
+               return;
+       }
+
+       _mali_osk_spinlock_irq_unlock(time_data_lock);
+}
+
+/**
+ * Tear down utilization tracking: stop and free the timer, then free the
+ * lock.  Counterpart of mali_utilization_init(); must not race with the
+ * start/end notifications.
+ */
+void mali_utilization_term(void)
+{
+       if (NULL != utilization_timer) {
+               _mali_osk_timer_del(utilization_timer);
+               timer_running = MALI_FALSE;
+               _mali_osk_timer_term(utilization_timer);
+               utilization_timer = NULL;
+       }
+
+       _mali_osk_spinlock_irq_term(time_data_lock);
+}
+
+/**
+ * Notify that a GP job is about to execute on a core.
+ * On the 0 -> 1 transition of running GP cores, records the GP (and, if no
+ * PP core is active, the whole-GPU) busy-start timestamp, and starts a new
+ * measurement period plus the utilization timer if it was not running.
+ * The timer is armed after the lock is dropped.
+ */
+void mali_utilization_gp_start(void)
+{
+       _mali_osk_spinlock_irq_lock(time_data_lock);
+
+       ++num_running_gp_cores;
+       if (1 == num_running_gp_cores) {
+               u64 time_now = _mali_osk_time_get_ns();
+
+               /* First GP core started, consider GP busy from now and onwards */
+               work_start_time_gp = time_now;
+
+               if (0 == num_running_pp_cores) {
+                       /*
+                        * There are no PP cores running, so this is also the point
+                        * at which we consider the GPU to be busy as well.
+                        */
+                       work_start_time_gpu = time_now;
+               }
+
+               /* Start a new period (and timer) if needed */
+               if (timer_running != MALI_TRUE) {
+                       timer_running = MALI_TRUE;
+                       period_start_time = time_now;
+
+                       /* Clear session->number_of_window_jobs */
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+                       mali_session_max_window_num();
+#endif
+                       _mali_osk_spinlock_irq_unlock(time_data_lock);
+
+                       _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+               } else {
+                       _mali_osk_spinlock_irq_unlock(time_data_lock);
+               }
+       } else {
+               /* Nothing to do */
+               _mali_osk_spinlock_irq_unlock(time_data_lock);
+       }
+}
+
+/**
+ * Notify that a PP job is about to execute on a core.
+ * Mirror of mali_utilization_gp_start(): on the 0 -> 1 transition of
+ * running PP cores, records the PP (and, if no GP core is active, the
+ * whole-GPU) busy-start timestamp, and starts a new measurement period
+ * plus the utilization timer if it was not running.
+ */
+void mali_utilization_pp_start(void)
+{
+       _mali_osk_spinlock_irq_lock(time_data_lock);
+
+       ++num_running_pp_cores;
+       if (1 == num_running_pp_cores) {
+               u64 time_now = _mali_osk_time_get_ns();
+
+               /* First PP core started, consider PP busy from now and onwards */
+               work_start_time_pp = time_now;
+
+               if (0 == num_running_gp_cores) {
+                       /*
+                        * There are no GP cores running, so this is also the point
+                        * at which we consider the GPU to be busy as well.
+                        */
+                       work_start_time_gpu = time_now;
+               }
+
+               /* Start a new period (and timer) if needed */
+               if (timer_running != MALI_TRUE) {
+                       timer_running = MALI_TRUE;
+                       period_start_time = time_now;
+
+                       /* Clear session->number_of_window_jobs */
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+                       mali_session_max_window_num();
+#endif
+                       _mali_osk_spinlock_irq_unlock(time_data_lock);
+
+                       _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+               } else {
+                       _mali_osk_spinlock_irq_unlock(time_data_lock);
+               }
+       } else {
+               /* Nothing to do */
+               _mali_osk_spinlock_irq_unlock(time_data_lock);
+       }
+}
+
+/**
+ * Notify that a GP job has completed on a core.
+ * On the 1 -> 0 transition of running GP cores, folds the busy interval
+ * into accumulated_work_time_gp (and into the whole-GPU accumulator when
+ * no PP core is active either) and marks GP idle.
+ */
+void mali_utilization_gp_end(void)
+{
+       _mali_osk_spinlock_irq_lock(time_data_lock);
+
+       --num_running_gp_cores;
+       if (0 == num_running_gp_cores) {
+               u64 time_now = _mali_osk_time_get_ns();
+
+               /* Last GP core ended, consider GP idle from now and onwards */
+               accumulated_work_time_gp += (time_now - work_start_time_gp);
+               work_start_time_gp = 0;
+
+               if (0 == num_running_pp_cores) {
+                       /*
+                        * There are no PP cores running, so this is also the point
+                        * at which we consider the GPU to be idle as well.
+                        */
+                       accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+                       work_start_time_gpu = 0;
+               }
+       }
+
+       _mali_osk_spinlock_irq_unlock(time_data_lock);
+}
+
+/**
+ * Notify that a PP job has completed on a core.
+ * Mirror of mali_utilization_gp_end(): on the 1 -> 0 transition of running
+ * PP cores, folds the busy interval into accumulated_work_time_pp (and
+ * into the whole-GPU accumulator when no GP core is active either).
+ */
+void mali_utilization_pp_end(void)
+{
+       _mali_osk_spinlock_irq_lock(time_data_lock);
+
+       --num_running_pp_cores;
+       if (0 == num_running_pp_cores) {
+               u64 time_now = _mali_osk_time_get_ns();
+
+               /* Last PP core ended, consider PP idle from now and onwards */
+               accumulated_work_time_pp += (time_now - work_start_time_pp);
+               work_start_time_pp = 0;
+
+               if (0 == num_running_gp_cores) {
+                       /*
+                        * There are no GP cores running, so this is also the point
+                        * at which we consider the GPU to be idle as well.
+                        */
+                       accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+                       work_start_time_gpu = 0;
+               }
+       }
+
+       _mali_osk_spinlock_irq_unlock(time_data_lock);
+}
+
+/** UK entry point: last combined GPU utilization, in parts of 256. */
+u32 _mali_ukk_utilization_gp_pp(void)
+{
+       return last_utilization_gpu;
+}
+
+/** UK entry point: last GP utilization, in parts of 256. */
+u32 _mali_ukk_utilization_gp(void)
+{
+       return last_utilization_gp;
+}
+
+/** UK entry point: last PP utilization, in parts of 256. */
+u32 _mali_ukk_utilization_pp(void)
+{
+       return last_utilization_pp;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_utilization.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_utilization.h
new file mode 100644 (file)
index 0000000..33c7593
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_UTILIZATION_H__
+#define __MALI_KERNEL_UTILIZATION_H__
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_osk.h"
+
+extern void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data);
+
+/**
+ * Initialize/start the Mali GPU utilization metrics reporting.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_utilization_init(void);
+
+/**
+ * Terminate the Mali GPU utilization metrics reporting
+ */
+void mali_utilization_term(void);
+
+/**
+ * Check if Mali utilization is enabled
+ * @return MALI_TRUE if a utilization callback is installed, MALI_FALSE otherwise
+ */
+MALI_STATIC_INLINE mali_bool mali_utilization_enabled(void)
+{
+       return (NULL != mali_utilization_callback);
+}
+
+/**
+ * Should be called when a job is about to execute a GP job
+ */
+void mali_utilization_gp_start(void);
+
+/**
+ * Should be called when a job has completed executing a GP job
+ */
+void mali_utilization_gp_end(void);
+
+/**
+ * Should be called when a job is about to execute a PP job
+ */
+void mali_utilization_pp_start(void);
+
+/**
+ * Should be called when a job has completed executing a PP job
+ */
+void mali_utilization_pp_end(void);
+
+/**
+ * Should be called to stop the utilization timer during system suspend
+ */
+void mali_utilization_suspend(void);
+
+
+#endif /* __MALI_KERNEL_UTILIZATION_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_vsync.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_kernel_vsync.c
new file mode 100644 (file)
index 0000000..a31a9ac
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+/**
+ * Handle a VSYNC begin/end-wait event reported from user space.
+ * With CONFIG_MALI400_PROFILING enabled, the event is injected into the
+ * profiling stream here (pid/tid added in kernel space) so user space is
+ * spared a second call; otherwise the event is only debug-printed.
+ * @param args UK argument struct; args->event selects begin or end of wait
+ * @return Always MALI_SUCCESS
+ */
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
+{
+       _mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
+       MALI_IGNORE(event); /* event is not used for release code, and that is OK */
+
+#if defined(CONFIG_MALI400_PROFILING)
+       /*
+        * Manually generate user space events in kernel space.
+        * This saves user space from calling kernel space twice in this case.
+        * We just need to remember to add pid and tid manually.
+        */
+       if ( event==_MALI_UK_VSYNC_EVENT_BEGIN_WAIT) {
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+                                             _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+       }
+
+       if (event==_MALI_UK_VSYNC_EVENT_END_WAIT) {
+
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+                                             _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+       }
+#endif
+
+       MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
+       MALI_SUCCESS;
+}
+
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_l2_cache.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_l2_cache.c
new file mode 100644 (file)
index 0000000..0ddaf7a
--- /dev/null
@@ -0,0 +1,581 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_scheduler.h"
+#include "mali_pm_domain.h"
+
+/**
+ * Size of the Mali L2 cache registers in bytes
+ */
+#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30
+
+/**
+ * Mali L2 cache register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_l2_cache_register {
+       MALI400_L2_CACHE_REGISTER_SIZE         = 0x0004,
+       MALI400_L2_CACHE_REGISTER_STATUS       = 0x0008,
+       /*unused                               = 0x000C */
+       MALI400_L2_CACHE_REGISTER_COMMAND      = 0x0010, /**< Misc cache commands, e.g. clear */
+       MALI400_L2_CACHE_REGISTER_CLEAR_PAGE   = 0x0014,
+       MALI400_L2_CACHE_REGISTER_MAX_READS    = 0x0018, /**< Limit of outstanding read requests */
+       MALI400_L2_CACHE_REGISTER_ENABLE       = 0x001C, /**< Enable misc cache features */
+       MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0020,
+       MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0024,
+       MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x0028,
+       MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x002C,
+} mali_l2_cache_register;
+
+/**
+ * Mali L2 cache commands
+ * These are the commands that can be sent to the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_command {
+       MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, /**< Clear the entire cache */
+       /* Read HW TRM carefully before adding/using other commands than the clear above */
+} mali_l2_cache_command;
+
+/**
+ * Mali L2 cache enable flags
+ * These are the flags that can be written to the enable register of the
+ * Mali L2 cache unit (fixed copy-paste of the "commands" comment above)
+ */
+typedef enum mali_l2_cache_enable {
+       MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /**< Default state of enable register */
+       MALI400_L2_CACHE_ENABLE_ACCESS = 0x01, /**< Permit cacheable accesses */
+       MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, /**< Permit cache read allocate */
+} mali_l2_cache_enable;
+
+/**
+ * Mali L2 cache status bits
+ */
+typedef enum mali_l2_cache_status {
+       MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, /**< Command handler of L2 cache is busy */
+       MALI400_L2_CACHE_STATUS_DATA_BUSY    = 0x02, /**< L2 cache is busy handling data requests */
+} mali_l2_cache_status;
+
+#define MALI400_L2_MAX_READS_DEFAULT 0x1C
+
+static struct mali_l2_cache_core *mali_global_l2_cache_cores[MALI_MAX_NUMBER_OF_L2_CACHE_CORES] = { NULL, };
+static u32 mali_global_num_l2_cache_cores = 0;
+
+int mali_l2_max_reads = MALI400_L2_MAX_READS_DEFAULT;
+
+
+/* Local helper functions */
+static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val);
+
+
+/*
+ * Spinlock wrappers: the counter lock protects the performance-counter
+ * state, the command lock serializes writes to the L2 command register.
+ * With MALI_UPPER_HALF_SCHEDULING defined the locks can be taken from
+ * interrupt (upper-half) context, so the IRQ-safe variants are used.
+ */
+
+/** Take the performance-counter spinlock of @cache. */
+static void mali_l2_cache_counter_lock(struct mali_l2_cache_core *cache)
+{
+#ifdef MALI_UPPER_HALF_SCHEDULING
+       _mali_osk_spinlock_irq_lock(cache->counter_lock);
+#else
+       _mali_osk_spinlock_lock(cache->counter_lock);
+#endif
+}
+
+/** Release the performance-counter spinlock of @cache. */
+static void mali_l2_cache_counter_unlock(struct mali_l2_cache_core *cache)
+{
+#ifdef MALI_UPPER_HALF_SCHEDULING
+       _mali_osk_spinlock_irq_unlock(cache->counter_lock);
+#else
+       _mali_osk_spinlock_unlock(cache->counter_lock);
+#endif
+}
+
+/** Take the command-register spinlock of @cache. */
+static void mali_l2_cache_command_lock(struct mali_l2_cache_core *cache)
+{
+#ifdef MALI_UPPER_HALF_SCHEDULING
+       _mali_osk_spinlock_irq_lock(cache->command_lock);
+#else
+       _mali_osk_spinlock_lock(cache->command_lock);
+#endif
+}
+
+/** Release the command-register spinlock of @cache. */
+static void mali_l2_cache_command_unlock(struct mali_l2_cache_core *cache)
+{
+#ifdef MALI_UPPER_HALF_SCHEDULING
+       _mali_osk_spinlock_irq_unlock(cache->command_lock);
+#else
+       _mali_osk_spinlock_unlock(cache->command_lock);
+#endif
+}
+
+/**
+ * Allocate and initialize an L2 cache core object for @resource and
+ * register it in the global mali_global_l2_cache_cores[] array.
+ *
+ * Resources are acquired in order: object, HW core mapping, command lock,
+ * counter lock.  On any failure everything acquired so far is released in
+ * reverse order and NULL is returned.
+ *
+ * @param resource OS resource describing the register range (non-NULL)
+ * @return the new core on success, NULL on allocation/limit/lock failure
+ */
+struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t *resource)
+{
+       struct mali_l2_cache_core *cache = NULL;
+
+       MALI_DEBUG_PRINT(4, ("Mali L2 cache: Creating Mali L2 cache: %s\n", resource->description));
+
+       if (mali_global_num_l2_cache_cores >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES) {
+               MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 cache core objects created\n"));
+               return NULL;
+       }
+
+       cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core));
+       if (NULL != cache) {
+               cache->core_id =  mali_global_num_l2_cache_cores;
+               cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
+               cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
+               cache->pm_domain = NULL;
+               cache->mali_l2_status = MALI_L2_NORMAL;
+               if (_MALI_OSK_ERR_OK == mali_hw_core_create(&cache->hw_core, resource, MALI400_L2_CACHE_REGISTERS_SIZE)) {
+                       /* Debug-only decode of the SIZE register: bits [23:16] log2 size
+                        * in KB (biased by 10), [15:8] log2 associativity, [7:0] log2
+                        * line size, [31:24] log2 external bus width -- as used below. */
+                       MALI_DEBUG_CODE(u32 cache_size = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_SIZE));
+                       MALI_DEBUG_PRINT(2, ("Mali L2 cache: Created %s: % 3uK, %u-way, % 2ubyte cache line, % 3ubit external bus\n",
+                                            resource->description,
+                                            1 << (((cache_size >> 16) & 0xff) - 10),
+                                            1 << ((cache_size >> 8) & 0xff),
+                                            1 << (cache_size & 0xff),
+                                            1 << ((cache_size >> 24) & 0xff)));
+
+#ifdef MALI_UPPER_HALF_SCHEDULING
+                       cache->command_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
+#else
+                       cache->command_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
+#endif
+                       if (NULL != cache->command_lock) {
+                               /* NOTE(review): the counter lock is also created with the
+                                * L2_COMMAND lock order -- confirm this ordering class is
+                                * intended for the counter lock as well. */
+#ifdef MALI_UPPER_HALF_SCHEDULING
+                               cache->counter_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
+#else
+                               cache->counter_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
+#endif
+                               if (NULL != cache->counter_lock) {
+                                       mali_l2_cache_reset(cache);
+
+                                       cache->last_invalidated_id = 0;
+
+                                       mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = cache;
+                                       mali_global_num_l2_cache_cores++;
+
+                                       return cache;
+                               } else {
+                                       MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n", cache->hw_core.description));
+                               }
+#ifdef MALI_UPPER_HALF_SCHEDULING
+                               _mali_osk_spinlock_irq_term(cache->command_lock);
+#else
+                               _mali_osk_spinlock_term(cache->command_lock);
+#endif
+                       } else {
+                               MALI_PRINT_ERROR(("Mali L2 cache: Failed to create command lock for L2 cache core %s\n", cache->hw_core.description));
+                       }
+
+                       mali_hw_core_delete(&cache->hw_core);
+               }
+
+               _mali_osk_free(cache);
+       } else {
+               MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
+       }
+
+       return NULL;
+}
+
+/**
+ * Tear down an L2 cache core: restore HW defaults, free the locks and HW
+ * core mapping, unregister it from the global array (compacting the array
+ * by moving the last entry into the freed slot), and free the object.
+ *
+ * Writes the MAX_READS/ENABLE registers directly, so the caller must
+ * ensure the core is accessible (powered) -- TODO confirm with callers.
+ */
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache)
+{
+       u32 i;
+
+       /* reset to defaults */
+       mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)MALI400_L2_MAX_READS_DEFAULT);
+       mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_DEFAULT);
+
+#ifdef MALI_UPPER_HALF_SCHEDULING
+       _mali_osk_spinlock_irq_term(cache->counter_lock);
+       _mali_osk_spinlock_irq_term(cache->command_lock);
+#else
+       _mali_osk_spinlock_term(cache->command_lock);
+       _mali_osk_spinlock_term(cache->counter_lock);
+#endif
+
+       mali_hw_core_delete(&cache->hw_core);
+
+       for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
+               if (mali_global_l2_cache_cores[i] == cache) {
+                       mali_global_l2_cache_cores[i] = NULL;
+                       mali_global_num_l2_cache_cores--;
+
+                       if (i != mali_global_num_l2_cache_cores) {
+                               /* We removed a l2 cache from the middle of the array -- move the last
+                                * l2 cache to the current position to close the gap */
+                               mali_global_l2_cache_cores[i] = mali_global_l2_cache_cores[mali_global_num_l2_cache_cores];
+                               mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = NULL;
+                       }
+
+                       break;
+               }
+       }
+
+       _mali_osk_free(cache);
+}
+
+/** @return the unique (creation-order) core ID assigned in mali_l2_cache_create(). */
+u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
+{
+       return cache->core_id;
+}
+
+/**
+ * Select performance-counter event @counter for source @source_id (0 or 1)
+ * on @cache.
+ *
+ * The software copy (counter_src0/1) is always updated; the HW register is
+ * written only when the core is powered on AND the L2 is not paused.  A
+ * paused or powered-down core will get the value reprogrammed later (see
+ * mali_l2_cache_reset() / mali_l2_cache_reset_counters_all()).
+ *
+ * @param counter event number, or MALI_HW_CORE_NO_COUNTER to disable
+ */
+static void mali_l2_cache_core_set_counter_internal(struct mali_l2_cache_core *cache, u32 source_id, u32 counter)
+{
+       u32 value = 0; /* disabled src */
+       u32 reg_offset = 0;
+       mali_bool core_is_on;
+
+       MALI_DEBUG_ASSERT_POINTER(cache);
+
+       core_is_on = mali_l2_cache_lock_power_state(cache);
+
+       mali_l2_cache_counter_lock(cache);
+
+       switch (source_id) {
+       case 0:
+               cache->counter_src0 = counter;
+               reg_offset = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0;
+               break;
+
+       case 1:
+               cache->counter_src1 = counter;
+               reg_offset = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1;
+               break;
+
+       default:
+               MALI_DEBUG_ASSERT(0);
+               break;
+       }
+
+       /* L2 paused: keep the SW state only; HW is reprogrammed on resume */
+       if (MALI_L2_PAUSE == cache->mali_l2_status) {
+               mali_l2_cache_counter_unlock(cache);
+               mali_l2_cache_unlock_power_state(cache);
+               return;
+       }
+
+       if (MALI_HW_CORE_NO_COUNTER != counter) {
+               value = counter;
+       }
+
+       if (MALI_TRUE == core_is_on) {
+               mali_hw_core_register_write(&cache->hw_core, reg_offset, value);
+       }
+
+       mali_l2_cache_counter_unlock(cache);
+       mali_l2_cache_unlock_power_state(cache);
+}
+
+/** Set performance-counter source 0 (wrapper around the internal setter). */
+void mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter)
+{
+       mali_l2_cache_core_set_counter_internal(cache, 0, counter);
+}
+
+/** Set performance-counter source 1 (wrapper around the internal setter). */
+void mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter)
+{
+       mali_l2_cache_core_set_counter_internal(cache, 1, counter);
+}
+
+/** @return the SW copy of counter source 0 (MALI_HW_CORE_NO_COUNTER if disabled). */
+u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache)
+{
+       return cache->counter_src0;
+}
+
+/** @return the SW copy of counter source 1 (MALI_HW_CORE_NO_COUNTER if disabled). */
+u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache)
+{
+       return cache->counter_src1;
+}
+
+/**
+ * Read both performance-counter sources and values from @cache.
+ *
+ * Caller must hold the PM lock and know the core is powered on (see
+ * comment below).  Beware: if the L2 is paused, ALL outputs are left
+ * unwritten; if a source is MALI_HW_CORE_NO_COUNTER its value output is
+ * left unwritten.  Callers must initialize/check accordingly.
+ */
+void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1)
+{
+       MALI_DEBUG_ASSERT(NULL != src0);
+       MALI_DEBUG_ASSERT(NULL != value0);
+       MALI_DEBUG_ASSERT(NULL != src1);
+       MALI_DEBUG_ASSERT(NULL != value1);
+
+       /* Caller must hold the PM lock and know that we are powered on */
+
+       mali_l2_cache_counter_lock(cache);
+
+       if (MALI_L2_PAUSE == cache->mali_l2_status) {
+               mali_l2_cache_counter_unlock(cache);
+
+               return;
+       }
+
+       *src0 = cache->counter_src0;
+       *src1 = cache->counter_src1;
+
+       if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+               *value0 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+       }
+
+       if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+               *value1 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+       }
+
+       mali_l2_cache_counter_unlock(cache);
+}
+
+/**
+ * Reprogram the performance-counter source registers of every registered
+ * L2 cache core from their software copies (counter_src0/1).
+ *
+ * Called when resuming from a global pause (mali_l2_cache_pause_all()).
+ * Returns early without touching HW if a core is still marked paused.
+ * Powered-down cores are skipped; their counters are written by
+ * mali_l2_cache_reset() on power-up.
+ */
+static void mali_l2_cache_reset_counters_all(void)
+{
+       u32 i; /* u32: matches num_cores, avoids signed/unsigned comparison */
+       u32 value;
+       struct mali_l2_cache_core *cache;
+       u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+
+       for (i = 0; i < num_cores; i++) {
+               cache = mali_l2_cache_core_get_glob_l2_core(i);
+               if (MALI_TRUE == mali_l2_cache_lock_power_state(cache)) {
+                       mali_l2_cache_counter_lock(cache);
+
+                       if (MALI_L2_PAUSE == cache->mali_l2_status) {
+                               mali_l2_cache_counter_unlock(cache);
+                               mali_l2_cache_unlock_power_state(cache);
+                               return;
+                       }
+
+                       /* Reset performance counters */
+                       if (MALI_HW_CORE_NO_COUNTER == cache->counter_src0) {
+                               value = 0;
+                       } else {
+                               value = cache->counter_src0;
+                       }
+                       mali_hw_core_register_write(&cache->hw_core,
+                                                   MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, value);
+
+                       if (MALI_HW_CORE_NO_COUNTER == cache->counter_src1) {
+                               value = 0;
+                       } else {
+                               value = cache->counter_src1;
+                       }
+                       mali_hw_core_register_write(&cache->hw_core,
+                                                   MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, value);
+
+                       mali_l2_cache_counter_unlock(cache);
+               }
+
+               /* Released unconditionally -- same pattern as
+                * mali_l2_cache_invalidate_all(). */
+               mali_l2_cache_unlock_power_state(cache);
+       }
+}
+
+
+/** @return the registered L2 core at @index, or NULL if @index is out of range. */
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index)
+{
+       if (mali_global_num_l2_cache_cores > index) {
+               return mali_global_l2_cache_cores[index];
+       }
+
+       return NULL;
+}
+
+/** @return the number of L2 cache cores currently registered. */
+u32 mali_l2_cache_core_get_glob_num_l2_cores(void)
+{
+       return mali_global_num_l2_cache_cores;
+}
+
+/**
+ * Bring @cache to a known state: invalidate its contents, enable cacheable
+ * access and read-allocate, set the max-reads limit, and reprogram any
+ * enabled performance counters from their SW copies.
+ *
+ * If the L2 is paused only the invalidate command is attempted; the
+ * enable/counter registers are left for the resume path.
+ */
+void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
+{
+       /* Invalidate cache (just to keep it in a known state at startup) */
+       mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+
+       mali_l2_cache_counter_lock(cache);
+
+       if (MALI_L2_PAUSE == cache->mali_l2_status) {
+               mali_l2_cache_counter_unlock(cache);
+
+               return;
+       }
+
+       /* Enable cache */
+       mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
+       mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);
+
+       /* Restart any performance counters (if enabled) */
+       if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+               mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, cache->counter_src0);
+       }
+
+       if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+               mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, cache->counter_src1);
+       }
+
+       mali_l2_cache_counter_unlock(cache);
+}
+
+/**
+ * Reset every registered L2 cache core (see mali_l2_cache_reset()).
+ */
+void mali_l2_cache_reset_all(void)
+{
+       u32 i; /* u32: matches num_cores, avoids signed/unsigned comparison */
+       u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+
+       for (i = 0; i < num_cores; i++) {
+               mali_l2_cache_reset(mali_l2_cache_core_get_glob_l2_core(i));
+       }
+}
+
+/**
+ * Unconditionally invalidate (clear) the whole of @cache, recording a new
+ * cache-order ID so later conditional invalidates can be skipped.
+ */
+void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache)
+{
+       MALI_DEBUG_ASSERT_POINTER(cache);
+
+       /* Defensive NULL check in addition to the debug assert above */
+       if (NULL != cache) {
+               cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
+               mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+       }
+}
+
+/**
+ * Invalidate @cache only if no invalidate has happened since job @id.
+ *
+ * @param id cache-order ID of the job requesting the flush
+ * @return MALI_FALSE if the flush was skipped (a newer-or-equal invalidate
+ *         already happened), MALI_TRUE if the clear command was issued
+ */
+mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id)
+{
+       MALI_DEBUG_ASSERT_POINTER(cache);
+
+       if (NULL != cache) {
+               /* If the last cache invalidation was done by a job with a higher id we
+                * don't have to flush. Since user space will store jobs w/ their
+                * corresponding memory in sequence (first job #0, then job #1, ...),
+                * we don't have to flush for job n-1 if job n has already invalidated
+                * the cache since we know for sure that job n-1's memory was already
+                * written when job n was started. */
+               /* Signed compare makes the ordering robust to u32 wrap-around */
+               if (((s32)id) <= ((s32)cache->last_invalidated_id)) {
+                       return MALI_FALSE;
+               } else {
+                       cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
+               }
+
+               mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+       }
+       return MALI_TRUE;
+}
+
+/**
+ * Invalidate every registered L2 cache core that is powered on, recording
+ * a new cache-order ID on each.  Failures are logged, not propagated.
+ */
+void mali_l2_cache_invalidate_all(void)
+{
+       u32 i;
+       for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
+               /*additional check*/
+               if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i])) {
+                       _mali_osk_errcode_t ret;
+                       mali_global_l2_cache_cores[i]->last_invalidated_id = mali_scheduler_get_new_cache_order();
+                       ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+                       if (_MALI_OSK_ERR_OK != ret) {
+                               MALI_PRINT_ERROR(("Failed to invalidate cache\n"));
+                       }
+               }
+               /* NOTE(review): unlock is unconditional -- presumably
+                * mali_pm_domain_lock_state() always takes the lock regardless
+                * of the returned power state; confirm against its definition. */
+               mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
+       }
+}
+
+/**
+ * Issue a CLEAR_PAGE command for each entry of @pages on every powered-on
+ * L2 cache core.  Failures are logged per page, not propagated.
+ *
+ * @param pages     array of page values written to the CLEAR_PAGE register
+ * @param num_pages number of entries in @pages
+ */
+void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages)
+{
+       u32 i;
+       for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
+               /*additional check*/
+               if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i])) {
+                       u32 j;
+                       for (j = 0; j < num_pages; j++) {
+                               _mali_osk_errcode_t ret;
+                               ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, pages[j]);
+                               if (_MALI_OSK_ERR_OK != ret) {
+                                       MALI_PRINT_ERROR(("Failed to invalidate page cache\n"));
+                               }
+                       }
+               }
+               mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
+       }
+}
+
+/**
+ * Lock the power state of @cache's PM domain.
+ * @return MALI_TRUE if the core is powered on (delegates to the PM domain).
+ */
+mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache)
+{
+       return mali_pm_domain_lock_state(cache->pm_domain);
+}
+
+/**
+ * Release the power-state lock taken by mali_l2_cache_lock_power_state().
+ */
+void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache)
+{
+       /* Fixed: 'return <void expression>;' in a void function is a
+        * constraint violation in strict C (C11 6.8.6.4). */
+       mali_pm_domain_unlock_state(cache->pm_domain);
+}
+
+/* -------- local helper functions below -------- */
+
+
+/**
+ * Write @val to L2 register @reg under the command lock.
+ *
+ * Busy-polls (no sleep, bounded to 100000 status reads) until the command
+ * handler is idle before writing, because the L2 silently ignores commands
+ * while busy.
+ *
+ * @return _MALI_OSK_ERR_OK on success,
+ *         _MALI_OSK_ERR_BUSY if the L2 is paused,
+ *         _MALI_OSK_ERR_FAULT if the command handler never went idle
+ */
+static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val)
+{
+       int i = 0;
+       const int loop_count = 100000;
+
+       /*
+        * Grab lock in order to send commands to the L2 cache in a serialized fashion.
+        * The L2 cache will ignore commands if it is busy.
+        */
+       mali_l2_cache_command_lock(cache);
+
+       if (MALI_L2_PAUSE == cache->mali_l2_status) {
+               mali_l2_cache_command_unlock(cache);
+               MALI_DEBUG_PRINT(1, ( "Mali L2 cache: aborting wait for L2 come back\n"));
+
+               MALI_ERROR( _MALI_OSK_ERR_BUSY );
+       }
+
+       /* First, wait for L2 cache command handler to go idle */
+
+       for (i = 0; i < loop_count; i++) {
+               if (!(mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_STATUS) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY)) {
+                       break;
+               }
+       }
+
+       if (i == loop_count) {
+               mali_l2_cache_command_unlock(cache);
+               MALI_DEBUG_PRINT(1, ( "Mali L2 cache: aborting wait for command interface to go idle\n"));
+               MALI_ERROR( _MALI_OSK_ERR_FAULT );
+       }
+
+       /* then issue the command */
+       mali_hw_core_register_write(&cache->hw_core, reg, val);
+
+       mali_l2_cache_command_unlock(cache);
+
+       MALI_SUCCESS;
+}
+
+/**
+ * Mark all L2 cache cores as paused (@pause == MALI_TRUE) or resume them.
+ *
+ * Pausing only flips the SW status flag; the counter/command locks are
+ * bounced on each core so no thread can still be operating on a stale
+ * status (this also provides the needed SMP memory barriers).  On resume,
+ * all cores are invalidated and their counters reprogrammed so SW state
+ * and HW state are consistent again.
+ */
+void mali_l2_cache_pause_all(mali_bool pause)
+{
+       u32 i; /* u32: matches num_cores, avoids signed/unsigned comparison */
+       struct mali_l2_cache_core * cache;
+       u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+       mali_l2_power_status status = MALI_L2_NORMAL;
+
+       if (pause) {
+               status = MALI_L2_PAUSE;
+       }
+
+       for (i = 0; i < num_cores; i++) {
+               cache = mali_l2_cache_core_get_glob_l2_core(i);
+               if (NULL != cache) {
+                       cache->mali_l2_status = status;
+
+                       /* Take and release the counter and command locks to
+                        * ensure there are no active threads that didn't get
+                        * the status flag update.
+                        *
+                        * The locks will also ensure the necessary memory
+                        * barriers are done on SMP systems.
+                        */
+                       mali_l2_cache_counter_lock(cache);
+                       mali_l2_cache_counter_unlock(cache);
+
+                       mali_l2_cache_command_lock(cache);
+                       mali_l2_cache_command_unlock(cache);
+               }
+       }
+
+       /* Resume from pause: do the cache invalidation here to prevent any
+        * loss of cache operation during the pause period to make sure the SW
+        * status is consistent with L2 cache status.
+        */
+       if (!pause) {
+               mali_l2_cache_invalidate_all();
+               mali_l2_cache_reset_counters_all();
+       }
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_l2_cache.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_l2_cache.h
new file mode 100644 (file)
index 0000000..85c513c
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_L2_CACHE_H__
+#define __MALI_KERNEL_L2_CACHE_H__
+
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+
+#define MALI_MAX_NUMBER_OF_L2_CACHE_CORES  3
+/* Maximum 1 GP and 4 PP for an L2 cache core (Mali-400 Quad-core) */
+#define MALI_MAX_NUMBER_OF_GROUPS_PER_L2_CACHE 5
+
+struct mali_group;
+struct mali_pm_domain;
+
+/* Flags describing state of the L2 */
+typedef enum mali_l2_power_status {
+       MALI_L2_NORMAL, /**< L2 is in normal state and operational */
+       MALI_L2_PAUSE,  /**< L2 may not be accessed and may be powered off */
+} mali_l2_power_status;
+
+/**
+ * Definition of the L2 cache core struct
+ * Used to track a L2 cache unit in the system.
+ * Contains information about the mapping of the registers
+ */
+struct mali_l2_cache_core {
+       struct mali_hw_core  hw_core;      /**< Common for all HW cores */
+       u32                  core_id;      /**< Unique core ID */
+#ifdef MALI_UPPER_HALF_SCHEDULING
+       _mali_osk_spinlock_irq_t    *command_lock; /**< Serialize all L2 cache commands */
+       _mali_osk_spinlock_irq_t    *counter_lock; /**< Synchronize L2 cache counter access */
+#else
+       _mali_osk_spinlock_t        *command_lock;
+       _mali_osk_spinlock_t        *counter_lock;
+#endif
+       u32                  counter_src0; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+       u32                  counter_src1; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+       u32                  last_invalidated_id; /**< Cache-order ID of the most recent invalidate (see mali_l2_cache_invalidate_conditional()) */
+       struct mali_pm_domain *pm_domain; /**< PM domain used by the power-state lock helpers */
+       mali_l2_power_status   mali_l2_status; /**< Indicate whether the L2 is paused or not */
+};
+
+_mali_osk_errcode_t mali_l2_cache_initialize(void);
+void mali_l2_cache_terminate(void);
+/**
+ * L2 pause is just a status that the L2 can't be accessed temporarily.
+*/
+void mali_l2_cache_pause_all(mali_bool pause);
+struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t * resource);
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache);
+
+/** Associate @cache with PM @domain (consulted by the power-state lock helpers). */
+MALI_STATIC_INLINE void mali_l2_cache_set_pm_domain(struct mali_l2_cache_core *cache, struct mali_pm_domain *domain)
+{
+       cache->pm_domain = domain;
+}
+
+u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache);
+
+void mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter);
+void mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter);
+u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache);
+u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache);
+void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1);
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index);
+u32 mali_l2_cache_core_get_glob_num_l2_cores(void);
+
+void mali_l2_cache_reset(struct mali_l2_cache_core *cache);
+void mali_l2_cache_reset_all(void);
+
+struct mali_group *mali_l2_cache_get_group(struct mali_l2_cache_core *cache, u32 index);
+
+void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache);
+mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id);
+void mali_l2_cache_invalidate_all(void);
+void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages);
+
+mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache);
+void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache);
+
+#endif /* __MALI_KERNEL_L2_CACHE_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_mem_validation.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_mem_validation.c
new file mode 100644 (file)
index 0000000..e7efa53
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_mem_validation.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+/* MALI_SEC */
+#if 0
+#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
+#define MALI_SEC_MEM_VALIDATION
+#include <linux/cma.h>
+#include <plat/pd.h>
+#include <linux/platform_device.h>
+#endif
+#endif
+
+#define MALI_INVALID_MEM_ADDR 0xFFFFFFFF
+
+typedef struct {
+       u32 phys_base;        /**< Mali physical base of the memory, page aligned */
+       u32 size;             /**< size in bytes of the memory, multiple of page size */
+} _mali_mem_validation_t;
+
+/* MALI_SEC */
+#if defined(MALI_SEC_MEM_VALIDATION)
+extern struct platform_device exynos4_device_pd[];
+#endif
+
+static _mali_mem_validation_t mali_mem_validator = { MALI_INVALID_MEM_ADDR, MALI_INVALID_MEM_ADDR };
+
+/**
+ * Register the single permitted frame-buffer memory range.
+ *
+ * Only one range may ever be registered; a second call fails.  When the
+ * (currently disabled) MALI_SEC_MEM_VALIDATION path is compiled in, @start
+ * and @size are overridden with the "fimd" CMA region of the G3D domain.
+ *
+ * @param start physical base, must be page aligned
+ * @param size  size in bytes, must be a multiple of the page size
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT otherwise
+ */
+_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size)
+{
+       /* MALI_SEC */
+#if defined(MALI_SEC_MEM_VALIDATION)
+       struct cma_info mem_info;
+#endif
+
+       /* Check that no other MEM_VALIDATION resources exist */
+       if (MALI_INVALID_MEM_ADDR != mali_mem_validator.phys_base) {
+               MALI_PRINT_ERROR(("Failed to add frame buffer memory; another range is already specified\n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* MALI_SEC */
+#if defined(MALI_SEC_MEM_VALIDATION)
+       if (cma_info(&mem_info, &exynos4_device_pd[PD_G3D].dev, "fimd")) {
+               MALI_PRINT_ERROR(("Failed to get framebuffer information from CMA\n"));
+               return _MALI_OSK_ERR_FAULT;
+       } else {
+               start = mem_info.lower_bound;
+               size = mem_info.total_size - mem_info.free_size;
+       }
+#endif
+
+       /* Check restrictions on page alignment */
+       if ((0 != (start & (~_MALI_OSK_CPU_PAGE_MASK))) ||
+           (0 != (size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
+               MALI_PRINT_ERROR(("Failed to add frame buffer memory; incorrect alignment\n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       mali_mem_validator.phys_base = start;
+       mali_mem_validator.size = size;
+       MALI_DEBUG_PRINT(2, ("Memory Validator installed for Mali physical address base=0x%08X, size=0x%08X\n",
+                            mali_mem_validator.phys_base, mali_mem_validator.size));
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Validate that [phys_addr, phys_addr + size) is non-empty, does not wrap
+ * around 32 bits, is page aligned, and lies entirely within the range
+ * registered via mali_mem_validation_add_range().
+ *
+ * If no range was registered, phys_base is MALI_INVALID_MEM_ADDR so the
+ * containment test fails and the check rejects everything.
+ *
+ * @return _MALI_OSK_ERR_OK if accepted, _MALI_OSK_ERR_FAULT otherwise
+ */
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size)
+{
+       if (phys_addr < (phys_addr + size)) { /* Don't allow overflow (or zero size) */
+               if ((0 == ( phys_addr & (~_MALI_OSK_CPU_PAGE_MASK))) &&
+                   (0 == ( size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
+                       /* Both the first and the last byte of the request must fall
+                        * inside the validator's range (unsigned, wrap-safe compares) */
+                       if ((phys_addr          >= mali_mem_validator.phys_base) &&
+                           ((phys_addr + (size - 1)) >= mali_mem_validator.phys_base) &&
+                           (phys_addr          <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) &&
+                           ((phys_addr + (size - 1)) <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) ) {
+                               MALI_DEBUG_PRINT(3, ("Accepted range 0x%08X + size 0x%08X (= 0x%08X)\n", phys_addr, size, (phys_addr + size - 1)));
+                               return _MALI_OSK_ERR_OK;
+                       }
+               }
+       }
+
+       MALI_PRINT_ERROR(("MALI PHYSICAL RANGE VALIDATION ERROR: The range supplied was: phys_base=0x%08X, size=0x%08X\n", phys_addr, size));
+
+       return _MALI_OSK_ERR_FAULT;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_mem_validation.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_mem_validation.h
new file mode 100644 (file)
index 0000000..1eeab38
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MEM_VALIDATION_H__
+#define __MALI_MEM_VALIDATION_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size);
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size);
+
+#endif /* __MALI_MEM_VALIDATION_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_mmu.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_mmu.c
new file mode 100644 (file)
index 0000000..fb3a22b
--- /dev/null
@@ -0,0 +1,430 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_ukk.h"
+
+#include "mali_mmu.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_mmu_page_directory.h"
+
+/**
+ * Size of the MMU registers in bytes
+ */
+#define MALI_MMU_REGISTERS_SIZE 0x24
+
+/**
+ * MMU commands
+ * These are the commands that can be sent
+ * to the MMU unit.
+ * Each value is written to MALI_MMU_REGISTER_COMMAND
+ * (see e.g. mali_mmu_enable_paging()).
+ */
+typedef enum mali_mmu_command {
+       MALI_MMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
+       MALI_MMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
+       MALI_MMU_COMMAND_ENABLE_STALL = 0x02, /**<  Enable stall on page fault */
+       MALI_MMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
+       MALI_MMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
+       MALI_MMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
+       MALI_MMU_COMMAND_HARD_RESET = 0x06 /**< Reset the MMU back to power-on settings */
+} mali_mmu_command;
+
+/* IRQ-probe callbacks handed to _mali_osk_irq_init() in mali_mmu_create(). */
+static void mali_mmu_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data);
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu);
+
+/* page fault queue flush helper pages
+ * note that the mapping pointers are currently unused outside of the initialization functions */
+static u32 mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_page_directory_mapping = NULL;
+static u32 mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_page_table_mapping = NULL;
+static u32 mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_data_page_mapping = NULL;
+
+/* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
+static u32 mali_empty_page_directory_phys   = MALI_INVALID_PAGE;
+static mali_io_address mali_empty_page_directory_virt = NULL;
+
+
+/* Allocate the module-global MMU helper pages: the shared empty page
+ * directory and the page fault flush pages. Returns _MALI_OSK_ERR_NOMEM on
+ * any allocation failure (already-allocated pages are released again). */
+_mali_osk_errcode_t mali_mmu_initialize(void)
+{
+       /* allocate the helper pages */
+       mali_empty_page_directory_phys = mali_allocate_empty_page(&mali_empty_page_directory_virt);
+       if(0 == mali_empty_page_directory_phys) {
+               MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate empty page directory.\n"));
+               mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       if (_MALI_OSK_ERR_OK != mali_create_fault_flush_pages(&mali_page_fault_flush_page_directory,
+               &mali_page_fault_flush_page_directory_mapping,
+               &mali_page_fault_flush_page_table,
+               &mali_page_fault_flush_page_table_mapping,
+               &mali_page_fault_flush_data_page,
+               &mali_page_fault_flush_data_page_mapping)) {
+               MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate fault flush pages\n"));
+               /* Roll back the empty page directory allocated above. */
+               mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt);
+               mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+               mali_empty_page_directory_virt = NULL;
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Release the module-global helper pages allocated by mali_mmu_initialize()
+ * and reset the globals so a later re-initialize starts from a clean state. */
+void mali_mmu_terminate(void)
+{
+       MALI_DEBUG_PRINT(3, ("Mali MMU: terminating\n"));
+
+       /* Free global helper pages */
+       mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt);
+       mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+       mali_empty_page_directory_virt = NULL;
+
+       /* Free the page fault flush pages */
+       mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory, &mali_page_fault_flush_page_directory_mapping,
+                                      &mali_page_fault_flush_page_table, &mali_page_fault_flush_page_table_mapping,
+                                      &mali_page_fault_flush_data_page, &mali_page_fault_flush_data_page_mapping);
+}
+
+/* Create an MMU core object for the HW described by 'resource' and attach it
+ * to 'group'. For a virtual MMU (is_virtual) the reset and IRQ setup steps
+ * are skipped. Returns the new object, or NULL on failure after unwinding
+ * all partially constructed state (group attachment, HW core, allocation). */
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual)
+{
+       struct mali_mmu_core* mmu = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(resource);
+
+       MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));
+
+       mmu = _mali_osk_calloc(1,sizeof(struct mali_mmu_core));
+       if (NULL != mmu) {
+               if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE)) {
+                       if (_MALI_OSK_ERR_OK == mali_group_add_mmu_core(group, mmu)) {
+                               if (is_virtual) {
+                                       /* Skip reset and IRQ setup for virtual MMU */
+                                       return mmu;
+                               }
+
+                               if (_MALI_OSK_ERR_OK == mali_mmu_reset(mmu)) {
+                                       /* Setup IRQ handlers (which will do IRQ probing if needed) */
+                                       mmu->irq = _mali_osk_irq_init(resource->irq,
+                                                                     mali_group_upper_half_mmu,
+                                                                     group,
+                                                                     mali_mmu_probe_trigger,
+                                                                     mali_mmu_probe_ack,
+                                                                     mmu,
+                                                                     resource->description);
+                                       if (NULL != mmu->irq) {
+                                               return mmu;
+                                       } else {
+                                               MALI_PRINT_ERROR(("Mali MMU: Failed to setup interrupt handlers for MMU %s\n", mmu->hw_core.description));
+                                       }
+                               }
+                               /* Reset or IRQ setup failed: detach from the group again. */
+                               mali_group_remove_mmu_core(group);
+                       } else {
+                               MALI_PRINT_ERROR(("Mali MMU: Failed to add core %s to group\n", mmu->hw_core.description));
+                       }
+                       /* Unwind the HW core register mapping on any later failure. */
+                       mali_hw_core_delete(&mmu->hw_core);
+               }
+
+               _mali_osk_free(mmu);
+       } else {
+               MALI_PRINT_ERROR(("Failed to allocate memory for MMU\n"));
+       }
+
+       return NULL;
+}
+
+/* Tear down an MMU core object created by mali_mmu_create(): release the
+ * IRQ handler (if one was installed — virtual MMUs skip IRQ setup), the HW
+ * core register mapping, and the object itself. */
+void mali_mmu_delete(struct mali_mmu_core *mmu)
+{
+       _mali_osk_irq_t *irq_handler = mmu->irq;
+
+       if (irq_handler != NULL) {
+               _mali_osk_irq_term(irq_handler);
+       }
+
+       mali_hw_core_delete(&mmu->hw_core);
+       _mali_osk_free(mmu);
+}
+
+/* Turn on address translation and busy-poll the STATUS register until the
+ * paging-enabled bit is observed (bounded by MALI_REG_POLL_COUNT_FAST). */
+static void mali_mmu_enable_paging(struct mali_mmu_core *mmu)
+{
+       int i;
+
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+               if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_PAGING_ENABLED) {
+                       break;
+               }
+       }
+       /* Timed out: log the raw status, but continue (no error is propagated). */
+       if (MALI_REG_POLL_COUNT_FAST == i) {
+               MALI_PRINT_ERROR(("Enable paging request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+       }
+}
+
+/**
+ * Issues the enable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to enable the stall on
+ * @return MALI_TRUE if HW stall was successfully engaged (or implicit because
+ *         paging is off), otherwise MALI_FALSE (page fault active or timeout)
+ */
+static mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu)
+{
+       int i;
+       u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+       /* With paging disabled the MMU performs no translations, so no
+        * explicit stall is required. */
+       if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED) ) {
+               MALI_DEBUG_PRINT(4, ("MMU stall is implicit when Paging is not enabled.\n"));
+               return MALI_TRUE;
+       }
+
+       if ( mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) {
+               MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it is in pagefault state.\n"));
+               return MALI_FALSE;
+       }
+
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+
+       /* Busy-poll until the stall engages, a page fault shows up, or paging
+        * gets disabled underneath us (each condition exits the loop early). */
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+               mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+               if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+                       break;
+               }
+               if ((mmu_status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) && (0 == (mmu_status & MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE))) {
+                       break;
+               }
+               if (0 == (mmu_status & ( MALI_MMU_STATUS_BIT_PAGING_ENABLED ))) {
+                       break;
+               }
+       }
+       if (MALI_REG_POLL_COUNT_FAST == i) {
+               MALI_DEBUG_PRINT(2, ("Enable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+               return MALI_FALSE;
+       }
+
+       /* A page fault raced with the stall request; the stall did not engage. */
+       if ( mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) {
+               MALI_DEBUG_PRINT(2, ("Aborting MMU stall request since it has a pagefault.\n"));
+               return MALI_FALSE;
+       }
+
+       return MALI_TRUE;
+}
+
+/**
+ * Issues the disable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to disable the stall on
+ */
+static void mali_mmu_disable_stall(struct mali_mmu_core *mmu)
+{
+       int i;
+       u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+       if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+               MALI_DEBUG_PRINT(3, ("MMU disable skipped since it was not enabled.\n"));
+               return;
+       }
+       if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+               MALI_DEBUG_PRINT(2, ("Aborting MMU disable stall request since it is in pagefault state.\n"));
+               return;
+       }
+
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+
+       /* Busy-poll until the stall is released, a page fault appears, or
+        * paging gets disabled underneath us. */
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+               u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+               if (0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE)) {
+                       break;
+               }
+               if (status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+                       break;
+               }
+               /* BUGFIX: the original tested the stale pre-command snapshot
+                * (mmu_status), which is known to have PAGING_ENABLED set at
+                * this point, so the exit condition could never trigger.
+                * Re-test the freshly read status instead, mirroring the
+                * polling loop in mali_mmu_enable_stall(). */
+               if (0 == (status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+                       break;
+               }
+       }
+       if (MALI_REG_POLL_COUNT_FAST == i) {
+               MALI_DEBUG_PRINT(1,("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+       }
+}
+
+/* Acknowledge a handled page fault so the MMU leaves page fault mode. */
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu)
+{
+       MALI_DEBUG_PRINT(4, ("Mali MMU: %s: Leaving page fault mode\n", mmu->hw_core.description));
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE);
+}
+
+/* Hard-reset the MMU. A scratch value is first written to DTE_ADDR so reset
+ * completion can be detected: it reads back as 0xCAFEB000 (the register
+ * masks the low 12 bits) and is cleared to 0 when the reset has finished. */
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu)
+{
+       int i;
+
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, 0xCAFEBABE);
+       MALI_DEBUG_ASSERT(0xCAFEB000 == mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR));
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_HARD_RESET);
+
+       /* Poll until the scratch value has been cleared by the reset. */
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+               if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR) == 0) {
+                       break;
+               }
+       }
+       if (MALI_REG_POLL_COUNT_FAST == i) {
+               MALI_PRINT_ERROR(("Reset request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Reset the MMU to a safe default state: interrupts unmasked (page fault +
+ * read bus error), the global empty page directory active, paging enabled.
+ * Returns _MALI_OSK_ERR_OK if the raw reset succeeded (even if the stall
+ * attempt before it failed); if the raw reset failed, returns
+ * _MALI_OSK_ERR_BUSY when stalling had failed too, else _MALI_OSK_ERR_FAULT. */
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu)
+{
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+       mali_bool stall_success;
+       MALI_DEBUG_ASSERT_POINTER(mmu);
+
+       stall_success = mali_mmu_enable_stall(mmu);
+       if (!stall_success) {
+               err = _MALI_OSK_ERR_BUSY;
+       }
+
+       MALI_DEBUG_PRINT(3, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->hw_core.description));
+
+       if (_MALI_OSK_ERR_OK == mali_mmu_raw_reset(mmu)) {
+               mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+               /* no session is active, so just activate the empty page directory */
+               mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory_phys);
+               mali_mmu_enable_paging(mmu);
+               err = _MALI_OSK_ERR_OK;
+       }
+       mali_mmu_disable_stall(mmu);
+
+       return err;
+}
+
+/* Flush the MMU's page table cache (TLB).
+ * Returns MALI_TRUE if the MMU could be stalled around the zap (the stall is
+ * then released again); MALI_FALSE if stalling failed, in which case no
+ * disable-stall is attempted. The zap command is issued either way. */
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu)
+{
+       const mali_bool stalled = mali_mmu_enable_stall(mmu);
+
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+
+       if (MALI_FALSE == stalled) {
+               /* False means that it is in Pagefault state. Not possible to disable_stall then */
+               return MALI_FALSE;
+       }
+
+       mali_mmu_disable_stall(mmu);
+       return MALI_TRUE;
+}
+
+/* Issue a TLB zap without stalling the MMU first; the caller must ensure
+ * the MMU is in a state where this is safe. */
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu)
+{
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+}
+
+
+/* Invalidate the cached translation covering a single Mali virtual address.
+ * NOTE(review): the value written is MALI_MMU_PDE_ENTRY(mali_address), i.e.
+ * a page directory index — confirm against the HW spec that ZAP_ONE_LINE
+ * expects this rather than the page address itself. */
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address)
+{
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_ZAP_ONE_LINE, MALI_MMU_PDE_ENTRY(mali_address));
+}
+
+/* Point the MMU at a new page directory and zap the TLB so no stale
+ * translations from the previous address space survive. */
+static void mali_mmu_activate_address_space(struct mali_mmu_core *mmu, u32 page_directory)
+{
+       /* The MMU must be in stalled or page fault mode, for this writing to work */
+       MALI_DEBUG_ASSERT( 0 != ( mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)
+                                 & (MALI_MMU_STATUS_BIT_STALL_ACTIVE|MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) ) );
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, page_directory);
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+
+}
+
+/* Switch this MMU to the given page directory. The stall is asserted to
+ * succeed: callers must not invoke this while the core is in page fault
+ * mode (use the fault-flush variant for that case). */
+void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir)
+{
+       mali_bool stall_success;
+       MALI_DEBUG_ASSERT_POINTER(mmu);
+
+       MALI_DEBUG_PRINT(5, ("Asked to activate page directory 0x%x on MMU %s\n", pagedir, mmu->hw_core.description));
+
+       stall_success = mali_mmu_enable_stall(mmu);
+       MALI_DEBUG_ASSERT(stall_success);
+       MALI_IGNORE(stall_success);
+       mali_mmu_activate_address_space(mmu, pagedir->page_directory);
+       mali_mmu_disable_stall(mmu);
+}
+
+/* Park the MMU on the global empty page directory (no valid mappings),
+ * e.g. when no session currently owns the core. */
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core* mmu)
+{
+       mali_bool stall_success;
+
+       MALI_DEBUG_ASSERT_POINTER(mmu);
+       MALI_DEBUG_PRINT(3, ("Activating the empty page directory on MMU %s\n", mmu->hw_core.description));
+
+       stall_success = mali_mmu_enable_stall(mmu);
+
+       /* This function can only be called when the core is idle, so it could not fail. */
+       MALI_DEBUG_ASSERT(stall_success);
+       MALI_IGNORE(stall_success);
+
+       mali_mmu_activate_address_space(mmu, mali_empty_page_directory_phys);
+       mali_mmu_disable_stall(mmu);
+}
+
+/* Switch the MMU to the fault flush page directory. Unlike the other
+ * activate variants, a failed stall is tolerated here: this is typically
+ * called while the MMU is in page fault mode, a state which also permits
+ * the DTE_ADDR write performed by mali_mmu_activate_address_space(). */
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core* mmu)
+{
+       mali_bool stalled;
+
+       MALI_DEBUG_ASSERT_POINTER(mmu);
+       MALI_DEBUG_PRINT(3, ("Activating the page fault flush page directory on MMU %s\n", mmu->hw_core.description));
+
+       stalled = mali_mmu_enable_stall(mmu);
+       mali_mmu_activate_address_space(mmu, mali_page_fault_flush_page_directory);
+       if (MALI_TRUE == stalled) {
+               mali_mmu_disable_stall(mmu);
+       }
+}
+
+/* Is called when we want the mmu to give an interrupt */
+static void mali_mmu_probe_trigger(void *data)
+{
+       struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+       /* Writing the RAWSTAT bits forces both interrupt sources to fire. */
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+}
+
+/* Is called when the irq probe wants the mmu to acknowledge an interrupt from the hw */
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data)
+{
+       struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+       u32 int_stat;
+
+       int_stat = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+
+       MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat));
+       if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT) {
+               MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n"));
+               mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+       } else {
+               MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n"));
+       }
+
+       if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR) {
+               MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n"));
+               mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+       } else {
+               MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
+       }
+
+       /* The probe only passes if BOTH interrupt sources were seen pending. */
+       if ( (int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
+            (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
+               return _MALI_OSK_ERR_OK;
+       }
+
+       return _MALI_OSK_ERR_FAULT;
+}
+
+#if 0
+/* Debug-only helper, currently compiled out: dumps the raw STATUS register. */
+void mali_mmu_print_state(struct mali_mmu_core *mmu)
+{
+       MALI_DEBUG_PRINT(2, ("MMU: State of %s is 0x%08x\n", mmu->hw_core.description, mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+}
+#endif
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_mmu.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_mmu.h
new file mode 100644 (file)
index 0000000..6e1c98c
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_H__
+#define __MALI_MMU_H__
+
+#include "mali_osk.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_hw_core.h"
+
+/* Forward declaration from mali_group.h */
+struct mali_group;
+
+/**
+ * MMU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_mmu_register {
+       MALI_MMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
+       MALI_MMU_REGISTER_STATUS = 0x0004, /**< Status of the MMU */
+       MALI_MMU_REGISTER_COMMAND = 0x0008, /**< Command register, used to control the MMU */
+       MALI_MMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
+       MALI_MMU_REGISTER_ZAP_ONE_LINE = 0x010, /**< Used to invalidate the mapping of a single page from the MMU */
+       MALI_MMU_REGISTER_INT_RAWSTAT = 0x0014, /**< Raw interrupt status, all interrupts visible */
+       MALI_MMU_REGISTER_INT_CLEAR = 0x0018, /**< Indicate to the MMU that the interrupt has been received */
+       MALI_MMU_REGISTER_INT_MASK = 0x001C, /**< Enable/disable types of interrupts */
+       MALI_MMU_REGISTER_INT_STATUS = 0x0020 /**< Interrupt status based on the mask */
+} mali_mmu_register;
+
+/**
+ * MMU interrupt register bits
+ * Each cause of the interrupt is reported
+ * through the (raw) interrupt status registers.
+ * Multiple interrupts can be pending, so multiple bits
+ * can be set at once.
+ */
+typedef enum mali_mmu_interrupt {
+       MALI_MMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occured */
+       MALI_MMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occured */
+} mali_mmu_interrupt;
+
+/** Bit assignments of the MMU STATUS register. */
+typedef enum mali_mmu_status_bits {
+       MALI_MMU_STATUS_BIT_PAGING_ENABLED      = 1 << 0,
+       MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE   = 1 << 1,
+       MALI_MMU_STATUS_BIT_STALL_ACTIVE        = 1 << 2,
+       MALI_MMU_STATUS_BIT_IDLE                = 1 << 3,
+       MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
+       MALI_MMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
+       MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE    = 1 << 31,
+} mali_mmu_status_bits;
+
+/**
+ * Definition of the MMU struct
+ * Used to track a MMU unit in the system.
+ * Contains information about the mapping of the registers
+ */
+struct mali_mmu_core {
+       struct mali_hw_core hw_core; /**< Common for all HW cores */
+       _mali_osk_irq_t *irq;        /**< IRQ handler */
+};
+
+/* One-time allocation/release of the module-global MMU helper pages. */
+_mali_osk_errcode_t mali_mmu_initialize(void);
+
+void mali_mmu_terminate(void);
+
+/* Lifetime management of individual MMU core objects. */
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual);
+void mali_mmu_delete(struct mali_mmu_core *mmu);
+
+/* Reset and TLB maintenance. */
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu);
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu);
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu);
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address);
+
+/* Switch which page directory the MMU translates through. */
+void mali_mmu_activate_page_directory(struct mali_mmu_core* mmu, struct mali_page_directory *pagedir);
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core* mmu);
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core* mmu);
+
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu);
+
+/*** Register reading/writing functions ***/
+MALI_STATIC_INLINE u32 mali_mmu_get_int_status(struct mali_mmu_core *mmu)
+{
+       return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+}
+
+MALI_STATIC_INLINE u32 mali_mmu_get_rawstat(struct mali_mmu_core *mmu)
+{
+       return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
+}
+
+MALI_STATIC_INLINE void mali_mmu_mask_all_interrupts(struct mali_mmu_core *mmu)
+{
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, 0);
+}
+
+MALI_STATIC_INLINE u32 mali_mmu_get_status(struct mali_mmu_core *mmu)
+{
+       return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+}
+
+MALI_STATIC_INLINE u32 mali_mmu_get_page_fault_addr(struct mali_mmu_core *mmu)
+{
+       return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_PAGE_FAULT_ADDR);
+}
+
+#endif /* __MALI_MMU_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_mmu_page_directory.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_mmu_page_directory.c
new file mode 100644 (file)
index 0000000..2dd54e3
--- /dev/null
@@ -0,0 +1,436 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_memory.h"
+#include "mali_l2_cache.h"
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data);
+
+/* Allocate one MMU table page and clear it to all zeroes.
+ * On success the CPU mapping is stored in *virt_addr and the physical
+ * address is returned; 0 is returned on failure (nothing is leaked). */
+u32 mali_allocate_empty_page(mali_io_address *virt_addr)
+{
+       u32 phys = 0;
+       mali_io_address cpu_map = NULL;
+
+       if (_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&phys, &cpu_map)) {
+               /* Allocation failed */
+               MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to get table page for empty pgdir\n"));
+               return 0;
+       }
+
+       MALI_DEBUG_ASSERT_POINTER(cpu_map);
+
+       if (_MALI_OSK_ERR_OK == fill_page(cpu_map, 0)) {
+               *virt_addr = cpu_map;
+               return phys;
+       }
+
+       /* Zeroing failed: give the page back and report failure. */
+       mali_mmu_release_table_page(phys, cpu_map);
+       MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to zero page\n"));
+       return 0;
+}
+
+/* Release a page obtained via mali_allocate_empty_page().
+ * Calling with MALI_INVALID_PAGE is a harmless no-op. */
+void mali_free_empty_page(u32 address, mali_io_address virt_addr)
+{
+       if (MALI_INVALID_PAGE != address) {
+               mali_mmu_release_table_page(address, virt_addr);
+       }
+}
+
+/* Allocate the three pages used to resolve page faults against a dummy
+ * mapping: a zeroed data page, a page table whose every entry points at the
+ * data page, and a page directory whose every entry points at that page
+ * table. On failure, pages allocated so far are released again and their
+ * out-parameters reset to MALI_INVALID_PAGE. */
+_mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
+        u32 *page_table, mali_io_address *page_table_mapping,
+        u32 *data_page, mali_io_address *data_page_mapping)
+{
+       _mali_osk_errcode_t err;
+
+       err = mali_mmu_get_table_page(data_page, data_page_mapping);
+       if (_MALI_OSK_ERR_OK == err) {
+               err = mali_mmu_get_table_page(page_table, page_table_mapping);
+               if (_MALI_OSK_ERR_OK == err) {
+                       err = mali_mmu_get_table_page(page_directory, page_directory_mapping);
+                       if (_MALI_OSK_ERR_OK == err) {
+                               /* Wire up the chain: PD -> PT -> data page. */
+                               fill_page(*data_page_mapping, 0);
+                               fill_page(*page_table_mapping, *data_page | MALI_MMU_FLAGS_DEFAULT);
+                               fill_page(*page_directory_mapping, *page_table | MALI_MMU_FLAGS_PRESENT);
+                               MALI_SUCCESS;
+                       }
+                       mali_mmu_release_table_page(*page_table, *page_table_mapping);
+                       *page_table = MALI_INVALID_PAGE;
+               }
+               mali_mmu_release_table_page(*data_page, *data_page_mapping);
+               *data_page = MALI_INVALID_PAGE;
+       }
+       return err;
+}
+
+/* Release the three helper pages created by mali_create_fault_flush_pages().
+ * Each page is released only if it was actually allocated, and the caller's
+ * handles are reset afterwards so a repeated destroy is harmless. */
+void mali_destroy_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
+                                    u32 *page_table, mali_io_address *page_table_mapping,
+                                    u32 *data_page, mali_io_address *data_page_mapping)
+{
+       if (*page_directory != MALI_INVALID_PAGE) {
+               mali_mmu_release_table_page(*page_directory, *page_directory_mapping);
+               *page_directory = MALI_INVALID_PAGE;
+               *page_directory_mapping = NULL;
+       }
+
+       if (*page_table != MALI_INVALID_PAGE) {
+               mali_mmu_release_table_page(*page_table, *page_table_mapping);
+               *page_table = MALI_INVALID_PAGE;
+               *page_table_mapping = NULL;
+       }
+
+       if (*data_page != MALI_INVALID_PAGE) {
+               mali_mmu_release_table_page(*data_page, *data_page_mapping);
+               *data_page = MALI_INVALID_PAGE;
+               *data_page_mapping = NULL;
+       }
+}
+
+/* Fill every 32-bit word of one MMU page with 'data', then issue a memory
+ * barrier so the batched relaxed writes are ordered before the page is used. */
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
+{
+       int word;
+
+       MALI_DEBUG_ASSERT_POINTER(mapping);
+
+       for (word = 0; word < MALI_MMU_PAGE_SIZE / 4; word++) {
+               _mali_osk_mem_iowrite32_relaxed(mapping, word * sizeof(u32), data);
+       }
+
+       _mali_osk_mem_barrier();
+       MALI_SUCCESS;
+}
+
+/* Ensure page tables exist (PDEs marked present) for the Mali virtual range
+ * [mali_address, mali_address + size - 1]. Page tables that are already
+ * present get their reference count bumped instead of being reallocated.
+ * NOTE(review): if mali_mmu_get_table_page() fails part-way through, page
+ * tables allocated and refcounts taken by earlier iterations are not rolled
+ * back here — confirm that callers recover (e.g. via pagedir_unmap). */
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+       const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+       const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+       _mali_osk_errcode_t err;
+       mali_io_address pde_mapping;
+       u32 pde_phys;
+       int i;
+
+       /* Guards against size == 0 and address+size wrap-around. */
+       if (last_pde < first_pde) {
+               MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+       }
+
+       for(i = first_pde; i <= last_pde; i++) {
+               if(0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) {
+                       /* Page table not present */
+                       MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+                       MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);
+
+                       err = mali_mmu_get_table_page(&pde_phys, &pde_mapping);
+                       if(_MALI_OSK_ERR_OK != err) {
+                               MALI_PRINT_ERROR(("Failed to allocate page table page.\n"));
+                               return err;
+                       }
+                       pagedir->page_entries_mapped[i] = pde_mapping;
+
+                       /* Update PDE, mark as present */
+                       _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i*sizeof(u32),
+                                                       pde_phys | MALI_MMU_FLAGS_PRESENT);
+
+                       MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+                       pagedir->page_entries_usage_count[i] = 1;
+               } else {
+                       pagedir->page_entries_usage_count[i]++;
+               }
+       }
+       /* Order all relaxed PDE writes before returning to the caller. */
+       _mali_osk_write_mem_barrier();
+
+       MALI_SUCCESS;
+}
+
+/* Clear (write 0 to) every PTE covering [mali_address, mali_address+size-1]
+ * within one page table. Any required TLB invalidation is the caller's job. */
+MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size)
+{
+       const int first = MALI_MMU_PTE_ENTRY(mali_address);
+       const int last  = MALI_MMU_PTE_ENTRY(mali_address + size - 1);
+       int entry;
+
+       for (entry = first; entry <= last; entry++) {
+               _mali_osk_mem_iowrite32_relaxed(page_table, entry * sizeof(u32), 0);
+       }
+}
+
+/**
+ * Unmap the Mali virtual range [mali_address, mali_address + size) from
+ * @pagedir.
+ *
+ * For each PDE in range the per-table usage count is decremented; when it
+ * reaches zero the whole page table is released and its PDE cleared,
+ * otherwise only the covered PTEs are zeroed. Finally the touched pages
+ * (up to two page tables plus the page directory itself) are invalidated
+ * in the L2 cache, falling back to a full invalidate if the small
+ * fixed-size list would overflow.
+ *
+ * Always returns success (MALI_SUCCESS).
+ */
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+       const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+       const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+       u32 left = size;
+       int i;
+       mali_bool pd_changed = MALI_FALSE;
+       u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
+       u32 num_pages_inv = 0;
+       mali_bool invalidate_all = MALI_FALSE; /* safety mechanism in case page_entries_usage_count is unreliable */
+
+       /* For all page directory entries in range. */
+       for (i = first_pde; i <= last_pde; i++) {
+               u32 size_in_pde, offset;
+
+               MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
+               MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);
+
+               /* Offset into page table, 0 if mali_address is 4MiB aligned */
+               offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
+               if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset) {
+                       size_in_pde = left;
+               } else {
+                       size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
+               }
+
+               pagedir->page_entries_usage_count[i]--;
+
+               /* If entire page table is unused, free it */
+               if (0 == pagedir->page_entries_usage_count[i]) {
+                       u32 page_phys;
+                       void *page_virt;
+                       MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
+                       /* last reference removed, no need to zero out each PTE  */
+
+                       /* Recover the table page's physical address from the PDE before clearing it. */
+                       page_phys = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)));
+                       page_virt = pagedir->page_entries_mapped[i];
+                       pagedir->page_entries_mapped[i] = NULL;
+                       _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i*sizeof(u32), 0);
+
+                       mali_mmu_release_table_page(page_phys, page_virt);
+                       pd_changed = MALI_TRUE;
+               } else {
+                       /* Table still referenced: remember it for targeted L2 invalidation. */
+                       MALI_DEBUG_ASSERT(num_pages_inv < 2);
+                       if (num_pages_inv < 2) {
+                               pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
+                               num_pages_inv++;
+                       } else {
+                               invalidate_all = MALI_TRUE;
+                       }
+
+                       /* If part of the page table is still in use, zero the relevant PTEs */
+                       mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
+               }
+
+               left -= size_in_pde;
+               mali_address += size_in_pde;
+       }
+       /* Order the relaxed PDE/PTE writes above before the cache maintenance below. */
+       _mali_osk_write_mem_barrier();
+
+       /* L2 pages invalidation */
+       if (MALI_TRUE == pd_changed) {
+               /* A PDE was cleared, so the directory page must be invalidated too. */
+               MALI_DEBUG_ASSERT(num_pages_inv < 3);
+               if (num_pages_inv < 3) {
+                       pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
+                       num_pages_inv++;
+               } else {
+                       invalidate_all = MALI_TRUE;
+               }
+       }
+
+       if (invalidate_all) {
+               mali_l2_cache_invalidate_all();
+       } else {
+               mali_l2_cache_invalidate_all_pages(pages_to_invalidate, num_pages_inv);
+       }
+
+       MALI_SUCCESS;
+}
+
+/**
+ * Allocate and initialize an empty Mali MMU page directory.
+ *
+ * Allocates the zeroed bookkeeping struct, obtains one table page for the
+ * page directory itself (physical address + kernel mapping), and zero-fills
+ * the directory page so no PDE is marked present.
+ *
+ * @return new page directory, or NULL on allocation failure (the partially
+ *         allocated struct is freed on the error path).
+ */
+struct mali_page_directory *mali_mmu_pagedir_alloc(void)
+{
+       struct mali_page_directory *pagedir;
+
+       pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
+       if(NULL == pagedir) {
+               return NULL;
+       }
+
+       if(_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&pagedir->page_directory, &pagedir->page_directory_mapped)) {
+               _mali_osk_free(pagedir);
+               return NULL;
+       }
+
+       /* Zero page directory */
+       fill_page(pagedir->page_directory_mapped, 0);
+
+       return pagedir;
+}
+
+/**
+ * Free a page directory and every page table it still references.
+ *
+ * Walks all 1024 PDEs; for each entry marked present, the PDE is cleared
+ * and the backing table page released. Finally the directory page itself
+ * and the bookkeeping struct are freed. Caller must ensure no GPU is
+ * using this page directory any more.
+ */
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
+{
+       const int num_page_table_entries = sizeof(pagedir->page_entries_mapped) / sizeof(pagedir->page_entries_mapped[0]);
+       int i;
+
+       /* Free referenced page tables and zero PDEs. */
+       for (i = 0; i < num_page_table_entries; i++) {
+               if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT)) {
+                       /* Strip flag bits to recover the table page's physical address. */
+                       u32 phys = _mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK;
+                       _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
+                       mali_mmu_release_table_page(phys, pagedir->page_entries_mapped[i]);
+               }
+       }
+       _mali_osk_write_mem_barrier();
+
+       /* Free the page directory page. */
+       mali_mmu_release_table_page(pagedir->page_directory, pagedir->page_directory_mapped);
+
+       _mali_osk_free(pagedir);
+}
+
+
+/**
+ * Back the Mali virtual range [mali_address, mali_address + size) with
+ * physical pages starting at @phys_address, writing one PTE per 4 KiB page
+ * with @permission_bits OR'ed in.
+ *
+ * The page tables for the range must already exist (debug-asserted here);
+ * presumably created earlier via mali_mmu_pagedir_map(). Only relaxed
+ * writes are issued — NOTE(review): no barrier here, assumed to be the
+ * caller's responsibility; confirm against call sites.
+ */
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, u32 permission_bits)
+{
+       u32 end_address = mali_address + size;
+
+       /* Map physical pages into MMU page tables */
+       for ( ; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, phys_address += MALI_MMU_PAGE_SIZE) {
+               MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
+               _mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
+                                               MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
+                                               phys_address | permission_bits);
+       }
+}
+
+/**
+ * Return the physical address of the page table referenced by PDE @index,
+ * i.e. the directory entry value with the MMU flag bits masked off.
+ */
+u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
+{
+       return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, index*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+}
+
+/* For instrumented */
+/* State shared by the page-table dump helpers below. When `buffer` is
+ * NULL the helpers only accumulate sizes (measurement pass); when set,
+ * they also copy data into it. */
+struct dump_info {
+       u32 buffer_left;             /* bytes remaining in `buffer` */
+       u32 register_writes_size;    /* bytes accumulated by writereg() */
+       u32 page_table_dump_size;    /* bytes accumulated by mali_mmu_dump_page() */
+       u32 *buffer;                 /* output cursor; NULL in sizing pass */
+};
+
+/**
+ * Record one simulated register write (address, value) in the dump.
+ *
+ * Always accounts 8 bytes in info->register_writes_size; only copies the
+ * pair into info->buffer when a buffer is attached. @comment is unused
+ * here, kept for call-site readability.
+ *
+ * @return _MALI_OSK_ERR_NOMEM if the buffer is too small, success otherwise.
+ */
+static _mali_osk_errcode_t writereg(u32 where, u32 what, const char *comment, struct dump_info *info)
+{
+       if (NULL != info) {
+               info->register_writes_size += sizeof(u32)*2; /* two 32-bit words */
+
+               if (NULL != info->buffer) {
+                       /* check that we have enough space */
+                       if (info->buffer_left < sizeof(u32)*2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+                       *info->buffer = where;
+                       info->buffer++;
+
+                       *info->buffer = what;
+                       info->buffer++;
+
+                       info->buffer_left -= sizeof(u32)*2;
+               }
+       }
+
+       MALI_SUCCESS;
+}
+
+/**
+ * Append one MMU page to the dump: its physical address (4 bytes) followed
+ * by the full 4 KiB page contents.
+ *
+ * Like writereg(), this always accumulates the size and only copies when
+ * info->buffer is attached.
+ *
+ * @return _MALI_OSK_ERR_NOMEM if the buffer is too small, success otherwise.
+ */
+static _mali_osk_errcode_t mali_mmu_dump_page(mali_io_address page, u32 phys_addr, struct dump_info * info)
+{
+       if (NULL != info) {
+               /* 4096 for the page and 4 bytes for the address */
+               const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4;
+               const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE;
+               const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4;
+
+               info->page_table_dump_size += dump_size_in_bytes;
+
+               if (NULL != info->buffer) {
+                       if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+                       *info->buffer = phys_addr;
+                       info->buffer++;
+
+                       _mali_osk_memcpy(info->buffer, page, page_size_in_bytes);
+                       info->buffer += page_size_in_elements;
+
+                       info->buffer_left -= dump_size_in_bytes;
+               }
+       }
+
+       MALI_SUCCESS;
+}
+
+/**
+ * Dump the whole paging hierarchy of @pagedir into @info: first the page
+ * directory page, then every mapped page table (all 1024 possible PDE
+ * slots are scanned; unmapped slots are skipped). The physical address
+ * recorded for each table is the PDE value with flag bits masked off.
+ *
+ * @return first error from mali_mmu_dump_page(), or success.
+ */
+static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info * info)
+{
+       MALI_DEBUG_ASSERT_POINTER(pagedir);
+       MALI_DEBUG_ASSERT_POINTER(info);
+
+       if (NULL != pagedir->page_directory_mapped) {
+               int i;
+
+               MALI_CHECK_NO_ERROR(
+                   mali_mmu_dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info)
+               );
+
+               for (i = 0; i < 1024; i++) {
+                       if (NULL != pagedir->page_entries_mapped[i]) {
+                               MALI_CHECK_NO_ERROR(
+                                   mali_mmu_dump_page(pagedir->page_entries_mapped[i],
+                                                      _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+                                                              i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info)
+                               );
+                       }
+               }
+       }
+
+       MALI_SUCCESS;
+}
+
+/**
+ * Record the MMU register writes needed to activate @pagedir:
+ * the page-directory base address plus two writes to offset 0x08
+ * (the "zap???" comment is inherited — register semantics not
+ * documented here).
+ */
+static _mali_osk_errcode_t dump_mmu_registers(struct mali_page_directory *pagedir, struct dump_info * info)
+{
+       MALI_CHECK_NO_ERROR(writereg(0x00000000, pagedir->page_directory,
+                                    "set the page directory address", info));
+       MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap???", info));
+       MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info));
+       MALI_SUCCESS;
+}
+
+/**
+ * User-kernel API: compute the buffer size a full MMU dump would need.
+ *
+ * Runs the same dump routines as _mali_ukk_dump_mmu_page_table() but with
+ * info.buffer == NULL, so only the size counters are accumulated (sizing
+ * pass). Result is returned in args->size.
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args )
+{
+       struct dump_info info = { 0, 0, 0, NULL };
+       struct mali_session_data * session_data;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+       session_data = (struct mali_session_data *)(args->ctx);
+
+       MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+       MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+       args->size = info.register_writes_size + info.page_table_dump_size;
+       MALI_SUCCESS;
+}
+
+/**
+ * User-kernel API: dump the session's MMU register writes and page tables
+ * into the caller-supplied buffer (args->buffer / args->size, typically
+ * sized via _mali_ukk_query_mmu_page_table_dump_size()).
+ *
+ * On return, args->register_writes / args->page_table_dump point into the
+ * buffer at the start of each section, with the corresponding byte counts
+ * in args->register_writes_size / args->page_table_dump_size.
+ *
+ * @return _MALI_OSK_ERR_INVALID_ARGS for NULL ctx/buffer,
+ *         _MALI_OSK_ERR_NOMEM if the buffer is too small, else success.
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args )
+{
+       struct dump_info info = { 0, 0, 0, NULL };
+       struct mali_session_data * session_data;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       MALI_CHECK_NON_NULL(args->buffer, _MALI_OSK_ERR_INVALID_ARGS);
+
+       session_data = (struct mali_session_data *)(args->ctx);
+
+       info.buffer_left = args->size;
+       info.buffer = args->buffer;
+
+       args->register_writes = info.buffer;
+       MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+
+       args->page_table_dump = info.buffer;
+       MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+
+       args->register_writes_size = info.register_writes_size;
+       args->page_table_dump_size = info.page_table_dump_size;
+
+       MALI_SUCCESS;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_mmu_page_directory.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_mmu_page_directory.h
new file mode 100644 (file)
index 0000000..2d2c962
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_PAGE_DIRECTORY_H__
+#define __MALI_MMU_PAGE_DIRECTORY_H__
+
+#include "mali_osk.h"
+
+/**
+ * Size of an MMU page in bytes
+ */
+#define MALI_MMU_PAGE_SIZE 0x1000
+
+/*
+ * Size of the address space referenced by a page table page
+ */
+#define MALI_MMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */
+
+/**
+ * Page directory index from address
+ * Calculates the page directory index from the given address
+ */
+#define MALI_MMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
+
+/**
+ * Page table index from address
+ * Calculates the page table index from the given address
+ */
+#define MALI_MMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
+
+/**
+ * Extract the memory address from an PDE/PTE entry
+ */
+#define MALI_MMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
+
+#define MALI_INVALID_PAGE ((u32)(~0))
+
+/**
+ *
+ */
+typedef enum mali_mmu_entry_flags {
+       MALI_MMU_FLAGS_PRESENT = 0x01,
+       MALI_MMU_FLAGS_READ_PERMISSION = 0x02,
+       MALI_MMU_FLAGS_WRITE_PERMISSION = 0x04,
+       MALI_MMU_FLAGS_OVERRIDE_CACHE  = 0x8,
+       MALI_MMU_FLAGS_WRITE_CACHEABLE  = 0x10,
+       MALI_MMU_FLAGS_WRITE_ALLOCATE  = 0x20,
+       MALI_MMU_FLAGS_WRITE_BUFFERABLE  = 0x40,
+       MALI_MMU_FLAGS_READ_CACHEABLE  = 0x80,
+       MALI_MMU_FLAGS_READ_ALLOCATE  = 0x100,
+       MALI_MMU_FLAGS_MASK = 0x1FF,
+} mali_mmu_entry_flags;
+
+
+#define MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE ( \
+MALI_MMU_FLAGS_PRESENT | \
+       MALI_MMU_FLAGS_READ_PERMISSION |  \
+       MALI_MMU_FLAGS_WRITE_PERMISSION | \
+       MALI_MMU_FLAGS_OVERRIDE_CACHE | \
+       MALI_MMU_FLAGS_WRITE_CACHEABLE | \
+       MALI_MMU_FLAGS_WRITE_BUFFERABLE | \
+       MALI_MMU_FLAGS_READ_CACHEABLE | \
+       MALI_MMU_FLAGS_READ_ALLOCATE )
+
+#define MALI_MMU_FLAGS_DEFAULT ( \
+       MALI_MMU_FLAGS_PRESENT | \
+       MALI_MMU_FLAGS_READ_PERMISSION |  \
+       MALI_MMU_FLAGS_WRITE_PERMISSION )
+
+
+struct mali_page_directory {
+       u32 page_directory; /**< Physical address of the memory session's page directory */
+       mali_io_address page_directory_mapped; /**< Pointer to the mapped version of the page directory into the kernel's address space */
+
+       mali_io_address page_entries_mapped[1024]; /**< Pointers to the page tables which exists in the page directory mapped into the kernel's address space */
+       u32   page_entries_usage_count[1024]; /**< Tracks usage count of the page table pages, so they can be released on the last reference */
+};
+
+/* Map Mali virtual address space (i.e. ensure page tables exist for the virtual range)  */
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+
+/* Back virtual address space with actual pages. Assumes input is contiguous and 4k aligned. */
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, u32 cache_settings);
+
+u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index);
+
+u32 mali_allocate_empty_page(mali_io_address *virtual);
+void mali_free_empty_page(u32 address, mali_io_address virtual);
+_mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
+        u32 *page_table, mali_io_address *page_table_mapping,
+        u32 *data_page, mali_io_address *data_page_mapping);
+void mali_destroy_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
+                                    u32 *page_table, mali_io_address *page_table_mapping,
+                                    u32 *data_page, mali_io_address *data_page_mapping);
+
+struct mali_page_directory *mali_mmu_pagedir_alloc(void);
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir);
+
+#endif /* __MALI_MMU_PAGE_DIRECTORY_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_osk.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_osk.h
new file mode 100644 (file)
index 0000000..ef60e81
--- /dev/null
@@ -0,0 +1,1335 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk.h
+ * Defines the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_H__
+#define __MALI_OSK_H__
+
+#include "mali_osk_types.h"
+#include "mali_osk_specific.h"           /* include any per-os specifics */
+#include "mali_osk_locks.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @addtogroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+#ifdef DEBUG
+/** @brief Macro for asserting that the current thread holds a given lock
+ */
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) MALI_DEBUG_ASSERT(_mali_osk_lock_get_owner((_mali_osk_lock_debug_t *)l) == _mali_osk_get_tid());
+
+/** @brief returns a lock's owner (thread id) if debugging is enabled
+ */
+#else
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) do {} while(0)
+#endif
+
+/** @} */ /* end group _mali_osk_lock */
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Find the containing structure of another structure
+ *
+ * This is the reverse of the operation 'offsetof'. This means that the
+ * following condition is satisfied:
+ *
+ *   ptr == _MALI_OSK_CONTAINER_OF( &ptr->member, type, member )
+ *
+ * When ptr is of type 'type'.
+ *
+ * Its purpose is to recover a larger structure that has wrapped a smaller one.
+ *
+ * @note no type or memory checking occurs to ensure that a wrapper structure
+ * does in fact exist, and that it is being recovered with respect to the
+ * correct member.
+ *
+ * @param ptr the pointer to the member that is contained within the larger
+ * structure
+ * @param type the type of the structure that contains the member
+ * @param member the name of the member in the structure that ptr points to.
+ * @return a pointer to a \a type object which contains \a member, as pointed
+ * to by \a ptr.
+ */
+#define _MALI_OSK_CONTAINER_OF(ptr, type, member) \
+             ((type *)( ((char *)ptr) - offsetof(type,member) ))
+
+/** @addtogroup _mali_osk_wq
+ * @{ */
+
+/** @brief Initialize work queues (for deferred work)
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_wq_init(void);
+
+/** @brief Terminate work queues (for deferred work)
+ */
+void _mali_osk_wq_term(void);
+
+/** @brief Create work in the work queue
+ *
+ * Creates a work object which can be scheduled in the work queue. When
+ * scheduled, \a handler will be called with \a data as the argument.
+ *
+ * Refer to \ref _mali_osk_wq_schedule_work() for details on how work
+ * is scheduled in the queue.
+ *
+ * The returned pointer must be freed with \ref _mali_osk_wq_delete_work()
+ * when no longer needed.
+ */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work( _mali_osk_wq_work_handler_t handler, void *data );
+
+/** @brief A high priority version of \a _mali_osk_wq_create_work()
+ *
+ * Creates a work object which can be scheduled in the high priority work queue.
+ *
+ * This is unfortunately needed to get low latency scheduling of the Mali cores.  Normally we would
+ * schedule the next job in hw_irq or tasklet, but often we can't since we need to synchronously map
+ * and unmap shared memory when a job is connected to external fences (timelines). And this requires
+ * taking a mutex.
+ *
+ * We do signal a lot of other (low priority) work also as part of the job being finished, and if we
+ * don't set this Mali scheduling thread as high priority, we see that the CPU scheduler often runs
+ * random things instead of starting the next GPU job when the GPU is idle.  So setting the gpu
+ * scheduler to high priority does give a visually more responsive system.
+ *
+ * Start the high priority work with: \a _mali_osk_wq_schedule_work_high_pri()
+ */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri( _mali_osk_wq_work_handler_t handler, void *data );
+
+/** @brief Delete a work object
+ *
+ * This will flush the work queue to ensure that the work handler will not
+ * be called after deletion.
+ */
+void _mali_osk_wq_delete_work( _mali_osk_wq_work_t *work );
+
+/** @brief Delete a work object
+ *
+ * This will NOT flush the work queue, so only call this if you are sure that the work handler will
+ * not be called after deletion.
+ */
+void _mali_osk_wq_delete_work_nonflush( _mali_osk_wq_work_t *work );
+
+/** @brief Cause a queued, deferred call of the work handler
+ *
+ * _mali_osk_wq_schedule_work provides a mechanism for enqueuing deferred calls
+ * to the work handler. After calling \ref _mali_osk_wq_schedule_work(), the
+ * work handler will be scheduled to run at some point in the future.
+ *
+ * Typically this is called by the IRQ upper-half to defer further processing of
+ * IRQ-related work to the IRQ bottom-half handler. This is necessary for work
+ * that cannot be done in an IRQ context by the IRQ upper-half handler. Timer
+ * callbacks also use this mechanism, because they are treated as though they
+ * operate in an IRQ context. Refer to \ref _mali_osk_timer_t for more
+ * information.
+ *
+ * Code that operates in a kernel-process context (with no IRQ context
+ * restrictions) may also enqueue deferred calls to the IRQ bottom-half. The
+ * advantage over direct calling is that deferred calling allows the caller and
+ * IRQ bottom half to hold the same mutex, with a guarantee that they will not
+ * deadlock just by using this mechanism.
+ *
+ * _mali_osk_wq_schedule_work() places deferred call requests on a queue, to
+ * allow for more than one thread to make a deferred call. Therefore, if it is
+ * called 'K' times, then the IRQ bottom-half will be scheduled 'K' times too.
+ * 'K' is a number that is implementation-specific.
+ *
+ * _mali_osk_wq_schedule_work() is guaranteed to not block on:
+ * - enqueuing a deferred call request.
+ * - the completion of the work handler.
+ *
+ * This is to prevent deadlock. For example, if _mali_osk_wq_schedule_work()
+ * blocked, then it would cause a deadlock when the following two conditions
+ * hold:
+ * - The work handler callback (of type _mali_osk_wq_work_handler_t) locks
+ * a mutex
+ * - And, at the same time, the caller of _mali_osk_wq_schedule_work() also
+ * holds the same mutex
+ *
+ * @note care must be taken to not overflow the queue that
+ * _mali_osk_wq_schedule_work() operates on. Code must be structured to
+ * ensure that the number of requests made to the queue is bounded. Otherwise,
+ * work will be lost.
+ *
+ * The queue that _mali_osk_wq_schedule_work implements is a FIFO of N-writer,
+ * 1-reader type. The writers are the callers of _mali_osk_wq_schedule_work
+ * (all OSK-registered IRQ upper-half handlers in the system, watchdog timers,
+ * callers from a Kernel-process context). The reader is a single thread that
+ * handles all OSK-registered work.
+ *
+ * @param work a pointer to the _mali_osk_wq_work_t object corresponding to the
+ * work to begin processing.
+ */
+void _mali_osk_wq_schedule_work( _mali_osk_wq_work_t *work );
+
+/** @brief Cause a queued, deferred call of the high priority work handler
+ *
+ * Function is the same as \a _mali_osk_wq_schedule_work() with the only
+ * difference that it runs in a high (real time) priority on the system.
+ *
+ * Should only be used as a substitute for doing the same work in interrupts.
+ *
+ * This is allowed to sleep, but the work should be small since it will block
+ * all other applications.
+*/
+void _mali_osk_wq_schedule_work_high_pri( _mali_osk_wq_work_t *work );
+
+/** @brief Flush the work queue
+ *
+ * This will flush the OSK work queue, ensuring all work in the queue has
+ * completed before returning.
+ *
+ * Since this blocks on the completion of work in the work-queue, the
+ * caller of this function \b must \b not hold any mutexes that are taken by
+ * any registered work handler. To do so may cause a deadlock.
+ *
+ */
+void _mali_osk_wq_flush(void);
+
+/** @brief Create work in the delayed work queue
+ *
+ * Creates a work object which can be scheduled in the work queue. When
+ * scheduled, a timer will be start and the \a handler will be called with
+ * \a data as the argument when timer out
+ *
+ * Refer to \ref _mali_osk_wq_delayed_schedule_work() for details on how work
+ * is scheduled in the queue.
+ *
+ * The returned pointer must be freed with \ref _mali_osk_wq_delayed_delete_work_nonflush()
+ * when no longer needed.
+ */
+_mali_osk_wq_delayed_work_t *_mali_osk_wq_delayed_create_work(_mali_osk_wq_work_handler_t handler, void *data);
+
+/** @brief Delete a work object
+ *
+ * This will NOT flush the work queue, so only call this if you are sure that the work handler will
+ * not be called after deletion.
+ */
+void _mali_osk_wq_delayed_delete_work_nonflush(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Cancel a delayed work without waiting for it to finish
+ *
+ * Note that the \a work callback function may still be running on return from
+ * _mali_osk_wq_delayed_cancel_work_async().
+ *
+ * @param work The delayed work to be cancelled
+ */
+void _mali_osk_wq_delayed_cancel_work_async(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Cancel a delayed work and wait for it to finish
+ *
+ * When this function returns, the \a work was either cancelled or it finished running.
+ *
+ * @param work The delayed work to be cancelled
+ */
+void _mali_osk_wq_delayed_cancel_work_sync(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Put \a work task in global workqueue after delay
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue.
+ *
+ * If \a work was already on a queue, this function will return without doing anything
+ *
+ * @param work job to be done
+ * @param delay number of jiffies to wait or 0 for immediate execution
+ */
+void _mali_osk_wq_delayed_schedule_work(_mali_osk_wq_delayed_work_t *work, u32 delay);
+
+/** @} */ /* end group _mali_osk_wq */
+
+
+/** @addtogroup _mali_osk_irq
+ * @{ */
+
+/** @brief Initialize IRQ handling for a resource
+ *
+ * Registers an interrupt handler \a uhandler for the given IRQ number \a irqnum.
+ * \a data will be passed as argument to the handler when an interrupt occurs.
+ *
+ * If \a irqnum is -1, _mali_osk_irq_init will probe for the IRQ number using
+ * the supplied \a trigger_func and \a ack_func. These functions will also
+ * receive \a data as their argument.
+ *
+ * @param irqnum The IRQ number that the resource uses, as seen by the CPU.
+ * The value -1 has a special meaning which indicates the use of probing, and
+ * trigger_func and ack_func must be non-NULL.
+ * @param uhandler The interrupt handler, corresponding to a ISR handler for
+ * the resource
+ * @param int_data resource specific data, which will be passed to uhandler
+ * @param trigger_func Optional: a function to trigger the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param ack_func Optional: a function to acknowledge the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param probe_data resource-specific data, which will be passed to
+ * (if present) trigger_func and ack_func
+ * @param description textual description of the IRQ resource.
+ * @return on success, a pointer to a _mali_osk_irq_t object, which represents
+ * the IRQ handling on this resource. NULL on failure.
+ */
+_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description );
+
+/** @brief Terminate IRQ handling on a resource.
+ *
+ * This will disable the interrupt from the device, and then waits for any
+ * currently executing IRQ handlers to complete.
+ *
+ * @note If work is deferred to an IRQ bottom-half handler through
+ * \ref _mali_osk_wq_schedule_work(), be sure to flush any remaining work
+ * with \ref _mali_osk_wq_flush() or (implicitly) with \ref _mali_osk_wq_delete_work()
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ handling is to be terminated.
+ */
+void _mali_osk_irq_term( _mali_osk_irq_t *irq );
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @addtogroup _mali_osk_atomic
+ * @{ */
+
+/** @brief Decrement an atomic counter
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom );
+
+/** @brief Decrement an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after decrement */
+u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom );
+
+/** @brief Increment an atomic counter
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom );
+
+/** @brief Increment an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter */
+u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom );
+
+/** @brief Initialize an atomic counter
+ *
+ * @note the parameter required is a u32, and so signed integers should be
+ * cast to u32.
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the value to initialize the atomic counter.
+ * @return _MALI_OSK_ERR_OK on success, otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val );
+
+/** @brief Read a value from an atomic counter
+ *
+ * This can only be safely used to determine the value of the counter when it
+ * is guaranteed that other threads will not be modifying the counter. This
+ * makes its usefulness limited.
+ *
+ * @param atom pointer to an atomic counter
+ */
+u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom );
+
+/** @brief Terminate an atomic counter
+ *
+ * @param atom pointer to an atomic counter
+ */
+void _mali_osk_atomic_term( _mali_osk_atomic_t *atom );
+
+/** @brief Assign a new val to atomic counter, and return the old atomic counter
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the new value assign to the atomic counter
+ * @return the old value of the atomic counter
+ */
+u32 _mali_osk_atomic_xchg( _mali_osk_atomic_t *atom, u32 val );
+/** @} */  /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_memory OSK Memory Allocation
+ * @{ */
+
+/** @brief Allocate zero-initialized memory.
+ *
+ * Returns a buffer capable of containing at least \a n elements of \a size
+ * bytes each. The buffer is initialized to zero.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider to use _mali_osk_vmalloc() instead, as this function might
+ * map down to a OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * @param n Number of elements to allocate
+ * @param size Size of each element
+ * @return On success, the zero-initialized buffer allocated. NULL on failure
+ */
+void *_mali_osk_calloc( u32 n, u32 size );
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider to use _mali_osk_vmalloc() instead, as this function might
+ * map down to a OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_malloc( u32 size );
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_malloc() and _mali_osk_calloc()
+ * must be freed before the application exits. Otherwise,
+ * a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_free( void *ptr );
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * This function is potentially slower than _mali_osk_malloc() and _mali_osk_calloc(),
+ * but do support bigger sizes.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_valloc( u32 size );
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_valloc() must be freed before the
+ * application exits. Otherwise a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_vfree( void *ptr );
+
+/** @brief Copies memory.
+ *
+ * Copies the \a len bytes from the buffer pointed by the parameter \a src
+ * directly to the buffer pointed by \a dst.
+ *
+ * It is an error for \a src to overlap \a dst anywhere in \a len bytes.
+ *
+ * @param dst Pointer to the destination array where the content is to be
+ * copied.
+ * @param src Pointer to the source of data to be copied.
+ * @param len Number of bytes to copy.
+ * @return \a dst is always passed through unmodified.
+ */
+void *_mali_osk_memcpy( void *dst, const void *src, u32 len );
+
+/** @brief Fills memory.
+ *
+ * Sets the first \a n bytes of the block of memory pointed to by \a s to
+ * the specified value
+ * @param s Pointer to the block of memory to fill.
+ * @param c Value to be set, passed as u32. Only the 8 Least Significant Bits (LSB)
+ * are used.
+ * @param n Number of bytes to be set to the value.
+ * @return \a s is always passed through unmodified
+ */
+void *_mali_osk_memset( void *s, u32 c, u32 n );
+/** @} */ /* end group _mali_osk_memory */
+
+
+/** @brief Checks the amount of memory allocated
+ *
+ * Checks that not more than \a max_allocated bytes are allocated.
+ *
+ * Some OS bring up an interactive out of memory dialogue when the
+ * system runs out of memory. This can stall non-interactive
+ * apps (e.g. automated test runs). This function can be used to
+ * not trigger the OOM dialogue by keeping allocations
+ * within a certain limit.
+ *
+ * @return MALI_TRUE when \a max_allocated bytes are not in use yet. MALI_FALSE
+ * when at least \a max_allocated bytes are in use.
+ */
+mali_bool _mali_osk_mem_check_allocated( u32 max_allocated );
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Issue a memory barrier
+ *
+ * This defines an arbitrary memory barrier operation, which forces an ordering constraint
+ * on memory read and write operations.
+ */
+void _mali_osk_mem_barrier( void );
+
+/** @brief Issue a write memory barrier
+ *
+ * This defines an write memory barrier operation which forces an ordering constraint
+ * on memory write operations.
+ */
+void _mali_osk_write_mem_barrier( void );
+
+/** @brief Map a physically contiguous region into kernel space
+ *
+ * This is primarily used for mapping in registers from resources, and Mali-MMU
+ * page tables. The mapping is only visible from kernel-space.
+ *
+ * Access has to go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @param phys CPU-physical base address of the memory to map in. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * map in
+ * @param description A textual description of the memory being mapped in.
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure.
+ */
+mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description );
+
+/** @brief Unmap a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_mapioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt an unmap twice
+ * - unmap only part of a range obtained through _mali_osk_mem_mapioregion
+ * - unmap more than the range obtained through  _mali_osk_mem_mapioregion
+ * - unmap an address range that was not successfully mapped using
+ * _mali_osk_mem_mapioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in. This must be aligned to the system's page size, which is assumed
+ * to be 4K
+ * @param size The number of bytes that were originally mapped in.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address mapping );
+
+/** @brief Allocate and Map a physically contiguous region into kernel space
+ *
+ * This is used for allocating physically contiguous regions (such as Mali-MMU
+ * page tables) and mapping them into kernel space. The mapping is only
+ * visible from kernel-space.
+ *
+ * The alignment of the returned memory is guaranteed to be at least
+ * _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * Access must go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @note This function is primarily to provide support for OSs that are
+ * incapable of separating the tasks 'allocate physically contiguous memory'
+ * and 'map it into kernel space'
+ *
+ * @param[out] phys CPU-physical base address of memory that was allocated.
+ * (*phys) will be guaranteed to be aligned to at least
+ * _MALI_OSK_CPU_PAGE_SIZE on success.
+ *
+ * @param[in] size the number of bytes of physically contiguous memory to
+ * allocate. This must be a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure, and (*phys) is unmodified.
+ */
+mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size );
+
+/** @brief Free a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_allocioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt a free twice on the same ioregion
+ * - free only part of a range obtained through _mali_osk_mem_allocioregion
+ * - free more than the range obtained through  _mali_osk_mem_allocioregion
+ * - free an address range that was not successfully mapped using
+ * _mali_osk_mem_allocioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in, which was aligned to _MALI_OSK_CPU_PAGE_SIZE.
+ * @param size The number of bytes that were originally mapped in, which was
+ * a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address mapping );
+
+/** @brief Request a region of physically contiguous memory
+ *
+ * This is used to ensure exclusive access to a region of physically contiguous
+ * memory.
+ *
+ * It is acceptable to implement this as a stub. However, it is then the job
+ * of the System Integrator to ensure that no other device driver will be using
+ * the physical address ranges used by Mali, while the Mali device driver is
+ * loaded.
+ *
+ * @param phys CPU-physical base address of the memory to request. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * request.
+ * @param description A textual description of the memory being requested.
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description );
+
+/** @brief Un-request a region of physically contiguous memory
+ *
+ * This is used to release a region of physically contiguous memory previously
+ * requested through _mali_osk_mem_reqregion, so that other device drivers may
+ * use it. This will be called at time of Mali device driver termination.
+ *
+ * It is a programming error to attempt to:
+ * - unrequest a region twice
+ * - unrequest only part of a range obtained through _mali_osk_mem_reqregion
+ * - unrequest more than the range obtained through  _mali_osk_mem_reqregion
+ * - unrequest an address range that was not successfully requested using
+ * _mali_osk_mem_reqregion
+ *
+ * @param phys CPU-physical base address of the memory to un-request. This must
+ * be aligned to the system's page size, which is assumed to be 4K
+ * @param size the number of bytes of physically contiguous address space to
+ * un-request.
+ */
+void _mali_osk_mem_unreqregion( u32 phys, u32 size );
+
+/** @brief Read from a location currently mapped in through
+ * _mali_osk_mem_mapioregion
+ *
+ * This reads a 32-bit word from a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to read from memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to read from
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @return the 32-bit word from the specified location.
+ */
+u32 _mali_osk_mem_ioread32( volatile mali_io_address mapping, u32 offset );
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion without memory barriers
+ *
+ * This write a 32-bit word to a 32-bit aligned location without using memory barrier.
+ * It is a programming error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param addr Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32_relaxed( volatile mali_io_address addr, u32 offset, u32 val );
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion with write memory barrier
+ *
+ * This write a 32-bit word to a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32( volatile mali_io_address mapping, u32 offset, u32 val );
+
+/** @brief Flush all CPU caches
+ *
+ * This should only be implemented if flushing of the cache is required for
+ * memory mapped in through _mali_osk_mem_mapregion.
+ */
+void _mali_osk_cache_flushall( void );
+
+/** @brief Flush any caches necessary for the CPU and MALI to have the same view of a range of uncached mapped memory
+ *
+ * This should only be implemented if your OS doesn't do a full cache flush (inner & outer)
+ * after allocating uncached mapped memory.
+ *
+ * Some OS do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This is required for MALI to have the correct view of the memory.
+ */
+void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size );
+
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+/** @addtogroup _mali_osk_notification
+ *
+ * User space notification framework
+ *
+ * Communication with user space of asynchronous events is performed through a
+ * synchronous call to the \ref u_k_api.
+ *
+ * Since the events are asynchronous, the events have to be queued until a
+ * synchronous U/K API call can be made by user-space. A U/K API call might also
+ * be received before any event has happened. Therefore the notifications the
+ * different subsystems wants to send to user space has to be queued for later
+ * reception, or a U/K API call has to be blocked until an event has occurred.
+ *
+ * Typical uses of notifications are after running of jobs on the hardware or
+ * when changes to the system is detected that needs to be relayed to user
+ * space.
+ *
+ * After an event has occurred, user space has to be notified using some kind of
+ * message. The notification framework supports sending messages to waiting
+ * threads or queueing of messages until a U/K API call is made.
+ *
+ * The notification queue is a FIFO. There are no restrictions on the numbers
+ * of readers or writers in the queue.
+ *
+ * A message contains what user space needs to identify how to handle an
+ * event. This includes a type field and a possible type specific payload.
+ *
+ * A notification to user space is represented by a
+ * \ref _mali_osk_notification_t object. A sender gets hold of such an object
+ * using _mali_osk_notification_create(). The buffer given by the
+ * _mali_osk_notification_t::result_buffer field in the object is used to store
+ * any type specific data. The other fields are internal to the queue system
+ * and should not be touched.
+ *
+ * @{ */
+
+/** @brief Create a notification object
+ *
+ * Returns a notification object which can be added to the queue of
+ * notifications pending for user space transfer.
+ *
+ * The implementation will initialize all members of the
+ * \ref _mali_osk_notification_t object. In particular, the
+ * _mali_osk_notification_t::result_buffer member will be initialized to point
+ * to \a size bytes of storage, and that storage will be suitably aligned for
+ * storage of any structure. That is, the created buffer meets the same
+ * requirements as _mali_osk_malloc().
+ *
+ * The notification object must be deleted when not in use. Use
+ * _mali_osk_notification_delete() for deleting it.
+ *
+ * @note You \b must \b not call _mali_osk_free() on a \ref _mali_osk_notification_t,
+ * object, or on a _mali_osk_notification_t::result_buffer. You must only use
+ * _mali_osk_notification_delete() to free the resources associated with a
+ * \ref _mali_osk_notification_t object.
+ *
+ * @param type The notification type
+ * @param size The size of the type specific buffer to send
+ * @return Pointer to a notification object with a suitable buffer, or NULL on error.
+ */
+_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size );
+
+/** @brief Delete a notification object
+ *
+ * This must be called to reclaim the resources of a notification object. This
+ * includes:
+ * - The _mali_osk_notification_t::result_buffer
+ * - The \ref _mali_osk_notification_t itself.
+ *
+ * A notification object \b must \b not be used after it has been deleted by
+ * _mali_osk_notification_delete().
+ *
+ * In addition, the notification object may not be deleted while it is in a
+ * queue. That is, if it has been placed on a queue with
+ * _mali_osk_notification_queue_send(), then it must not be deleted until
+ * it has been received by a call to _mali_osk_notification_queue_receive().
+ * Otherwise, the queue may be corrupted.
+ *
+ * @param object the notification object to delete.
+ */
+void _mali_osk_notification_delete( _mali_osk_notification_t *object );
+
+/** @brief Create a notification queue
+ *
+ * Creates a notification queue which can be used to queue messages for user
+ * delivery and get queued messages from
+ *
+ * The queue is a FIFO, and has no restrictions on the numbers of readers or
+ * writers.
+ *
+ * When the queue is no longer in use, it must be terminated with
+ * \ref _mali_osk_notification_queue_term(). Failure to do so will result in a
+ * memory leak.
+ *
+ * @return Pointer to a new notification queue or NULL on error.
+ */
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void );
+
+/** @brief Destroy a notification queue
+ *
+ * Destroys a notification queue and frees associated resources from the queue.
+ *
+ * A notification queue \b must \b not be destroyed in the following cases:
+ * - while there are \ref _mali_osk_notification_t objects in the queue.
+ * - while there are writers currently acting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_send() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_send() on the queue in the future.
+ * - while there are readers currently waiting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_receive() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_receive() on the queue in the future.
+ *
+ * Therefore, all \ref _mali_osk_notification_t objects must be flushed and
+ * deleted by the code that makes use of the notification queues, since only
+ * they know the structure of the _mali_osk_notification_t::result_buffer
+ * (even if it may only be a flat structure).
+ *
+ * @note Since the queue is a FIFO, the code using notification queues may
+ * create its own 'flush' type of notification, to assist in flushing the
+ * queue.
+ *
+ * Once the queue has been destroyed, it must not be used again.
+ *
+ * @param queue The queue to destroy
+ */
+void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue );
+
+/** @brief Schedule notification for delivery
+ *
+ * When a \ref _mali_osk_notification_t object has been created successfully
+ * and set up, it may be added to the queue of objects waiting for user space
+ * transfer.
+ *
+ * The sending will not block if the queue is full.
+ *
+ * A \ref _mali_osk_notification_t object \b must \b not be put on two different
+ * queues at the same time, or enqueued twice onto a single queue before
+ * reception. However, it is acceptable for it to be requeued \em after reception
+ * from a call to _mali_osk_notification_queue_receive(), even onto the same queue.
+ *
+ * Again, requeuing must also not enqueue onto two different queues at the same
+ * time, or enqueue onto the same queue twice before reception.
+ *
+ * @param queue The notification queue to add this notification to
+ * @param object The entry to add
+ */
+void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object );
+
+/** @brief Receive a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the thread will sleep until one becomes ready.
+ * Therefore, notifications may not be received into an
+ * IRQ or 'atomic' context (that is, a context where sleeping is disallowed).
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_RESTARTSYSCALL if the sleep was interrupted.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
+
+/** @brief Dequeues a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the function call will return an error code.
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if queue was empty.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @addtogroup _mali_osk_timer
+ *
+ * Timers use the OS's representation of time, which are 'ticks'. This is to
+ * prevent aliasing problems between the internal timer time, and the time
+ * asked for.
+ *
+ * @{ */
+
+/** @brief Initialize a timer
+ *
+ * Allocates resources for a new timer, and initializes them. This does not
+ * start the timer.
+ *
+ * @return a pointer to the allocated timer object, or NULL on failure.
+ */
+_mali_osk_timer_t *_mali_osk_timer_init(void);
+
+/** @brief Start a timer
+ *
+ * It is an error to start a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * It is an error to use this to start an already started timer.
+ *
+ * The timer will expire in \a ticks_to_expire ticks, at which point, the
+ * callback function will be invoked with the callback-specific data,
+ * as registered by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to start
+ * @param ticks_to_expire the amount of time in ticks for the timer to run
+ * before triggering.
+ */
+void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire );
+
+/** @brief Modify a timer
+ *
+ * Set the relative time at which a timer will expire, and start it if it is
+ * stopped. If \a ticks_to_expire is 0 the timer fires immediately.
+ *
+ * It is an error to modify a timer without setting the callback via
+ *  _mali_osk_timer_setcallback().
+ *
+ * The timer will expire at \a ticks_to_expire from the time of the call, at
+ * which point, the callback function will be invoked with the
+ * callback-specific data, as set by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to modify, and start if necessary
+ * @param ticks_to_expire the \em absolute time in ticks at which this timer
+ * should trigger.
+ *
+ */
+void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 ticks_to_expire);
+
+/** @brief Stop a timer, and block on its completion.
+ *
+ * Stop the timer. When the function returns, it is guaranteed that the timer's
+ * callback will not be running on any CPU core.
+ *
+ * Since stopping the timer blocks on completion of the callback, the callback
+ * may not obtain any mutexes that the caller holds. Otherwise, a deadlock will
+ * occur.
+ *
+ * @note While the callback itself is guaranteed to not be running, work
+ * enqueued on the work-queue by the timer (with
+ * \ref _mali_osk_wq_schedule_work()) may still run. The timer callback and
+ * work handler must take this into account.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ *
+ */
+void _mali_osk_timer_del( _mali_osk_timer_t *tim );
+
+/** @brief Stop a timer.
+ *
+ * Stop the timer. When the function returns, the timer's callback may still be
+ * running on any CPU core.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ */
+void _mali_osk_timer_del_async( _mali_osk_timer_t *tim );
+
+/** @brief Check if timer is pending.
+ *
+ * Check if timer is active.
+ *
+ * @param tim the timer to check
+ * @return MALI_TRUE if time is active, MALI_FALSE if it is not active
+ */
+mali_bool _mali_osk_timer_pending( _mali_osk_timer_t *tim);
+
+/** @brief Set a timer's callback parameters.
+ *
+ * This must be called at least once before a timer is started/modified.
+ *
+ * After a timer has been stopped or expires, the callback remains set. This
+ * means that restarting the timer will call the same function with the same
+ * parameters on expiry.
+ *
+ * @param tim the timer to set callback on.
+ * @param callback Function to call when timer expires
+ * @param data Function-specific data to supply to the function on expiry.
+ */
+void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data );
+
+/** @brief Terminate a timer, and deallocate resources.
+ *
+ * The timer must first be stopped by calling _mali_osk_timer_del().
+ *
+ * It is a programming error for _mali_osk_timer_term() to be called on:
+ * - timer that is currently running
+ * - a timer that is currently executing its callback.
+ *
+ * @param tim the timer to deallocate.
+ */
+void _mali_osk_timer_term( _mali_osk_timer_t *tim );
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @defgroup _mali_osk_time OSK Time functions
+ *
+ * \ref _mali_osk_time use the OS's representation of time, which are
+ * 'ticks'. This is to prevent aliasing problems between the internal timer
+ * time, and the time asked for.
+ *
+ * OS tick time is measured as a u32. The time stored in a u32 may either be
+ * an absolute time, or a time delta between two events. Whilst it is valid to
+ * use math operators to \em change the tick value represented as a u32, it
+ * is often only meaningful to do such operations on time deltas, rather than
+ * on absolute time. However, it is meaningful to add/subtract time deltas to
+ * absolute times.
+ *
+ * Conversion between tick time and milliseconds (ms) may not be loss-less,
+ * and are \em implementation \em dependent.
+ *
+ * Code using OS time must take this into account, since:
+ * - a small OS time may (or may not) be rounded
+ * - a large time may (or may not) overflow
+ *
+ * @{ */
+
+/** @brief Return whether ticka occurs after tickb
+ *
+ * Some OSs handle tick 'rollover' specially, and so can be more robust against
+ * tick counters rolling-over. This function must therefore be called to
+ * determine if a time (in ticks) really occurs after another time (in ticks).
+ *
+ * @param ticka ticka
+ * @param tickb tickb
+ * @return non-zero if ticka represents a time that occurs after tickb.
+ * Zero otherwise.
+ */
+int    _mali_osk_time_after( u32 ticka, u32 tickb );
+
+/** @brief Convert milliseconds to OS 'ticks'
+ *
+ * @param ms time interval in milliseconds
+ * @return the corresponding time interval in OS ticks.
+ */
+u32    _mali_osk_time_mstoticks( u32 ms );
+
+/** @brief Convert OS 'ticks' to milliseconds
+ *
+ * @param ticks time interval in OS ticks.
+ * @return the corresponding time interval in milliseconds
+ */
+u32    _mali_osk_time_tickstoms( u32 ticks );
+
+
+/** @brief Get the current time in OS 'ticks'.
+ * @return the current time in OS 'ticks'.
+ */
+u32    _mali_osk_time_tickcount( void );
+
+/** @brief Cause a microsecond delay
+ *
+ * The delay will have microsecond resolution, and is necessary for correct
+ * operation of the driver. At worst, the delay will be \b at least \a usecs
+ * microseconds, and so may be (significantly) more.
+ *
+ * This function may be implemented as a busy-wait, which is the most sensible
+ * implementation. On OSs where there are situations in which a thread must not
+ * sleep, this is definitely implemented as a busy-wait.
+ *
+ * @param usecs the number of microseconds to wait for.
+ */
+void _mali_osk_time_ubusydelay( u32 usecs );
+
+/** @brief Return time in nano seconds, since any given reference.
+ *
+ * @return Time in nano seconds
+ */
+u64 _mali_osk_time_get_ns( void );
+
+
+/** @} */ /* end group _mali_osk_time */
+
+/** @defgroup _mali_osk_math OSK Math
+ * @{ */
+
+/** @brief Count Leading Zeros (Little-endian)
+ *
+ * @note This function must be implemented to support the reference
+ * implementation of _mali_osk_find_first_zero_bit, as defined in
+ * mali_osk_bitops.h.
+ *
+ * @param val 32-bit words to count leading zeros on
+ * @return the number of leading zeros.
+ */
+u32 _mali_osk_clz( u32 val );
+
+/** @brief find last (most-significant) bit set
+ *
+ * @param val 32-bit words to count last bit set on
+ * @return last bit set.
+ */
+u32 _mali_osk_fls( u32 val );
+
+/** @} */ /* end group _mali_osk_math */
+
+/** @addtogroup _mali_osk_wait_queue OSK Wait Queue functionality
+ * @{ */
+
+/** @brief Initialize an empty Wait Queue */
+_mali_osk_wait_queue_t* _mali_osk_wait_queue_init( void );
+
+/** @brief Sleep if condition is false
+ *
+ * @param queue the queue to use
+ * @param condition function pointer to a boolean function
+ * @param data data parameter for condition function
+ *
+ * Put thread to sleep if the given \a condition function returns false. When
+ * being asked to wake up again, the condition will be re-checked and the
+ * thread only woken up if the condition is now true.
+ */
+void _mali_osk_wait_queue_wait_event( _mali_osk_wait_queue_t *queue, mali_bool (*condition)(void *), void *data );
+
+/** @brief Sleep if condition is false
+ *
+ * @param queue the queue to use
+ * @param condition function pointer to a boolean function
+ * @param data data parameter for condition function
+ * @param timeout timeout in ms
+ *
+ * Put thread to sleep if the given \a condition function returns false. When
+ * being asked to wake up again, the condition will be re-checked and the
+ * thread only woken up if the condition is now true.  Will return if time
+ * exceeds timeout.
+ */
+void _mali_osk_wait_queue_wait_event_timeout( _mali_osk_wait_queue_t *queue, mali_bool (*condition)(void *), void *data, u32 timeout );
+
+/** @brief Wake up all threads in wait queue if their respective conditions are
+ * true
+ *
+ * @param queue the queue whose threads should be woken up
+ *
+ * Wake up all threads in wait queue \a queue whose condition is now true.
+ */
+void _mali_osk_wait_queue_wake_up( _mali_osk_wait_queue_t *queue );
+
+/** @brief terminate a wait queue
+ *
+ * @param queue the queue to terminate.
+ */
+void _mali_osk_wait_queue_term( _mali_osk_wait_queue_t *queue );
+/** @} */ /* end group _mali_osk_wait_queue */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Output a device driver debug message.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_dbgmsg( const char *fmt, ... );
+
+/** @brief Print fmt into buf.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param buf a pointer to the result buffer
+ * @param size the total number of bytes allowed to write to \a buf
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ * @return The number of bytes written to \a buf
+ */
+u32 _mali_osk_snprintf( char *buf, u32 size, const char *fmt, ... );
+
+/** @brief Abnormal process abort.
+ *
+ * Terminates the caller-process if this function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h.
+ *
+ * This function will never return - because to continue from a Debug assert
+ * could cause even more problems, and hinder debugging of the initial problem.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_abort(void);
+
+/** @brief Sets breakpoint at point where function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h,
+ * to assist in debugging. If debugging at this level is not required, then this
+ * function may be implemented as a stub.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_break(void);
+
+/** @brief Return an identificator for calling process.
+ *
+ * @return Identificator for calling process.
+ */
+u32 _mali_osk_get_pid(void);
+
+/** @brief Return an identificator for calling thread.
+ *
+ * @return Identificator for calling thread.
+ */
+u32 _mali_osk_get_tid(void);
+
+/** @brief Enable OS controlled runtime power management
+ */
+void _mali_osk_pm_dev_enable(void);
+
+/** @brief Disable OS controlled runtime power management
+ */
+void _mali_osk_pm_dev_disable(void);
+
+
+/** @brief Take a reference to the power manager system for the Mali device.
+ *
+ * When function returns successfully, Mali is ON.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_dec() to release this reference.
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_add(void);
+
+
+/** @brief Release the reference to the power manager system for the Mali device.
+ *
+ * When reference count reach zero, the cores can be off.
+ *
+ * @note This must be used to release references taken with \a _mali_osk_pm_dev_ref_add().
+ */
+void _mali_osk_pm_dev_ref_dec(void);
+
+
+/** @brief Take a reference to the power manager system for the Mali device.
+ *
+ * Will leave the cores powered off if they are already powered off.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_dec() to release this reference.
+ *
+ * @return MALI_TRUE if the Mali GPU is powered on, otherwise MALI_FALSE.
+ */
+mali_bool _mali_osk_pm_dev_ref_add_no_power_on(void);
+
+
+/** @brief Release the reference to the power manager system for the Mali device.
+ *
+ * When reference count reach zero, the cores can be off.
+ *
+ * @note This must be used to release references taken with \a _mali_osk_pm_dev_ref_add_no_power_on().
+ */
+void _mali_osk_pm_dev_ref_dec_no_power_on(void);
+
+/** @brief Block until pending PM operations are done
+ */
+void _mali_osk_pm_dev_barrier(void);
+
+/** @} */ /* end group  _mali_osk_miscellaneous */
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+/* Check standard inlines */
+#ifndef MALI_STATIC_INLINE
+#error MALI_STATIC_INLINE not defined on your OS
+#endif
+
+#ifndef MALI_NON_STATIC_INLINE
+#error MALI_NON_STATIC_INLINE not defined on your OS
+#endif
+
+#endif /* __MALI_OSK_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_osk_bitops.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_osk_bitops.h
new file mode 100644 (file)
index 0000000..877a07c
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_bitops.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_BITOPS_H__
+#define __MALI_OSK_BITOPS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+MALI_STATIC_INLINE void _mali_internal_clear_bit( u32 bit, u32 *addr )
+{
+       MALI_DEBUG_ASSERT( bit < 32 );
+       MALI_DEBUG_ASSERT( NULL != addr );
+
+       (*addr) &= ~(1 << bit);
+}
+
+MALI_STATIC_INLINE void _mali_internal_set_bit( u32 bit, u32 *addr )
+{
+       MALI_DEBUG_ASSERT( bit < 32 );
+       MALI_DEBUG_ASSERT( NULL != addr );
+
+       (*addr) |= (1 << bit);
+}
+
+MALI_STATIC_INLINE u32 _mali_internal_test_bit( u32 bit, u32 value )
+{
+       MALI_DEBUG_ASSERT( bit < 32 );
+       return value & (1 << bit);
+}
+
+MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit( u32 value )
+{
+       u32 inverted;
+       u32 negated;
+       u32 isolated;
+       u32 leading_zeros;
+
+       /* Begin with xxx...x0yyy...y, where ys are 1, number of ys is in range  0..31 */
+       inverted = ~value; /* zzz...z1000...0 */
+       /* Using count_trailing_zeros on inverted value -
+        * See ARM System Developers Guide for details of count_trailing_zeros */
+
+       /* Isolate the zero: it is preceded by a run of 1s, so add 1 to it */
+       negated = (u32)-inverted ; /* -a == ~a + 1 (mod 2^n) for n-bit numbers */
+       /* negated = xxx...x1000...0 */
+
+       isolated = negated & inverted ; /* xxx...x1000...0 & zzz...z1000...0, zs are ~xs */
+       /* And so the first zero bit is in the same position as the 1 == number of 1s that preceded it
+        * Note that the output is zero if value was all 1s */
+
+       leading_zeros = _mali_osk_clz( isolated );
+
+       return 31 - leading_zeros;
+}
+
+
+/** @defgroup _mali_osk_bitops OSK Non-atomic Bit-operations
+ * @{ */
+
+/**
+ * These bit-operations do not work atomically, and so locks must be used if
+ * atomicity is required.
+ *
+ * Reference implementations for Little Endian are provided, and so it should
+ * not normally be necessary to re-implement these. Efficient bit-twiddling
+ * techniques are used where possible, implemented in portable C.
+ *
+ * Note that these reference implementations rely on _mali_osk_clz() being
+ * implemented.
+ */
+
+/** @brief Clear a bit in a sequence of 32-bit words
+ * @param nr bit number to clear, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit( u32 nr, u32 *addr )
+{
+       addr += nr >> 5; /* find the correct word */
+       nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+       _mali_internal_clear_bit( nr, addr );
+}
+
+/** @brief Set a bit in a sequence of 32-bit words
+ * @param nr bit number to set, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit( u32 nr, u32 *addr )
+{
+       addr += nr >> 5; /* find the correct word */
+       nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+       _mali_internal_set_bit( nr, addr );
+}
+
+/** @brief Test a bit in a sequence of 32-bit words
+ * @param nr bit number to test, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ * @return zero if bit was clear, non-zero if set. Do not rely on the return
+ * value being related to the actual word under test.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_test_bit( u32 nr, u32 *addr )
+{
+       addr += nr >> 5; /* find the correct word */
+       nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+       return _mali_internal_test_bit( nr, *addr );
+}
+
+/* Return maxbit if not found */
+/** @brief Find the first zero bit in a sequence of 32-bit words
+ * @param addr starting point for search.
+ * @param maxbit the maximum number of bits to search
+ * @return the number of the first zero bit found, or maxbit if none were found
+ * in the specified range.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_find_first_zero_bit( const u32 *addr, u32 maxbit )
+{
+       u32 total;
+
+       for ( total = 0; total < maxbit; total += 32, ++addr ) {
+               int result;
+               result = _mali_internal_find_first_zero_bit( *addr );
+
+               /* non-negative signifies the bit was found */
+               if ( result >= 0 ) {
+                       total += (u32)result;
+                       break;
+               }
+       }
+
+       /* Now check if we reached maxbit or above */
+       if ( total >= maxbit ) {
+               total = maxbit;
+       }
+
+       return total; /* either the found bit nr, or maxbit if not found */
+}
+/** @} */ /* end group _mali_osk_bitops */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_BITOPS_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_osk_list.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_osk_list.h
new file mode 100644 (file)
index 0000000..cf54e81
--- /dev/null
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_list.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_LIST_H__
+#define __MALI_OSK_LIST_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+MALI_STATIC_INLINE void __mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+       next->prev = new_entry;
+       new_entry->next = next;
+       new_entry->prev = prev;
+       prev->next = new_entry;
+}
+
+MALI_STATIC_INLINE void __mali_osk_list_del(_mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+       next->prev = prev;
+       prev->next = next;
+}
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** Reference implementations of Doubly-linked Circular Lists are provided.
+ * There is often no need to re-implement these.
+ *
+ * @note The implementation may differ subtly from any lists the OS provides.
+ * For this reason, these lists should not be mixed with OS-specific lists
+ * inside the OSK/UKK implementation. */
+
+/** @brief Initialize a list to be a head of an empty list
+ * @param exp the list to initialize. */
+#define _MALI_OSK_INIT_LIST_HEAD(exp) _mali_osk_list_init(exp)
+
+/** @brief Define a list variable, which is uninitialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD(exp) _mali_osk_list_t exp
+
+/** @brief Define a list variable, which is initialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD_STATIC_INIT(exp) _mali_osk_list_t exp = { &exp, &exp }
+
+/** @brief Initialize a list element.
+ *
+ * All list elements must be initialized before use.
+ *
+ * Do not use on any list element that is present in a list without using
+ * _mali_osk_list_del first, otherwise this will break the list.
+ *
+ * @param list the list element to initialize
+ */
+MALI_STATIC_INLINE void _mali_osk_list_init( _mali_osk_list_t *list )
+{
+       list->next = list;
+       list->prev = list;
+}
+
+/** @brief Insert a single list element after an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the first element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the next
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_add( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+{
+       __mali_osk_list_add(new_entry, list, list->next);
+}
+
+/** @brief Insert a single list element before an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the last element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the previous
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_addtail( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+{
+       __mali_osk_list_add(new_entry, list->prev, list);
+}
+
+/** @brief Remove a single element from a list
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be uninitialized, and so should not be traversed. It must be
+ * initialized before further use.
+ *
+ * @param list the list element to remove.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_del( _mali_osk_list_t *list )
+{
+       __mali_osk_list_del(list->prev, list->next);
+}
+
+/** @brief Remove a single element from a list, and re-initialize it
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be re-initialized, and so can be used as normal.
+ *
+ * @param list the list element to remove and initialize.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_delinit( _mali_osk_list_t *list )
+{
+       __mali_osk_list_del(list->prev, list->next);
+       _mali_osk_list_init(list);
+}
+
+/** @brief Determine whether a list is empty.
+ *
+ * An empty list is one that contains a single element that points to itself.
+ *
+ * @param list the list to check.
+ * @return non-zero if the list is empty, and zero otherwise.
+ */
+MALI_STATIC_INLINE mali_bool _mali_osk_list_empty( _mali_osk_list_t *list )
+{
+       return list->next == list;
+}
+
+/** @brief Move a list element from one list to another.
+ *
+ * The list element must be initialized.
+ *
+ * As an example, moving a list item to the head of a new list causes this item
+ * to be the first element in the new list.
+ *
+ * @param move_entry the list element to move
+ * @param list the new list into which the element will be inserted, as the next
+ * element in the list.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move( _mali_osk_list_t *move_entry, _mali_osk_list_t *list )
+{
+       __mali_osk_list_del(move_entry->prev, move_entry->next);
+       _mali_osk_list_add(move_entry, list);
+}
+
+/** @brief Move an entire list
+ *
+ * The list element must be initialized.
+ *
+ * Allows you to move a list from one list head to another list head
+ *
+ * @param old_list The existing list head
+ * @param new_list The new list head (must be an empty list)
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move_list( _mali_osk_list_t *old_list, _mali_osk_list_t *new_list )
+{
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(new_list));
+       if (!_mali_osk_list_empty(old_list)) {
+               new_list->next = old_list->next;
+               new_list->prev = old_list->prev;
+               new_list->next->prev = new_list;
+               new_list->prev->next = new_list;
+               old_list->next = old_list;
+               old_list->prev = old_list;
+       }
+}
+
+/** @brief Find the containing structure of a list
+ *
+ * When traversing a list, this is used to recover the containing structure,
+ * given that is contains a _mali_osk_list_t member.
+ *
+ * Each list must be of structures of one type, and must link the same members
+ * together, otherwise it will not be possible to correctly recover the
+ * structures that the lists link.
+ *
+ * @note no type or memory checking occurs to ensure that a structure does in
+ * fact exist for the list entry, and that it is being recovered with respect
+ * to the correct list member.
+ *
+ * @param ptr the pointer to the _mali_osk_list_t member in this structure
+ * @param type the type of the structure that contains the member
+ * @param member the member of the structure that ptr points to.
+ * @return a pointer to a \a type object which contains the _mali_osk_list_t
+ * \a member, as pointed to by the _mali_osk_list_t \a *ptr.
+ */
+#define _MALI_OSK_LIST_ENTRY(ptr, type, member) \
+       _MALI_OSK_CONTAINER_OF(ptr, type, member)
+
+/** @brief Enumerate a list safely
+ *
+ * With this macro, lists can be enumerated in a 'safe' manner. That is,
+ * entries can be deleted from the list without causing an error during
+ * enumeration. To achieve this, a 'temporary' pointer is required, which must
+ * be provided to the macro.
+ *
+ * Use it like a 'for()', 'while()' or 'do()' construct, and so it must be
+ * followed by a statement or compound-statement which will be executed for
+ * each list entry.
+ *
+ * Upon loop completion, providing that an early out was not taken in the
+ * loop body, then it is guaranteed that ptr->member == list, even if the loop
+ * body never executed.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY(ptr, tmp, list, type, member)         \
+       for (ptr = _MALI_OSK_LIST_ENTRY((list)->next, type, member),      \
+            tmp = _MALI_OSK_LIST_ENTRY(ptr->member.next, type, member);  \
+            &ptr->member != (list);                                      \
+            ptr = tmp,                                                   \
+            tmp = _MALI_OSK_LIST_ENTRY(tmp->member.next, type, member))
+
+/** @brief Enumerate a list in reverse order safely
+ *
+ * This macro is identical to @ref _MALI_OSK_LIST_FOREACHENTRY, except that
+ * entries are enumerated in reverse order.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY_REVERSE(ptr, tmp, list, type, member) \
+       for (ptr = _MALI_OSK_LIST_ENTRY((list)->prev, type, member),      \
+            tmp = _MALI_OSK_LIST_ENTRY(ptr->member.prev, type, member);  \
+            &ptr->member != (list);                                      \
+            ptr = tmp,                                                   \
+            tmp = _MALI_OSK_LIST_ENTRY(tmp->member.prev, type, member))
+
+/** @} */ /* end group _mali_osk_list */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_LIST_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_osk_mali.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_osk_mali.h
new file mode 100644 (file)
index 0000000..dd5a1a2
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.h
+ * Defines the OS abstraction layer which is specific for the Mali kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_MALI_H__
+#define __MALI_OSK_MALI_H__
+
+#include <linux/mali/mali_utgard.h>
+#include <mali_osk.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Struct with device specific configuration data
+ */
+struct _mali_osk_device_data {
+       /* Dedicated GPU memory range (physical). */
+       u32 dedicated_mem_start;
+       u32 dedicated_mem_size;
+
+       /* Shared GPU memory */
+       u32 shared_mem_size;
+
+       /* Frame buffer memory to be accessible by Mali GPU (physical) */
+       u32 fb_start;
+       u32 fb_size;
+
+       /* Max runtime [ms] for jobs */
+       int max_job_runtime;
+
+       /* Report GPU utilization in this interval (specified in ms) */
+       u32 utilization_interval;
+
+       /* Function that will receive periodic GPU utilization numbers */
+       void (*utilization_callback)(struct mali_gpu_utilization_data *data);
+
+       /*
+        * Mali PMU switch delay.
+        * Only needed if the power gates are connected to the PMU in a high fanout
+        * network. This value is the number of Mali clock cycles it takes to
+        * enable the power gates and turn on the power mesh.
+        * This value will have no effect if a daisy chain implementation is used.
+        */
+       u32 pmu_switch_delay;
+
+       /* Mali Dynamic power domain configuration in sequence from 0-11
+        *  GP  PP0 PP1  PP2  PP3  PP4  PP5  PP6  PP7, L2$0 L2$1 L2$2
+        */
+       u16 pmu_domain_config[12];
+
+       /* Platform callback function for frequency tuning; needed when MALI400_POWER_PERFORMANCE_POLICY is enabled */
+       int (*set_freq_callback)(unsigned int mhz);
+};
+
+/** @brief Find Mali GPU HW resource
+ *
+ * @param addr Address of Mali GPU resource to find
+ * @param res Storage for resource information if resource is found.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if resource is not found
+ */
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res);
+
+
+/** @brief Find Mali GPU HW base address
+ *
+ * @return the lowest base address of any Mali GPU component resource, or 0 if no resources are found.
+ */
+u32 _mali_osk_resource_base_address(void);
+
+/** @brief Retrieve the Mali GPU specific data
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_device_data_get(struct _mali_osk_device_data *data);
+
+/** @brief Determines if Mali GPU has been configured with shared interrupts.
+ *
+ * @return MALI_TRUE if shared interrupts, MALI_FALSE if not.
+ */
+mali_bool _mali_osk_shared_interrupts(void);
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Copy as much data as possible from src to dest, do not crash if src or dest isn't available.
+ *
+ * @param dest Destination buffer (limited to user space mapped Mali memory)
+ * @param src Source buffer
+ * @param size Number of bytes to copy
+ * @return Number of bytes actually copied
+ */
+u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size);
+
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_MALI_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_osk_profiling.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_osk_profiling.h
new file mode 100644 (file)
index 0000000..c1810ab
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_OSK_PROFILING_H__
+#define __MALI_OSK_PROFILING_H__
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+
+#include "mali_linux_trace.h"
+#include "mali_profiling_events.h"
+#include "mali_profiling_gator_api.h"
+
+#define MALI_PROFILING_MAX_BUFFER_ENTRIES 1048576
+
+#define MALI_PROFILING_NO_HW_COUNTER ((u32)-1) /* sentinel: no HW counter assigned (matches MALI_HW_CORE_NO_COUNTER) */
+
+/** @defgroup _mali_osk_profiling External profiling connectivity
+ * @{ */
+
+/**
+ * Initialize the profiling module.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start);
+
+/*
+ * Terminate the profiling module.
+ */
+void _mali_osk_profiling_term(void);
+
+/**
+ * Start recording profiling data
+ *
+ * The specified limit will determine how large the capture buffer is.
+ * MALI_PROFILING_MAX_BUFFER_ENTRIES determines the maximum size allowed by the device driver.
+ *
+ * @param limit The desired maximum number of events to record on input, the actual maximum on output.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_start(u32 * limit);
+
+/**
+ * Add a profiling event
+ *
+ * @param event_id The event identifier.
+ * @param data0 First data parameter, depending on event_id specified.
+ * @param data1 Second data parameter, depending on event_id specified.
+ * @param data2 Third data parameter, depending on event_id specified.
+ * @param data3 Fourth data parameter, depending on event_id specified.
+ * @param data4 Fifth data parameter, depending on event_id specified.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4) trace_mali_timeline_event((event_id), (data0), (data1), (data2), (data3), (data4))
+
+/**
+ * Report a hardware counter event.
+ *
+ * @param counter_id The ID of the counter.
+ * @param value The value of the counter.
+ */
+
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_report_hw_counter(counter_id, value) trace_mali_hw_counter(counter_id, value)
+
+/**
+ * Report SW counters
+ *
+ * @param counters array of counter values
+ */
+void _mali_osk_profiling_report_sw_counters(u32 *counters);
+
+/**
+ * Stop recording profiling data
+ *
+ * @param count Returns the number of recorded events.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_stop(u32 * count);
+
+/**
+ * Retrieves the number of events that can be retrieved
+ *
+ * @return The number of recorded events that can be retrieved.
+ */
+u32 _mali_osk_profiling_get_count(void);
+
+/**
+ * Retrieve an event
+ *
+ * @param index Event index (start with 0 and continue until this function fails to retrieve all events)
+ * @param timestamp The timestamp for the retrieved event will be stored here.
+ * @param event_id The event ID for the retrieved event will be stored here.
+ * @param data The 5 data values for the retrieved event will be stored here.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5]);
+
+/**
+ * Clear the recorded buffer.
+ *
+ * This is needed in order to start another recording.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_clear(void);
+
+/**
+ * Checks if a recording of profiling data is in progress
+ *
+ * @return MALI_TRUE if recording of profiling data is in progress, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_is_recording(void);
+
+/**
+ * Checks if profiling data is available for retrieval
+ *
+ * @return MALI_TRUE if profiling data is available, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_have_recording(void);
+
+/** @} */ /* end group _mali_osk_profiling */
+
+#else /* defined(CONFIG_MALI400_PROFILING)  && defined(CONFIG_TRACEPOINTS) */
+
+/* Dummy add_event, for when profiling is disabled. */
+
+#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4)
+
+#endif /* defined(CONFIG_MALI400_PROFILING)  && defined(CONFIG_TRACEPOINTS) */
+
+#endif /* __MALI_OSK_PROFILING_H__ */
+
+
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_osk_types.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_osk_types.h
new file mode 100644 (file)
index 0000000..5c65ab2
--- /dev/null
@@ -0,0 +1,455 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_types.h
+ * Defines types of the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_TYPES_H__
+#define __MALI_OSK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_osk_miscellaneous OSK Miscellaneous functions, constants and types
+ * @{ */
+
+/* Define integer types used by OSK. Note: these currently clash with Linux so we only define them if not defined already */
+#ifndef __KERNEL__
+typedef unsigned char      u8;
+typedef signed char        s8;
+typedef unsigned short     u16;
+typedef signed short       s16;
+typedef unsigned int       u32;
+typedef signed int         s32;
+typedef unsigned long long u64;
+#define BITS_PER_LONG (sizeof(long)*8)
+#else
+/* Ensure Linux types u32, etc. are defined */
+#include <linux/types.h>
+#endif
+
+/** @brief Mali Boolean type which uses MALI_TRUE and MALI_FALSE
+  */
+typedef unsigned long mali_bool;
+
+#ifndef MALI_TRUE
+#define MALI_TRUE ((mali_bool)1)
+#endif
+
+#ifndef MALI_FALSE
+#define MALI_FALSE ((mali_bool)0)
+#endif
+
+#define MALI_HW_CORE_NO_COUNTER     ((u32)-1)
+
+/**
+ * @brief OSK Error codes
+ *
+ * Each OS may use its own set of error codes, and may require that the
+ * User/Kernel interface take certain error code. This means that the common
+ * error codes need to be sufficiently rich to pass the correct error code
+ * through from the OSK to U/K layer, across all OSs.
+ *
+ * The result is that some error codes will appear redundant on some OSs.
+ * Under all OSs, the OSK layer must translate native OS error codes to
+ * _mali_osk_errcode_t codes. Similarly, the U/K layer must translate from
+ * _mali_osk_errcode_t codes to native OS error codes.
+ */
+typedef enum {
+       _MALI_OSK_ERR_OK = 0, /**< Success. */
+       _MALI_OSK_ERR_FAULT = -1, /**< General non-success */
+       _MALI_OSK_ERR_INVALID_FUNC = -2, /**< Invalid function requested through User/Kernel interface (e.g. bad IOCTL number) */
+       _MALI_OSK_ERR_INVALID_ARGS = -3, /**< Invalid arguments passed through User/Kernel interface */
+       _MALI_OSK_ERR_NOMEM = -4, /**< Insufficient memory */
+       _MALI_OSK_ERR_TIMEOUT = -5, /**< Timeout occurred */
+       _MALI_OSK_ERR_RESTARTSYSCALL = -6, /**< Special: On certain OSs, must report when an interruptable mutex is interrupted. Ignore otherwise. */
+       _MALI_OSK_ERR_ITEM_NOT_FOUND = -7, /**< Table Lookup failed */
+       _MALI_OSK_ERR_BUSY = -8, /**< Device/operation is busy. Try again later */
+       _MALI_OSK_ERR_UNSUPPORTED = -9, /**< Optional part of the interface used, and is unsupported */
+} _mali_osk_errcode_t;
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_wq OSK work queues
+ * @{ */
+
+/** @brief Private type for work objects */
+typedef struct _mali_osk_wq_work_s _mali_osk_wq_work_t;
+typedef struct _mali_osk_wq_delayed_work_s _mali_osk_wq_delayed_work_t;
+
+/** @brief Work queue handler function
+ *
+ * This function type is called when the work is scheduled by the work queue,
+ * e.g. as an IRQ bottom-half handler.
+ *
+ * Refer to \ref _mali_osk_wq_schedule_work() for more information on the
+ * work-queue and work handlers.
+ *
+ * @param arg resource-specific data
+ */
+typedef void (*_mali_osk_wq_work_handler_t)( void * arg );
+
+/* @} */ /* end group _mali_osk_wq */
+
+/** @defgroup _mali_osk_irq OSK IRQ handling
+ * @{ */
+
+/** @brief Private type for IRQ handling objects */
+typedef struct _mali_osk_irq_t_struct _mali_osk_irq_t;
+
+/** @brief Optional function to trigger an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data */
+typedef void  (*_mali_osk_irq_trigger_t)( void * arg );
+
+/** @brief Optional function to acknowledge an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was successful, or a suitable _mali_osk_errcode_t on failure. */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)( void * arg );
+
+/** @brief IRQ 'upper-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the initial handling of a
+ * resource's IRQ. This maps on to the concept of an ISR that does the minimum
+ * work necessary before handing off to an IST.
+ *
+ * The communication of the resource-specific data from the ISR to the IST is
+ * handled by the OSK implementation.
+ *
+ * On most systems, the IRQ upper-half handler executes in IRQ context.
+ * Therefore, the system may have restrictions about what can be done in this
+ * context
+ *
+ * If an IRQ upper-half handler requires more work to be done than can be
+ * achieved in an IRQ context, then it may defer the work with
+ * _mali_osk_wq_schedule_work(). Refer to \ref _mali_osk_wq_create_work() for
+ * more information.
+ *
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was correctly handled, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+typedef _mali_osk_errcode_t  (*_mali_osk_irq_uhandler_t)( void * arg );
+
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @defgroup _mali_osk_atomic OSK Atomic counters
+ * @{ */
+
+/** @brief Public type of atomic counters
+ *
+ * This is public for allocation on stack. On systems that support it, this is just a single 32-bit value.
+ * On others, it could be encapsulating an object stored elsewhere.
+ *
+ * Regardless of implementation, the \ref _mali_osk_atomic functions \b must be used
+ * for all accesses to the variable's value, even if atomicity is not required.
+ * Do not access u.val or u.obj directly.
+ */
+typedef struct {
+       union {
+               u32 val;
+               void *obj;
+       } u;
+} _mali_osk_atomic_t;
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+
+/** @brief OSK Mutual Exclusion Lock ordered list
+ *
+ * This lists the various types of locks in the system and is used to check
+ * that locks are taken in the correct order.
+ *
+ * - Holding more than one lock of the same order at the same time is not
+ *   allowed.
+ * - Taking a lock of a lower order than the highest-order lock currently held
+ *   is not allowed.
+ *
+ */
+typedef enum {
+       /*  ||    Locks    ||  */
+       /*  ||   must be   ||  */
+       /* _||_  taken in _||_ */
+       /* \  /    this   \  / */
+       /*  \/    order!   \/  */
+
+       _MALI_OSK_LOCK_ORDER_FIRST = 0,
+
+       _MALI_OSK_LOCK_ORDER_SESSIONS,
+       _MALI_OSK_LOCK_ORDER_MEM_SESSION,
+       _MALI_OSK_LOCK_ORDER_MEM_INFO,
+       _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE,
+       _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP,
+       _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL,
+       _MALI_OSK_LOCK_ORDER_GROUP,
+       _MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM,
+       _MALI_OSK_LOCK_ORDER_SCHEDULER,
+       _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED,
+       _MALI_OSK_LOCK_ORDER_PM_CORE_STATE,
+       _MALI_OSK_LOCK_ORDER_L2_COMMAND,
+       _MALI_OSK_LOCK_ORDER_DMA_COMMAND,
+       _MALI_OSK_LOCK_ORDER_PROFILING,
+       _MALI_OSK_LOCK_ORDER_L2_COUNTER,
+       _MALI_OSK_LOCK_ORDER_UTILIZATION,
+       _MALI_OSK_LOCK_ORDER_PM_EXECUTE,
+       _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS,
+       _MALI_OSK_LOCK_ORDER_PM_DOMAIN,
+       _MALI_OSK_LOCK_ORDER_PMU,
+
+       _MALI_OSK_LOCK_ORDER_LAST,
+} _mali_osk_lock_order_t;
+
+
+/** @brief OSK Mutual Exclusion Lock flags type
+ *
+ * - Any lock can use the order parameter.
+ */
+typedef enum {
+       _MALI_OSK_LOCKFLAG_UNORDERED        = 0x1, /**< Indicate that the order of this lock should not be checked */
+       _MALI_OSK_LOCKFLAG_ORDERED          = 0x2,
+       /** @enum _mali_osk_lock_flags_t
+        *
+        * Flags from 0x10000--0x80000000 are RESERVED for User-mode */
+
+} _mali_osk_lock_flags_t;
+
+/** @brief Mutual Exclusion Lock Mode Optimization hint
+ *
+ * The lock mode is used to implement the read/write locking of locks when we call
+ * functions _mali_osk_mutex_rw_init/wait/signal/term/. In this case, the RO mode can
+ * be used to allow multiple concurrent readers, but no writers. The RW mode is used for
+ * writers, and so will wait for all readers to release the lock (if any present).
+ * Further readers and writers will wait until the writer releases the lock.
+ *
+ * The mode is purely an optimization hint: for example, it is permissible for
+ * all locks to behave in RW mode, regardless of that supplied.
+ *
+ * It is an error to attempt to use locks in anything other than RW mode when
+ * calling the functions _mali_osk_mutex_rw_wait/signal().
+ *
+ */
+typedef enum {
+       _MALI_OSK_LOCKMODE_UNDEF = -1,  /**< Undefined lock mode. For internal use only */
+       _MALI_OSK_LOCKMODE_RW    = 0x0, /**< Read-write mode, default. All readers and writers are mutually-exclusive */
+       _MALI_OSK_LOCKMODE_RO,          /**< Read-only mode, to support multiple concurrent readers, but mutual exclusion in the presence of writers. */
+       /** @enum _mali_osk_lock_mode_t
+        *
+        * Lock modes 0x40--0x7F are RESERVED for User-mode */
+} _mali_osk_lock_mode_t;
+
+/** @brief Private types for Mutual Exclusion lock objects */
+typedef struct _mali_osk_lock_debug_s _mali_osk_lock_debug_t;
+typedef struct _mali_osk_spinlock_s _mali_osk_spinlock_t;
+typedef struct _mali_osk_spinlock_irq_s _mali_osk_spinlock_irq_t;
+typedef struct _mali_osk_mutex_s _mali_osk_mutex_t;
+typedef struct _mali_osk_mutex_rw_s _mali_osk_mutex_rw_t;
+
+/** @} */ /* end group _mali_osk_lock */
+
+/** @defgroup _mali_osk_low_level_memory OSK Low-level Memory Operations
+ * @{ */
+
+/**
+ * @brief Private data type for use in IO accesses to/from devices.
+ *
+ * This represents some range that is accessible from the device. Examples
+ * include:
+ * - Device Registers, which could be readable and/or writeable.
+ * - Memory that the device has access to, for storing configuration structures.
+ *
+ * Access to this range must be made through the _mali_osk_mem_ioread32() and
+ * _mali_osk_mem_iowrite32() functions.
+ */
+typedef struct _mali_io_address * mali_io_address;
+
+/** @defgroup _MALI_OSK_CPU_PAGE CPU Physical page size macros.
+ *
+ * The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The CPU Physical Page Size has been assumed to be the same as the Mali
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** CPU Page Order, as log to base 2 of the Page size. @see _MALI_OSK_CPU_PAGE_SIZE */
+#define _MALI_OSK_CPU_PAGE_ORDER ((u32)12)
+/** CPU Page Size, in bytes.               */
+#define _MALI_OSK_CPU_PAGE_SIZE (((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER))
+/** CPU Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_CPU_PAGE_MASK (~((((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_CPU_PAGE */
+
+/** @defgroup _MALI_OSK_MALI_PAGE Mali Physical Page size macros
+ *
+ * Mali Physical page size macros. The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The Mali Physical Page Size has been assumed to be the same as the CPU
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** Mali Page Order, as log to base 2 of the Page size. @see _MALI_OSK_MALI_PAGE_SIZE */
+#define _MALI_OSK_MALI_PAGE_ORDER ((u32)12)
+/** Mali Page Size, in bytes.               */
+#define _MALI_OSK_MALI_PAGE_SIZE (((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER))
+/** Mali Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_MALI_PAGE_MASK (~((((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_MALI_PAGE*/
+
+/** @brief flags for mapping a user-accessible memory range
+ *
+ * Where a function with prefix '_mali_osk_mem_mapregion' accepts flags as one
+ * of the function parameters, it will use one of these. These allow per-page
+ * control over mappings. Compare with the mali_memory_allocation_flag type,
+ * which acts over an entire range
+ *
+ * These may be OR'd together with bitwise OR (|), but must be cast back into
+ * the type after OR'ing.
+ */
+typedef enum {
+       _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR = 0x1, /**< Physical address is OS Allocated */
+} _mali_osk_mem_mapregion_flags_t;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+/** @defgroup _mali_osk_notification OSK Notification Queues
+ * @{ */
+
+/** @brief Private type for notification queue objects */
+typedef struct _mali_osk_notification_queue_t_struct _mali_osk_notification_queue_t;
+
+/** @brief Public notification data object type */
+typedef struct _mali_osk_notification_t_struct {
+       u32 notification_type;   /**< The notification type */
+       u32 result_buffer_size; /**< Size of the result buffer to copy to user space */
+       void * result_buffer;   /**< Buffer containing any type specific data */
+} _mali_osk_notification_t;
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @defgroup _mali_osk_timer OSK Timer Callbacks
+ * @{ */
+
+/** @brief Function to call when a timer expires
+ *
+ * When a timer expires, this function is called. Note that on many systems,
+ * a timer callback will be executed in IRQ context. Therefore, restrictions
+ * may apply on what can be done inside the timer callback.
+ *
+ * If a timer requires more work to be done than can be achieved in an IRQ
+ * context, then it may defer the work with a work-queue. For example, it may
+ * use \ref _mali_osk_wq_schedule_work() to make use of a bottom-half handler
+ * to carry out the remaining work.
+ *
+ * Stopping the timer with \ref _mali_osk_timer_del() blocks on completion of
+ * the callback. Therefore, the callback may not obtain any mutexes also held
+ * by any callers of _mali_osk_timer_del(). Otherwise, a deadlock may occur.
+ *
+ * @param arg Function-specific data */
+typedef void (*_mali_osk_timer_callback_t)(void * arg);
+
+/** @brief Private type for Timer Callback Objects */
+typedef struct _mali_osk_timer_t_struct _mali_osk_timer_t;
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** @brief Public List objects.
+ *
+ * To use, add a _mali_osk_list_t member to the structure that may become part
+ * of a list. When traversing the _mali_osk_list_t objects, use the
+ * _MALI_OSK_CONTAINER_OF() macro to recover the structure from its
+ *_mali_osk_list_t member
+ *
+ * Each structure may have multiple _mali_osk_list_t members, so that the
+ * structure is part of multiple lists. When traversing lists, ensure that the
+ * correct _mali_osk_list_t member is used, because type-checking will be
+ * lost by the compiler.
+ */
+typedef struct _mali_osk_list_s {
+       struct _mali_osk_list_s *next;
+       struct _mali_osk_list_s *prev;
+} _mali_osk_list_t;
+/** @} */ /* end group _mali_osk_list */
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief resource description struct
+ *
+ * Platform independent representation of a Mali HW resource
+ */
+typedef struct _mali_osk_resource {
+       const char * description;       /**< short description of the resource */
+       u32 base;                       /**< Physical base address of the resource, as seen by Mali resources. */
+       u32 irq;                        /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */
+} _mali_osk_resource_t;
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_wait_queue OSK Wait Queue functionality
+ * @{ */
+/** @brief Private type for wait queue objects */
+typedef struct _mali_osk_wait_queue_t_struct _mali_osk_wait_queue_t;
+/** @} */ /* end group _mali_osk_wait_queue */
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_TYPES_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pm.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pm.c
new file mode 100644 (file)
index 0000000..8169f10
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_pm.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_utilization.h"
+#include "mali_group.h"
+#include "mali_pm_domain.h"
+#include "mali_pmu.h"
+
+static mali_bool mali_power_on = MALI_FALSE;
+
+/** @brief Initialize the Mali PM module by enabling runtime PM on the device.
+ * @return _MALI_OSK_ERR_OK (the only value this implementation returns). */
+_mali_osk_errcode_t mali_pm_initialize(void)
+{
+       _mali_osk_pm_dev_enable();
+       return _MALI_OSK_ERR_OK;
+}
+
+/** @brief Terminate the Mali PM module: tear down all PM domains, then
+ * disable runtime PM on the device. */
+void mali_pm_terminate(void)
+{
+       mali_pm_domain_terminate();
+       _mali_osk_pm_dev_disable();
+}
+
+/* Reset GPU after power up */
+static void mali_pm_reset_gpu(void)
+{
+       /* Reset all L2 caches */
+       mali_l2_cache_reset_all();
+
+       /* Reset all groups */
+       mali_scheduler_reset_all_groups();
+}
+
+/** @brief OS (system) suspend callback: quiesce the GP/PP schedulers and
+ * utilization tracking, and — when runtime PM is not compiled in — power the
+ * groups off directly and record the power state. */
+void mali_pm_os_suspend(void)
+{
+       MALI_DEBUG_PRINT(3, ("Mali PM: OS suspend\n"));
+       mali_gp_scheduler_suspend();
+       mali_pp_scheduler_suspend();
+       mali_utilization_suspend();
+/* MALI_SEC */
+#if !defined(CONFIG_PM_RUNTIME)
+       /* Without CONFIG_PM_RUNTIME the driver manages group power itself. */
+       mali_group_power_off(MALI_TRUE);
+       mali_power_on = MALI_FALSE;
+#endif
+}
+
+/** @brief OS (system) resume callback: when runtime PM is not compiled in,
+ * reset the PMU and (if power had been lost) the GPU, then resume the GP/PP
+ * schedulers. With CONFIG_PM_RUNTIME enabled, only the schedulers are
+ * resumed here — power handling is deferred to the runtime PM callbacks. */
+void mali_pm_os_resume(void)
+{
+#if !defined(CONFIG_PM_RUNTIME)
+       struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+       mali_bool do_reset = MALI_FALSE;
+#endif
+
+       MALI_DEBUG_PRINT(3, ("Mali PM: OS resume\n"));
+/* MALI_SEC */
+/******************************************************************
+ *
+ * <2013. 08. 23>
+ *  In Pegasus prime, PMU is not enabled(Power off) while
+ * system wake up(suspend -> resume).
+ *
+ * Because PMU power is off, GPU does not work.
+ * Therefore code is commented like below.
+ *
+ *****************************************************************/
+#if !defined(CONFIG_PM_RUNTIME)
+       /* Power was lost during suspend, so a full GPU reset is required. */
+       if (MALI_TRUE != mali_power_on)
+       {
+               do_reset = MALI_TRUE;
+       }
+
+       if (NULL != pmu) {
+               mali_pmu_reset(pmu);
+       }
+
+       mali_power_on = MALI_TRUE;
+       /* Publish the power state before touching GPU registers. */
+       _mali_osk_write_mem_barrier();
+
+       if (do_reset) {
+               mali_pm_reset_gpu();
+               mali_group_power_on();
+       }
+#endif
+       mali_gp_scheduler_resume();
+       mali_pp_scheduler_resume();
+}
+
+/** @brief Runtime-PM suspend callback: power off all groups and record the
+ * power state. */
+void mali_pm_runtime_suspend(void)
+{
+       MALI_DEBUG_PRINT(3, ("Mali PM: Runtime suspend\n"));
+       mali_group_power_off(MALI_TRUE);
+       mali_power_on = MALI_FALSE;
+}
+
+/** @brief Runtime-PM resume callback: reset the PMU (if present), mark power
+ * as on, and — if power had been lost since the last resume — reset the GPU
+ * (L2 caches and groups) and power the groups back on. */
+void mali_pm_runtime_resume(void)
+{
+       struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+       mali_bool do_reset = MALI_FALSE;
+
+       MALI_DEBUG_PRINT(3, ("Mali PM: Runtime resume\n"));
+
+       if (MALI_TRUE != mali_power_on) {
+               do_reset = MALI_TRUE;
+       }
+
+       if (NULL != pmu) {
+               mali_pmu_reset(pmu);
+       }
+
+       mali_power_on = MALI_TRUE;
+       /* Publish the power state before touching GPU registers. */
+       _mali_osk_write_mem_barrier();
+
+       if (do_reset) {
+               mali_pm_reset_gpu();
+               mali_group_power_on();
+       }
+}
+
+/** @brief Record that GPU power is on without performing any power action. */
+void mali_pm_set_power_is_on(void)
+{
+       mali_power_on = MALI_TRUE;
+}
+
+/** @brief Query the last recorded GPU power state.
+ * @return MALI_TRUE if power is recorded as on. */
+mali_bool mali_pm_is_power_on(void)
+{
+       return mali_power_on;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pm.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pm.h
new file mode 100644 (file)
index 0000000..36f0f50
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PM_H__
+#define __MALI_PM_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_pm_initialize(void);
+void mali_pm_terminate(void);
+
+/* Callback functions registered for the runtime PMM system */
+void mali_pm_os_suspend(void);
+void mali_pm_os_resume(void);
+void mali_pm_runtime_suspend(void);
+void mali_pm_runtime_resume(void);
+
+void mali_pm_set_power_is_on(void);
+mali_bool mali_pm_is_power_on(void);
+
+#endif /* __MALI_PM_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pm_domain.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pm_domain.c
new file mode 100644 (file)
index 0000000..bd10a41
--- /dev/null
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_pm_domain.h"
+#include "mali_pmu.h"
+#include "mali_group.h"
+
+static struct mali_pm_domain *mali_pm_domains[MALI_MAX_NUMBER_OF_DOMAINS] = { NULL, };
+
+/** @brief Acquire the domain's IRQ-safe spinlock. */
+static void mali_pm_domain_lock(struct mali_pm_domain *domain)
+{
+       _mali_osk_spinlock_irq_lock(domain->lock);
+}
+
+/** @brief Release the domain's IRQ-safe spinlock. */
+static void mali_pm_domain_unlock(struct mali_pm_domain *domain)
+{
+       _mali_osk_spinlock_irq_unlock(domain->lock);
+}
+
+/** @brief Set the domain state. Caller is expected to hold the domain lock
+ * (all callers in this file do) — NOTE(review): not enforced here. */
+MALI_STATIC_INLINE void mali_pm_domain_state_set(struct mali_pm_domain *domain, mali_pm_domain_state state)
+{
+       domain->state = state;
+}
+
+/** @brief Create (or look up) the PM domain for a PMU mask.
+ *
+ * @param pmu_mask PMU power mask with exactly one bit set (asserted below);
+ *                 the bit index is used as the domain's slot in the global
+ *                 mali_pm_domains[] table.
+ * @return The existing domain for this mask if one was already created,
+ *         a newly allocated domain (initial state MALI_PM_DOMAIN_ON, no
+ *         groups or L2 attached), or NULL on allocation/lock failure. */
+struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask)
+{
+       struct mali_pm_domain* domain = NULL;
+       u32 domain_id = 0;
+
+       /* Idempotent: return the already-registered domain for this mask. */
+       domain = mali_pm_domain_get_from_mask(pmu_mask);
+       if (NULL != domain) return domain;
+
+       MALI_DEBUG_PRINT(2, ("Mali PM domain: Creating Mali PM domain (mask=0x%08X)\n", pmu_mask));
+
+       domain = (struct mali_pm_domain *)_mali_osk_malloc(sizeof(struct mali_pm_domain));
+       if (NULL != domain) {
+               domain->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PM_DOMAIN);
+               if (NULL == domain->lock) {
+                       _mali_osk_free(domain);
+                       return NULL;
+               }
+
+               domain->state = MALI_PM_DOMAIN_ON;
+               domain->pmu_mask = pmu_mask;
+               domain->use_count = 0;
+               domain->group_list = NULL;
+               domain->group_count = 0;
+               domain->l2 = NULL;
+
+               /* Domain slot = index of the (single) set bit in pmu_mask. */
+               domain_id = _mali_osk_fls(pmu_mask) - 1;
+               /* Verify the domain_id */
+               MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > domain_id);
+               /* Verify that pmu_mask only one bit is set */
+               MALI_DEBUG_ASSERT((1 << domain_id) == pmu_mask);
+               mali_pm_domains[domain_id] = domain;
+
+               return domain;
+       } else {
+               MALI_DEBUG_PRINT_ERROR(("Unable to create PM domain\n"));
+       }
+
+       return NULL;
+}
+
+/** @brief Free a PM domain and its spinlock. NULL is a no-op.
+ * NOTE(review): the entry in mali_pm_domains[] is not cleared here; the only
+ * caller (mali_pm_domain_terminate) frees all slots at teardown. */
+void mali_pm_domain_delete(struct mali_pm_domain *domain)
+{
+       if (NULL == domain) {
+               return;
+       }
+       _mali_osk_spinlock_irq_term(domain->lock);
+
+       _mali_osk_free(domain);
+}
+
+/** @brief Delete every registered PM domain (NULL slots are skipped by
+ * mali_pm_domain_delete). Called from mali_pm_terminate. */
+void mali_pm_domain_terminate(void)
+{
+       int i;
+
+       /* Delete all domains */
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               mali_pm_domain_delete(mali_pm_domains[i]);
+       }
+}
+
+/** @brief Attach a group to the PM domain identified by @a mask.
+ *
+ * The group is pushed onto the front of the domain's singly-linked group
+ * list (linked through group->pm_domain_list). No-op if no domain exists
+ * for the mask.
+ * @param mask PMU mask identifying the domain (one bit set)
+ * @param group group to attach; must not be NULL */
+void mali_pm_domain_add_group(u32 mask, struct mali_group *group)
+{
+       struct mali_pm_domain *domain = mali_pm_domain_get_from_mask(mask);
+       struct mali_group *next;
+
+       if (NULL == domain) return;
+
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       ++domain->group_count;
+       next = domain->group_list;
+
+       domain->group_list = group;
+
+       group->pm_domain_list = next;
+
+       mali_group_set_pm_domain(group, domain);
+
+       /* Get pm domain ref after mali_group_set_pm_domain */
+       mali_group_get_pm_domain_ref(group);
+}
+
+/** @brief Attach an L2 cache core to the PM domain identified by @a mask.
+ * A domain holds at most one L2 (asserted). No-op if no domain exists. */
+void mali_pm_domain_add_l2(u32 mask, struct mali_l2_cache_core *l2)
+{
+       struct mali_pm_domain *domain = mali_pm_domain_get_from_mask(mask);
+
+       if (NULL == domain) return;
+
+       MALI_DEBUG_ASSERT(NULL == domain->l2);
+       MALI_DEBUG_ASSERT(NULL != l2);
+
+       domain->l2 = l2;
+
+       mali_l2_cache_set_pm_domain(l2, domain);
+}
+
+/** @brief Look up a PM domain by PMU mask.
+ * @param mask PMU mask with exactly one bit set (asserted); 0 returns NULL
+ * @return the registered domain for that bit, or NULL if none */
+struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask)
+{
+       u32 id = 0;
+
+       if (0 == mask) return NULL;
+
+       id = _mali_osk_fls(mask)-1;
+
+       MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);
+       /* Verify that pmu_mask only one bit is set */
+       MALI_DEBUG_ASSERT((1 << id) == mask);
+
+       return mali_pm_domains[id];
+}
+
+/** @brief Look up a PM domain by table index (bit position of its PMU mask).
+ * @return the registered domain, or NULL if the slot is empty */
+struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id)
+{
+       MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);
+
+       return mali_pm_domains[id];
+}
+
+/** @brief Take a reference on a PM domain, powering it up if it is not
+ * already on.
+ *
+ * Increments use_count and, when the domain state is not ON, asks the PMU
+ * to power the domain up and marks it ON. NULL domain is a no-op.
+ * NOTE(review): the PMU power-up call is made while holding the domain
+ * spinlock — mali_pmu_power_up must therefore be safe in that context. */
+void mali_pm_domain_ref_get(struct mali_pm_domain *domain)
+{
+       if (NULL == domain) return;
+
+       mali_pm_domain_lock(domain);
+       ++domain->use_count;
+
+       if (MALI_PM_DOMAIN_ON != domain->state) {
+               /* Power on */
+               struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+               MALI_DEBUG_PRINT(3, ("PM Domain: Powering on 0x%08x\n", domain->pmu_mask));
+
+               if (NULL != pmu) {
+                       _mali_osk_errcode_t err;
+
+                       err = mali_pmu_power_up(pmu, domain->pmu_mask);
+
+                       /* _MALI_OSK_ERR_BUSY is tolerated (not treated as failure). */
+                       if (_MALI_OSK_ERR_OK != err && _MALI_OSK_ERR_BUSY != err) {
+                               MALI_PRINT_ERROR(("PM Domain: Failed to power up PM domain 0x%08x\n",
+                                                 domain->pmu_mask));
+                       }
+               }
+               /* State is set to ON even if the PMU reported an error. */
+               mali_pm_domain_state_set(domain, MALI_PM_DOMAIN_ON);
+       } else {
+               MALI_DEBUG_ASSERT(MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(domain));
+       }
+
+       mali_pm_domain_unlock(domain);
+}
+
+/** @brief Drop a reference on a PM domain; when the count reaches zero the
+ * domain is marked OFF and the PMU is asked to power it down. NULL is a
+ * no-op. Must balance a prior use_count increment (ref_get/lock_state). */
+void mali_pm_domain_ref_put(struct mali_pm_domain *domain)
+{
+       if (NULL == domain) return;
+
+       mali_pm_domain_lock(domain);
+       --domain->use_count;
+
+       if (0 == domain->use_count && MALI_PM_DOMAIN_OFF != domain->state) {
+               /* Power off */
+               struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+               MALI_DEBUG_PRINT(3, ("PM Domain: Powering off 0x%08x\n", domain->pmu_mask));
+
+               /* Mark OFF before issuing the power-down command. */
+               mali_pm_domain_state_set(domain, MALI_PM_DOMAIN_OFF);
+
+               if (NULL != pmu) {
+                       _mali_osk_errcode_t err;
+
+                       err = mali_pmu_power_down(pmu, domain->pmu_mask);
+
+                       /* _MALI_OSK_ERR_BUSY is tolerated (not treated as failure). */
+                       if (_MALI_OSK_ERR_OK != err && _MALI_OSK_ERR_BUSY != err) {
+                               MALI_PRINT_ERROR(("PM Domain: Failed to power down PM domain 0x%08x\n",
+                                                 domain->pmu_mask));
+                       }
+               }
+       }
+       mali_pm_domain_unlock(domain);
+}
+
+/** @brief Pin the domain's current power state without powering it on.
+ *
+ * Takes a use_count reference (so the domain cannot transition to OFF) and
+ * a no-power-on runtime-PM device reference, then reports whether both the
+ * domain (if given) and the device are currently powered.
+ * @param domain domain to pin, or NULL to check only the device
+ * @return MALI_TRUE if powered; must be balanced by
+ *         mali_pm_domain_unlock_state() in either case. */
+mali_bool mali_pm_domain_lock_state(struct mali_pm_domain *domain)
+{
+       mali_bool is_powered = MALI_TRUE;
+
+       /* Take a reference without powering on */
+       if (NULL != domain) {
+               mali_pm_domain_lock(domain);
+               ++domain->use_count;
+
+               if (MALI_PM_DOMAIN_ON != domain->state) {
+                       is_powered = MALI_FALSE;
+               }
+               mali_pm_domain_unlock(domain);
+       }
+
+       if(!_mali_osk_pm_dev_ref_add_no_power_on()) {
+               is_powered = MALI_FALSE;
+       }
+
+       return is_powered;
+}
+
+/** @brief Release the references taken by mali_pm_domain_lock_state():
+ * drops the no-power-on device reference and, if a domain was given, its
+ * use_count reference (which may power the domain down). */
+void mali_pm_domain_unlock_state(struct mali_pm_domain *domain)
+{
+       _mali_osk_pm_dev_ref_dec_no_power_on();
+
+       if (NULL != domain) {
+               mali_pm_domain_ref_put(domain);
+       }
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pm_domain.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pm_domain.h
new file mode 100644 (file)
index 0000000..9b1afc3
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PM_DOMAIN_H__
+#define __MALI_PM_DOMAIN_H__
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+#include "mali_l2_cache.h"
+#include "mali_group.h"
+#include "mali_pmu.h"
+
+typedef enum {
+       MALI_PM_DOMAIN_ON,
+       MALI_PM_DOMAIN_OFF,
+} mali_pm_domain_state;
+
+/** @brief A Mali PMU power domain: refcounted power state plus the groups
+ * and (at most one) L2 cache it powers. */
+struct mali_pm_domain {
+       mali_pm_domain_state state;     /**< Current power state (ON/OFF) */
+       _mali_osk_spinlock_irq_t *lock; /**< Protects state and use_count */
+
+       s32 use_count;                  /**< References; domain powers off at 0 */
+
+       u32 pmu_mask;                   /**< PMU mask for this domain (one bit set) */
+
+       int group_count;                /**< Number of groups in group_list */
+       struct mali_group *group_list;  /**< Groups, linked via group->pm_domain_list */
+
+       struct mali_l2_cache_core *l2;  /**< L2 cache in this domain, or NULL */
+};
+
+struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask);
+
+void mali_pm_domain_add_group(u32 mask, struct mali_group *group);
+
+void mali_pm_domain_add_l2(u32 mask, struct mali_l2_cache_core *l2);
+void mali_pm_domain_delete(struct mali_pm_domain *domain);
+
+void mali_pm_domain_terminate(void);
+
+/** Get PM domain from domain ID
+ */
+struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask);
+struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id);
+
+/* Ref counting */
+void mali_pm_domain_ref_get(struct mali_pm_domain *domain);
+void mali_pm_domain_ref_put(struct mali_pm_domain *domain);
+
+/** @brief Get the L2 cache core attached to a domain (may be NULL). */
+MALI_STATIC_INLINE struct mali_l2_cache_core *mali_pm_domain_l2_get(struct mali_pm_domain *domain)
+{
+       return domain->l2;
+}
+
+/** @brief Get the domain's current power state (unsynchronized read). */
+MALI_STATIC_INLINE mali_pm_domain_state mali_pm_domain_state_get(struct mali_pm_domain *domain)
+{
+       return domain->state;
+}
+
+mali_bool mali_pm_domain_lock_state(struct mali_pm_domain *domain);
+void mali_pm_domain_unlock_state(struct mali_pm_domain *domain);
+
+#define MALI_PM_DOMAIN_FOR_EACH_GROUP(group, domain) for ((group) = (domain)->group_list;\
+               NULL != (group); (group) = (group)->pm_domain_list)
+
+#endif /* __MALI_PM_DOMAIN_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pmu.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pmu.c
new file mode 100644 (file)
index 0000000..9206662
--- /dev/null
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.c
+ * Mali driver functions for Mali 400 PMU hardware
+ */
+#include "mali_hw_core.h"
+#include "mali_pmu.h"
+#include "mali_pp.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_pm.h"
+#include "mali_osk_mali.h"
+
+/* Per-domain PMU mask configuration, indexed by the MALI_*_DOMAIN_INDEX
+ * constants from mali_pmu.h; installed via mali_pmu_set/copy_domain_mask(). */
+u16 mali_pmu_global_domain_config[MALI_MAX_NUMBER_OF_DOMAINS]= {0};
+
+static u32 mali_pmu_detect_mask(void);
+
+/** @brief MALI inbuilt PMU hardware info and PMU hardware has knowledge of cores power mask
+ */
+struct mali_pmu_core {
+       struct mali_hw_core hw_core;    /* register mapping of the PMU block */
+       _mali_osk_spinlock_t *lock;     /* serializes PMU command sequences */
+       u32 registered_cores_mask;      /* union of all configured domain masks */
+       u32 active_cores_mask;          /* domains that should currently be powered */
+       u32 switch_delay;               /* value programmed into the SW_DELAY register */
+};
+
+/* Singleton PMU instance; set by mali_pmu_create(), cleared by mali_pmu_delete() */
+static struct mali_pmu_core *mali_global_pmu_core = NULL;
+
+/** @brief Register layout for hardware PMU
+ */
+typedef enum {
+       PMU_REG_ADDR_MGMT_POWER_UP                  = 0x00,     /**< Power up register */
+       PMU_REG_ADDR_MGMT_POWER_DOWN                = 0x04,     /**< Power down register */
+       PMU_REG_ADDR_MGMT_STATUS                    = 0x08,     /**< Core sleep status register */
+       PMU_REG_ADDR_MGMT_INT_MASK                  = 0x0C,     /**< Interrupt mask register */
+       PMU_REG_ADDR_MGMT_INT_RAWSTAT               = 0x10,     /**< Interrupt raw status register */
+       PMU_REG_ADDR_MGMT_INT_CLEAR                 = 0x18,     /**< Interrupt clear register */
+       PMU_REG_ADDR_MGMT_SW_DELAY                  = 0x1C,     /**< Switch delay register */
+       PMU_REGISTER_ADDRESS_SPACE_SIZE             = 0x28,     /**< Size of register space */
+} pmu_reg_addr_mgmt_addr;
+
+/* Bit in INT_RAWSTAT / INT_CLEAR signalling PMU command completion */
+#define PMU_REG_VAL_IRQ 1
+
+/**
+ * Create the (singleton) PMU object for the given register resource.
+ *
+ * Acquires resources one by one (allocation, spinlock, register mapping,
+ * platform device data) and unwinds in reverse order on any failure.
+ *
+ * @param resource PMU register resource from the platform configuration
+ * @return The new PMU object, or NULL on failure.
+ */
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource)
+{
+       struct mali_pmu_core *pmu;
+       struct _mali_osk_device_data data = { 0, };
+
+       MALI_DEBUG_ASSERT(NULL == mali_global_pmu_core);
+       MALI_DEBUG_PRINT(2, ("Mali PMU: Creating Mali PMU core\n"));
+
+       pmu = (struct mali_pmu_core *)_mali_osk_malloc(sizeof(struct mali_pmu_core));
+       if (NULL == pmu) {
+               return NULL;
+       }
+
+       pmu->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PMU);
+       if (NULL == pmu->lock) {
+               goto err_free_pmu;
+       }
+
+       /* Initially every configured domain is considered active */
+       pmu->registered_cores_mask = mali_pmu_detect_mask();
+       pmu->active_cores_mask = pmu->registered_cores_mask;
+
+       if (_MALI_OSK_ERR_OK != mali_hw_core_create(&pmu->hw_core, resource, PMU_REGISTER_ADDRESS_SPACE_SIZE)) {
+               goto err_term_lock;
+       }
+
+       if (_MALI_OSK_ERR_OK != _mali_osk_device_data_get(&data)) {
+               goto err_delete_hw_core;
+       }
+
+       pmu->switch_delay = data.pmu_switch_delay;
+       mali_global_pmu_core = pmu;
+       return pmu;
+
+err_delete_hw_core:
+       mali_hw_core_delete(&pmu->hw_core);
+err_term_lock:
+       _mali_osk_spinlock_term(pmu->lock);
+err_free_pmu:
+       _mali_osk_free(pmu);
+       return NULL;
+}
+
+/**
+ * Destroy the PMU object, releasing resources in the reverse order of
+ * mali_pmu_create().
+ *
+ * @param pmu PMU object to delete (must be the global singleton)
+ */
+void mali_pmu_delete(struct mali_pmu_core *pmu)
+{
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+       MALI_DEBUG_ASSERT(pmu == mali_global_pmu_core);
+       MALI_DEBUG_PRINT(2, ("Mali PMU: Deleting Mali PMU core\n"));
+
+       /* Forget the singleton so a new PMU may be created later */
+       mali_global_pmu_core = NULL;
+
+       _mali_osk_spinlock_term(pmu->lock);
+       mali_hw_core_delete(&pmu->hw_core);
+       _mali_osk_free(pmu);
+}
+
+/* Serialize PMU register command sequences (see struct mali_pmu_core::lock) */
+static void mali_pmu_lock(struct mali_pmu_core *pmu)
+{
+       _mali_osk_spinlock_lock(pmu->lock);
+}
+static void mali_pmu_unlock(struct mali_pmu_core *pmu)
+{
+       _mali_osk_spinlock_unlock(pmu->lock);
+}
+
+/* Poll INT_RAWSTAT until the PMU signals command completion, then ack the
+ * IRQ.  Returns _MALI_OSK_ERR_TIMEOUT if the poll budget expires first. */
+static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(struct mali_pmu_core *pmu)
+{
+       u32 rawstat;
+       u32 timeout = MALI_REG_POLL_COUNT_SLOW;
+
+       MALI_DEBUG_ASSERT(pmu);
+
+       /* Wait for the command to complete */
+       do {
+               rawstat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT);
+               --timeout;
+       } while (0 == (rawstat & PMU_REG_VAL_IRQ) && 0 < timeout);
+
+       /* Decide success on the IRQ bit, not on the counter: the IRQ may be
+        * raised on the very last poll iteration (timeout == 0), in which
+        * case the previous "0 == timeout" test reported a false timeout. */
+       MALI_DEBUG_ASSERT(0 != (rawstat & PMU_REG_VAL_IRQ));
+       if (0 == (rawstat & PMU_REG_VAL_IRQ)) {
+               return _MALI_OSK_ERR_TIMEOUT;
+       }
+
+       /* Acknowledge the completion IRQ so the next command starts clean */
+       mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Power up the domains in @mask that STATUS reports as powered off.
+ * Caller must hold the PMU lock and Mali must be powered.
+ * Returns _MALI_OSK_ERR_OK, or a failure from the completion wait. */
+static _mali_osk_errcode_t mali_pmu_power_up_internal(struct mali_pmu_core *pmu, const u32 mask)
+{
+       u32 stat;
+       _mali_osk_errcode_t err;
+#if !defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
+       u32 current_domain;
+#endif
+
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+       /* No completion IRQ may be pending from a previous command */
+       MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT)
+                               & PMU_REG_VAL_IRQ));
+
+       /* STATUS has a bit set for each domain that is powered off */
+       stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+       stat &= pmu->registered_cores_mask;
+       /* Nothing to do if no requested domain is actually off */
+       if (0 == mask || 0 == (stat & mask)) return _MALI_OSK_ERR_OK;
+
+#if defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
+       /* Power up all requested domains with a single command */
+       mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_UP, mask);
+
+       err = mali_pmu_wait_for_command_finish(pmu);
+       if (_MALI_OSK_ERR_OK != err) {
+               return err;
+       }
+#else
+       /* Power up one domain at a time, waiting for each completion IRQ.
+        * Only domains that are both requested and currently off are touched. */
+       for (current_domain = 1; current_domain <= pmu->registered_cores_mask; current_domain <<= 1) {
+               if (current_domain & mask & stat) {
+                       mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_UP, current_domain);
+
+                       err = mali_pmu_wait_for_command_finish(pmu);
+                       if (_MALI_OSK_ERR_OK != err) {
+                               return err;
+                       }
+               }
+       }
+#endif
+
+#if defined(DEBUG)
+       /* Get power status of cores */
+       stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+       stat &= pmu->registered_cores_mask;
+
+       /* All requested domains -- and all active ones -- must now be on */
+       MALI_DEBUG_ASSERT(0 == (stat & mask));
+       MALI_DEBUG_ASSERT(0 == (stat & pmu->active_cores_mask));
+#endif /* defined(DEBUG) */
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Power down the domains in @mask.  Caller must hold the PMU lock.
+ * Returns _MALI_OSK_ERR_OK, or a failure from the completion wait. */
+static _mali_osk_errcode_t mali_pmu_power_down_internal(struct mali_pmu_core *pmu, const u32 mask)
+{
+       u32 stat;
+       _mali_osk_errcode_t err;
+
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+       /* No completion IRQ may be pending from a previous command */
+       MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT)
+                               & PMU_REG_VAL_IRQ));
+
+       /* STATUS has a bit set for each domain that is already powered off */
+       stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+       stat &= pmu->registered_cores_mask;
+
+       /* Nothing to do if every requested domain is already off */
+       if (0 == mask || 0 == ((~stat) & mask)) return _MALI_OSK_ERR_OK;
+
+       mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_DOWN, mask);
+
+       /* Do not wait for interrupt on Mali-300/400 if all domains are powered off
+        * by our power down command, because the HW will simply not generate an
+        * interrupt in this case.*/
+       if (mali_is_mali450() || pmu->registered_cores_mask != (mask | stat)) {
+               err = mali_pmu_wait_for_command_finish(pmu);
+               if (_MALI_OSK_ERR_OK != err) {
+                       return err;
+               }
+       } else {
+               /* Clear the IRQ latch instead of waiting for it */
+               mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
+       }
+#if defined(DEBUG)
+       /* Get power status of cores */
+       stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+       stat &= pmu->registered_cores_mask;
+
+       /* All requested domains must now report powered off */
+       MALI_DEBUG_ASSERT(mask == (stat & mask));
+#endif
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu)
+{
+       _mali_osk_errcode_t err;
+       u32 cores_off_mask, cores_on_mask, stat;
+
+       mali_pmu_lock(pmu);
+
+       /* Setup the desired defaults */
+       mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
+       mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
+
+       /* Get power status of cores */
+       stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+
+       cores_off_mask = pmu->registered_cores_mask & ~(stat | pmu->active_cores_mask);
+       cores_on_mask  = pmu->registered_cores_mask &  (stat & pmu->active_cores_mask);
+
+       if (0 != cores_off_mask) {
+               err = mali_pmu_power_down_internal(pmu, cores_off_mask);
+               if (_MALI_OSK_ERR_OK != err) return err;
+       }
+
+       if (0 != cores_on_mask) {
+               err = mali_pmu_power_up_internal(pmu, cores_on_mask);
+               if (_MALI_OSK_ERR_OK != err) return err;
+       }
+
+#if defined(DEBUG)
+       {
+               stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+               stat &= pmu->registered_cores_mask;
+
+               MALI_DEBUG_ASSERT(stat == (pmu->registered_cores_mask & ~pmu->active_cores_mask));
+       }
+#endif /* defined(DEBUG) */
+
+       mali_pmu_unlock(pmu);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Power down the domains in @mask and clear them from active_cores_mask.
+ * Returns _MALI_OSK_ERR_INVALID_ARGS for bits outside the registered
+ * domains, and _MALI_OSK_ERR_BUSY when all of Mali is powered off (the
+ * mask is still recorded so mali_pmu_reset() can apply it later). */
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask)
+{
+       _mali_osk_errcode_t err;
+
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+       MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0 );
+
+       /* Make sure we have a valid power domain mask.  Check the actual
+        * bits instead of the numeric magnitude: "mask > registered" would
+        * wrongly accept e.g. mask 0x3 when only 0x5 is registered.  This
+        * also matches the check done in mali_pmu_power_up(). */
+       if (mask & ~pmu->registered_cores_mask) {
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       }
+
+       mali_pmu_lock(pmu);
+
+       MALI_DEBUG_PRINT(4, ("Mali PMU: Power down (0x%08X)\n", mask));
+
+       pmu->active_cores_mask &= ~mask;
+
+       _mali_osk_pm_dev_ref_add_no_power_on();
+       if (!mali_pm_is_power_on()) {
+               /* Don't touch hardware if all of Mali is powered off. */
+               _mali_osk_pm_dev_ref_dec_no_power_on();
+               mali_pmu_unlock(pmu);
+
+               MALI_DEBUG_PRINT(4, ("Mali PMU: Skipping power down (0x%08X) since Mali is off\n", mask));
+
+               return _MALI_OSK_ERR_BUSY;
+       }
+
+       err = mali_pmu_power_down_internal(pmu, mask);
+
+       _mali_osk_pm_dev_ref_dec_no_power_on();
+       mali_pmu_unlock(pmu);
+
+       return err;
+}
+
+/* Power up the domains in @mask and add them to active_cores_mask.
+ * Returns _MALI_OSK_ERR_INVALID_ARGS for bits outside the registered
+ * domains, and _MALI_OSK_ERR_BUSY when all of Mali is powered off (the
+ * mask is still recorded so mali_pmu_reset() can apply it later). */
+_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask)
+{
+       _mali_osk_errcode_t result;
+
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+       MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0 );
+
+       /* Reject any bit that is not a registered power domain */
+       if (mask & ~pmu->registered_cores_mask) {
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       }
+
+       mali_pmu_lock(pmu);
+
+       MALI_DEBUG_PRINT(4, ("Mali PMU: Power up (0x%08X)\n", mask));
+
+       pmu->active_cores_mask |= mask;
+
+       _mali_osk_pm_dev_ref_add_no_power_on();
+       if (mali_pm_is_power_on()) {
+               result = mali_pmu_power_up_internal(pmu, mask);
+       } else {
+               /* Don't touch hardware if all of Mali is powered off. */
+               MALI_DEBUG_PRINT(4, ("Mali PMU: Skipping power up (0x%08X) since Mali is off\n", mask));
+               result = _MALI_OSK_ERR_BUSY;
+       }
+
+       _mali_osk_pm_dev_ref_dec_no_power_on();
+       mali_pmu_unlock(pmu);
+
+       return result;
+}
+
+/* Power down every registered domain (active_cores_mask is left as-is
+ * so a later power-up can restore the previous state). */
+_mali_osk_errcode_t mali_pmu_power_down_all(struct mali_pmu_core *pmu)
+{
+       _mali_osk_errcode_t result;
+
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+       MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+       mali_pmu_lock(pmu);
+
+       /* Program sane defaults in case we run before mali_pmu_reset() */
+       mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
+       mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
+
+       result = mali_pmu_power_down_internal(pmu, pmu->registered_cores_mask);
+
+       mali_pmu_unlock(pmu);
+
+       return result;
+}
+
+/* Power up the domains currently recorded as active. */
+_mali_osk_errcode_t mali_pmu_power_up_all(struct mali_pmu_core *pmu)
+{
+       _mali_osk_errcode_t result;
+
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+       MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+       mali_pmu_lock(pmu);
+
+       /* Program sane defaults in case we run before mali_pmu_reset() */
+       mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
+       mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
+
+       /* Only the domains recorded as active are brought back up */
+       result = mali_pmu_power_up_internal(pmu, pmu->active_cores_mask);
+
+       mali_pmu_unlock(pmu);
+       return result;
+}
+
+/* Return the singleton PMU created by mali_pmu_create(), or NULL if none. */
+struct mali_pmu_core *mali_pmu_get_global_pmu_core(void)
+{
+       return mali_global_pmu_core;
+}
+
+/* Build the union of all configured domain masks (GP, PP0-7, L20-L22) and
+ * count how many PP and L2 domains have a non-zero configuration.
+ * Returns the combined mask; the counts are only reported via debug print. */
+static u32 mali_pmu_detect_mask(void)
+{
+       int dynamic_config_pp = 0;
+       int dynamic_config_l2 = 0;
+       int i = 0;
+       u32 mask = 0;
+
+       /* Check if PM domain compatible with actually pp core and l2 cache and collection info about domain */
+       mask = mali_pmu_get_domain_mask(MALI_GP_DOMAIN_INDEX);
+
+       for (i = MALI_PP0_DOMAIN_INDEX; i <= MALI_PP7_DOMAIN_INDEX; i++) {
+               /* Read the config once per domain instead of twice */
+               u16 domain_mask = mali_pmu_get_domain_mask(i);
+
+               mask |= domain_mask;
+
+               if (0x0 != domain_mask) {
+                       dynamic_config_pp++;
+               }
+       }
+
+       for (i = MALI_L20_DOMAIN_INDEX; i <= MALI_L22_DOMAIN_INDEX; i++) {
+               u16 domain_mask = mali_pmu_get_domain_mask(i);
+
+               mask |= domain_mask;
+
+               if (0x0 != domain_mask) {
+                       dynamic_config_l2++;
+               }
+       }
+
+       MALI_DEBUG_PRINT(2, ("Mali PMU: mask 0x%x, pp_core %d, l2_core %d \n", mask, dynamic_config_pp, dynamic_config_l2));
+
+       return mask;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pmu.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pmu.h
new file mode 100644 (file)
index 0000000..59ca95e
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.h
+ * Interface for the Mali in-built PMU (Power Management Unit) hardware
+ */
+
+#ifndef __MALI_PMU_H__
+#define __MALI_PMU_H__
+
+#include "mali_osk.h"
+
+/* Indices into mali_pmu_global_domain_config[]: one entry per potential
+ * power domain -- the GP core, PP cores 0-7 and L2 caches 0-2. */
+#define MALI_GP_DOMAIN_INDEX   0
+#define MALI_PP0_DOMAIN_INDEX  1
+#define MALI_PP1_DOMAIN_INDEX  2
+#define MALI_PP2_DOMAIN_INDEX  3
+#define MALI_PP3_DOMAIN_INDEX  4
+#define MALI_PP4_DOMAIN_INDEX  5
+#define MALI_PP5_DOMAIN_INDEX  6
+#define MALI_PP6_DOMAIN_INDEX  7
+#define MALI_PP7_DOMAIN_INDEX  8
+#define MALI_L20_DOMAIN_INDEX  9
+#define MALI_L21_DOMAIN_INDEX  10
+#define MALI_L22_DOMAIN_INDEX  11
+
+#define MALI_MAX_NUMBER_OF_DOMAINS     12
+
+/* Record the domain config from the customer or default config */
+extern u16 mali_pmu_global_domain_config[];
+
+/** Return the PMU mask configured for domain @index.  A zero mask is
+ * treated as "domain not configured" by mali_pmu_detect_mask(). */
+static inline u16 mali_pmu_get_domain_mask(u32 index)
+{
+       MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > index);
+
+       return mali_pmu_global_domain_config[index];
+}
+
+/** Set the PMU mask for domain @index in the global config table. */
+static inline void mali_pmu_set_domain_mask(u32 index, u16 value)
+{
+       MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > index);
+
+       mali_pmu_global_domain_config[index] = value;
+}
+
+/** Bulk-install a domain config table (@len in bytes).
+ * NOTE(review): @len is not validated against the size of
+ * mali_pmu_global_domain_config -- callers must not pass an oversized
+ * buffer; confirm all call sites. */
+static inline void mali_pmu_copy_domain_mask(void *src, u32 len)
+{
+       _mali_osk_memcpy(mali_pmu_global_domain_config, src, len);
+}
+
+struct mali_pmu_core;
+
+/** @brief Initialisation of MALI PMU
+ *
+ * This is called from entry point of the driver in order to create and intialize the PMU resource
+ *
+ * @param resource it will be a pointer to a PMU resource
+ * @param number_of_pp_cores Number of found PP resources in configuration
+ * @param number_of_l2_caches Number of found L2 cache resources in configuration
+ * @return The created PMU object, or NULL in case of failure.
+ */
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource);
+
+/** @brief It deallocates the PMU resource
+ *
+ * This is called on the exit of the driver to terminate the PMU resource
+ *
+ * @param pmu Pointer to PMU core object to delete
+ */
+void mali_pmu_delete(struct mali_pmu_core *pmu);
+
+/** @brief Reset PMU core
+ *
+ * @param pmu Pointer to PMU core object to reset
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu);
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ *
+ * Called to power down the specified cores. The mask will be saved so that \a
+ * mali_pmu_power_up_all will bring the PMU back to the previous state set with
+ * this function or \a mali_pmu_power_up.
+ *
+ * @param pmu Pointer to PMU core object to power down
+ * @param mask Mask specifying which power domains to power down
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask);
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ *
+ * Called to power up the specified cores. The mask will be saved so that \a
+ * mali_pmu_power_up_all will bring the PMU back to the previous state set with
+ * this function or \a mali_pmu_power_down.
+ *
+ * @param pmu Pointer to PMU core object to power up
+ * @param mask Mask specifying which power domains to power up
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask);
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ *
+ * called to power down all cores
+ *
+ * @param pmu Pointer to PMU core object to power down
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_down_all(struct mali_pmu_core *pmu);
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ *
+ * called to power up all cores
+ *
+ * @param pmu Pointer to PMU core object to power up
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_up_all(struct mali_pmu_core *pmu);
+
+/** @brief Retrieves the Mali PMU core object (if any)
+ *
+ * @return The Mali PMU object, or NULL if no PMU exists.
+ */
+struct mali_pmu_core *mali_pmu_get_global_pmu_core(void);
+
+#endif /* __MALI_PMU_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pp.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pp.c
new file mode 100644 (file)
index 0000000..c123746
--- /dev/null
@@ -0,0 +1,573 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_pp_job.h"
+#include "mali_pp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_dma.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+/* Number of frame registers on Mali-200 */
+#define MALI_PP_MALI200_NUM_FRAME_REGISTERS ((0x04C/4)+1)
+/* Number of frame registers on Mali-300 and later */
+#define MALI_PP_MALI400_NUM_FRAME_REGISTERS ((0x058/4)+1)
+
+/* Global table of PP cores; kept dense by mali_pp_delete() (swap-remove) */
+static struct mali_pp_core* mali_global_pp_cores[MALI_MAX_NUMBER_OF_PP_CORES] = { NULL };
+static u32 mali_global_num_pp_cores = 0;
+
+/* Interrupt handlers (used for IRQ probing during mali_pp_create) */
+static void mali_pp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data);
+
+/* Allocate and initialize a PP core object for @resource, attach it to
+ * @group and publish it in the global PP core table.
+ * @is_virtual marks a broadcast core (no hardware reset performed here);
+ * @bcast_id is its broadcast identifier.
+ * Returns the new core, or NULL on failure with all partial state undone. */
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id)
+{
+       struct mali_pp_core* core = NULL;
+
+       MALI_DEBUG_PRINT(2, ("Mali PP: Creating Mali PP core: %s\n", resource->description));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Base address of PP core: 0x%x\n", resource->base));
+
+       if (mali_global_num_pp_cores >= MALI_MAX_NUMBER_OF_PP_CORES) {
+               MALI_PRINT_ERROR(("Mali PP: Too many PP core objects created\n"));
+               return NULL;
+       }
+
+       core = _mali_osk_malloc(sizeof(struct mali_pp_core));
+       if (NULL != core) {
+               core->core_id = mali_global_num_pp_cores;
+               core->bcast_id = bcast_id;
+
+               if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI200_REG_SIZEOF_REGISTER_BANK)) {
+                       _mali_osk_errcode_t ret;
+
+                       /* Virtual (broadcast) cores are not reset here */
+                       if (!is_virtual) {
+                               ret = mali_pp_reset(core);
+                       } else {
+                               ret = _MALI_OSK_ERR_OK;
+                       }
+
+                       if (_MALI_OSK_ERR_OK == ret) {
+                               ret = mali_group_add_pp_core(group, core);
+                               if (_MALI_OSK_ERR_OK == ret) {
+                                       /* Setup IRQ handlers (which will do IRQ probing if needed) */
+                                       MALI_DEBUG_ASSERT(!is_virtual || -1 != resource->irq);
+
+                                       core->irq = _mali_osk_irq_init(resource->irq,
+                                                                      mali_group_upper_half_pp,
+                                                                      group,
+                                                                      mali_pp_irq_probe_trigger,
+                                                                      mali_pp_irq_probe_ack,
+                                                                      core,
+                                                                      resource->description);
+                                       if (NULL != core->irq) {
+                                               /* Fully initialized: publish in the global table */
+                                               mali_global_pp_cores[mali_global_num_pp_cores] = core;
+                                               mali_global_num_pp_cores++;
+
+                                               return core;
+                                       } else {
+                                               MALI_PRINT_ERROR(("Mali PP: Failed to setup interrupt handlers for PP core %s\n", core->hw_core.description));
+                                       }
+                                       /* Undo the group attachment on IRQ failure */
+                                       mali_group_remove_pp_core(group);
+                               } else {
+                                       MALI_PRINT_ERROR(("Mali PP: Failed to add core %s to group\n", core->hw_core.description));
+                               }
+                       }
+                       mali_hw_core_delete(&core->hw_core);
+               }
+
+               _mali_osk_free(core);
+       } else {
+               MALI_PRINT_ERROR(("Mali PP: Failed to allocate memory for PP core\n"));
+       }
+
+       return NULL;
+}
+
+/* Tear down a PP core: release its IRQ and register mapping, unregister
+ * it from the global core table and free the object. */
+void mali_pp_delete(struct mali_pp_core *core)
+{
+       u32 idx;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       _mali_osk_irq_term(core->irq);
+       mali_hw_core_delete(&core->hw_core);
+
+       /* Swap-remove from the global table to keep it dense */
+       for (idx = 0; idx < mali_global_num_pp_cores; idx++) {
+               if (mali_global_pp_cores[idx] != core) {
+                       continue;
+               }
+
+               mali_global_num_pp_cores--;
+               /* Move the last entry into the freed slot (no-op when the
+                * removed core was itself the last entry) */
+               mali_global_pp_cores[idx] = mali_global_pp_cores[mali_global_num_pp_cores];
+               mali_global_pp_cores[mali_global_num_pp_cores] = NULL;
+               break;
+       }
+
+       _mali_osk_free(core);
+}
+
+/* Ask the PP core to stop its bus; returns immediately without waiting.
+ * Use mali_pp_stop_bus_wait() to wait until the bus is actually stopped. */
+void mali_pp_stop_bus(struct mali_pp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+       /* Will only send the stop bus command, and not wait for it to complete */
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+}
+
+/* Issue a stop-bus command and poll until the core reports the bus as
+ * stopped.  Returns _MALI_OSK_ERR_FAULT if the poll budget is exhausted. */
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core)
+{
+       int poll;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       /* Send the stop bus command (asynchronous) */
+       mali_pp_stop_bus(core);
+
+       /* Poll the status register until the bus reports stopped */
+       for (poll = 0; poll < MALI_REG_POLL_COUNT_FAST; poll++) {
+               u32 status = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+
+               if (status & MALI200_REG_VAL_STATUS_BUS_STOPPED) {
+                       return _MALI_OSK_ERR_OK;
+               }
+       }
+
+       MALI_PRINT_ERROR(("Mali PP: Failed to stop bus on %s. Status: 0x%08x\n", core->hw_core.description, mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+       return _MALI_OSK_ERR_FAULT;
+}
+
+/* Frame register reset values.
+ * Taken from the Mali400 TRM, 3.6. Pixel processor control register summary.
+ * Indexed by register offset / sizeof(u32); used as the "reset" operand of
+ * the conditional DMA writes in mali_pp_job_dma_cmd_prepare(). */
+static const u32 mali_frame_registers_reset_values[_MALI_PP_MAX_FRAME_REGISTERS] = {
+       0x0, /* Renderer List Address Register */
+       0x0, /* Renderer State Word Base Address Register */
+       0x0, /* Renderer Vertex Base Register */
+       0x2, /* Feature Enable Register */
+       0x0, /* Z Clear Value Register */
+       0x0, /* Stencil Clear Value Register */
+       0x0, /* ABGR Clear Value 0 Register */
+       0x0, /* ABGR Clear Value 1 Register */
+       0x0, /* ABGR Clear Value 2 Register */
+       0x0, /* ABGR Clear Value 3 Register */
+       0x0, /* Bounding Box Left Right Register */
+       0x0, /* Bounding Box Bottom Register */
+       0x0, /* FS Stack Address Register */
+       0x0, /* FS Stack Size and Initial Value Register */
+       0x0, /* Reserved */
+       0x0, /* Reserved */
+       0x0, /* Origin Offset X Register */
+       0x0, /* Origin Offset Y Register */
+       0x75, /* Subpixel Specifier Register */
+       0x0, /* Tiebreak mode Register */
+       0x0, /* Polygon List Format Register */
+       0x0, /* Scaling Register */
+       0x0 /* Tilebuffer configuration Register */
+};
+
+/* WBx register reset values (one table shared by all write-back units) */
+static const u32 mali_wb_registers_reset_values[_MALI_PP_MAX_WB_REGISTERS] = {
+       0x0, /* WBx Source Select Register */
+       0x0, /* WBx Target Address Register */
+       0x0, /* WBx Target Pixel Format Register */
+       0x0, /* WBx Target AA Format Register */
+       0x0, /* WBx Target Layout */
+       0x0, /* WBx Target Scanline Length */
+       0x0, /* WBx Target Flags Register */
+       0x0, /* WBx MRT Enable Register */
+       0x0, /* WBx MRT Offset Register */
+       0x0, /* WBx Global Test Enable Register */
+       0x0, /* WBx Global Test Reference Value Register */
+       0x0  /* WBx Global Test Compare Function Register */
+};
+
+/* Performance Counter 0 Enable Register reset value */
+static const u32 mali_perf_cnt_enable_reset_value = 0;
+
+/* Force-reset a PP core and re-enable its interrupts.
+ * The bus must already be stopped (see mali_pp_stop_bus_wait()).
+ * Always returns _MALI_OSK_ERR_OK; an unrecoverable reset is only logged. */
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core)
+{
+       /* Bus must be stopped before calling this function */
+       const u32 reset_invalid_value = 0xC0FFE000;
+       const u32 reset_check_value = 0xC01A0000;
+       int i;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+       MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description));
+
+       /* Set register to a bogus value. The register will be used to detect when reset is complete */
+       mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_invalid_value);
+       mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+
+       /* Force core to reset */
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET);
+
+       /* Wait for reset to be complete: once the core accepts the write and
+        * reads back the check value, the register file is alive again */
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+               mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_check_value);
+               if (reset_check_value == mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW)) {
+                       break;
+               }
+       }
+
+       if (MALI_REG_POLL_COUNT_FAST == i) {
+               MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n"));
+       }
+
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, 0x00000000); /* set it back to the default */
+       /* Re-enable interrupts */
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Start an asynchronous soft reset of @core: mask IRQs, clear raw status,
+ * then issue the soft-reset command.  Completion must be awaited with
+ * mali_pp_reset_wait(). */
+void mali_pp_reset_async(struct mali_pp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       MALI_DEBUG_PRINT(4, ("Mali PP: Reset of core %s\n", core->hw_core.description));
+
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_MASK_ALL);
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET);
+}
+
+/* Wait for a soft reset started by mali_pp_reset_async() to complete and
+ * re-enable the interrupts the driver uses.
+ * Returns _MALI_OSK_ERR_FAULT if the poll budget is exhausted. */
+_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core)
+{
+       u32 rawstat = 0;
+       int poll;
+
+       /* The core must be idle and report the reset-completed IRQ */
+       for (poll = 0; poll < MALI_REG_POLL_COUNT_FAST; poll++) {
+               if (mali_pp_read_status(core) & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE) {
+                       continue;
+               }
+
+               rawstat = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT);
+               if (MALI400PP_REG_VAL_IRQ_RESET_COMPLETED == rawstat) {
+                       /* Re-enable interrupts */
+                       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+                       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+                       return _MALI_OSK_ERR_OK;
+               }
+       }
+
+       MALI_PRINT_ERROR(("Mali PP: Failed to reset core %s, rawstat: 0x%08x\n",
+                         core->hw_core.description, rawstat));
+       return _MALI_OSK_ERR_FAULT;
+}
+
+/* Synchronous soft reset: trigger the reset and wait for completion. */
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core)
+{
+       mali_pp_reset_async(core);
+       return mali_pp_reset_wait(core);
+}
+
+/* Queue into "buf" the DMA commands that program this core's frame,
+ * write-back and performance-counter registers for sub_job of "job", ending
+ * with the START_RENDERING command that kicks the core when the buffer is
+ * executed by the Mali-450 DMA unit. Direct-register twin of this sequence:
+ * mali_pp_job_start() -- keep the two register orders in sync.
+ * NOTE(review): the *_conditional helpers appear to skip writes whose value
+ * equals the HW reset default (reset-value tables passed alongside) --
+ * confirm against mali_dma.c. */
+void mali_pp_job_dma_cmd_prepare(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job,
+                                 mali_bool restart_virtual, mali_dma_cmd_buf *buf)
+{
+       u32 relative_address;
+       u32 start_index;
+       u32 nr_of_regs;
+       u32 *frame_registers = mali_pp_job_get_frame_registers(job);
+       u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
+       u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
+       u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
+       u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, sub_job);
+       u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, sub_job);
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       /* Write frame registers */
+
+       /*
+        * There are two frame registers which are different for each sub job:
+        * 1. The Renderer List Address Register (MALI200_REG_ADDR_FRAME)
+        * 2. The FS Stack Address Register (MALI200_REG_ADDR_STACK)
+        */
+       mali_dma_write_conditional(buf, &core->hw_core, MALI200_REG_ADDR_FRAME, mali_pp_job_get_addr_frame(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_FRAME / sizeof(u32)]);
+
+       /* For virtual jobs, the stack address shouldn't be broadcast but written individually */
+       if (!mali_pp_job_is_virtual(job) || restart_virtual) {
+               mali_dma_write_conditional(buf, &core->hw_core, MALI200_REG_ADDR_STACK, mali_pp_job_get_addr_stack(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_STACK / sizeof(u32)]);
+       }
+
+       /* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */
+       relative_address = MALI200_REG_ADDR_RSW;
+       start_index = MALI200_REG_ADDR_RSW / sizeof(u32);
+       nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32);
+
+       mali_dma_write_array_conditional(buf, &core->hw_core,
+                                        relative_address, &frame_registers[start_index],
+                                        nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+       /* MALI200_REG_ADDR_STACK_SIZE */
+       relative_address = MALI200_REG_ADDR_STACK_SIZE;
+       start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32);
+
+       mali_dma_write_conditional(buf, &core->hw_core,
+                                  relative_address, frame_registers[start_index],
+                                  mali_frame_registers_reset_values[start_index]);
+
+       /* Skip 2 reserved registers */
+
+       /* Write remaining registers */
+       relative_address = MALI200_REG_ADDR_ORIGIN_OFFSET_X;
+       start_index = MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+       nr_of_regs = MALI_PP_MALI400_NUM_FRAME_REGISTERS - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+
+       mali_dma_write_array_conditional(buf, &core->hw_core,
+                                        relative_address, &frame_registers[start_index],
+                                        nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+       /* Write WBx registers -- each unit is programmed only if its source select is set */
+       if (wb0_registers[0]) { /* M200_WB0_REG_SOURCE_SELECT register */
+               mali_dma_write_array_conditional(buf, &core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+       }
+
+       if (wb1_registers[0]) { /* M200_WB1_REG_SOURCE_SELECT register */
+               mali_dma_write_array_conditional(buf, &core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+       }
+
+       if (wb2_registers[0]) { /* M200_WB2_REG_SOURCE_SELECT register */
+               mali_dma_write_array_conditional(buf, &core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+       }
+
+       /* Enable HW performance counters if a source is configured for this sub job */
+       if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+               mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
+               mali_dma_write_conditional(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+       }
+       if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+               mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
+               mali_dma_write_conditional(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+       }
+
+       /* This is the command that starts the core. */
+       mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
+}
+
+/* Start sub_job of "job" on "core" by writing the frame, write-back and
+ * performance-counter registers directly (relaxed writes, fenced by explicit
+ * memory barriers), then kicking START_RENDERING. This mirrors the register
+ * sequence queued by mali_pp_job_dma_cmd_prepare() -- keep them in sync. */
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual)
+{
+       u32 relative_address;
+       u32 start_index;
+       u32 nr_of_regs;
+       u32 *frame_registers = mali_pp_job_get_frame_registers(job);
+       u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
+       u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
+       u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
+       u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, sub_job);
+       u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, sub_job);
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       /* Write frame registers */
+
+       /*
+        * There are two frame registers which are different for each sub job:
+        * 1. The Renderer List Address Register (MALI200_REG_ADDR_FRAME)
+        * 2. The FS Stack Address Register (MALI200_REG_ADDR_STACK)
+        */
+       mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_FRAME, mali_pp_job_get_addr_frame(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_FRAME / sizeof(u32)]);
+
+       /* For virtual jobs, the stack address shouldn't be broadcast but written individually */
+       if (!mali_pp_job_is_virtual(job) || restart_virtual) {
+               mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_STACK, mali_pp_job_get_addr_stack(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_STACK / sizeof(u32)]);
+       }
+
+       /* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */
+       relative_address = MALI200_REG_ADDR_RSW;
+       start_index = MALI200_REG_ADDR_RSW / sizeof(u32);
+       nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32);
+
+       mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
+               relative_address, &frame_registers[start_index],
+               nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+       /* MALI200_REG_ADDR_STACK_SIZE */
+       relative_address = MALI200_REG_ADDR_STACK_SIZE;
+       start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32);
+
+       mali_hw_core_register_write_relaxed_conditional(&core->hw_core,
+               relative_address, frame_registers[start_index],
+               mali_frame_registers_reset_values[start_index]);
+
+       /* Skip 2 reserved registers */
+
+       /* Write remaining registers */
+       relative_address = MALI200_REG_ADDR_ORIGIN_OFFSET_X;
+       start_index = MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+       nr_of_regs = MALI_PP_MALI400_NUM_FRAME_REGISTERS - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+
+       mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
+               relative_address, &frame_registers[start_index],
+               nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+       /* Write WBx registers -- each unit is programmed only if its source select is set */
+       if (wb0_registers[0]) { /* M200_WB0_REG_SOURCE_SELECT register */
+               mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+       }
+
+       if (wb1_registers[0]) { /* M200_WB1_REG_SOURCE_SELECT register */
+               mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+       }
+
+       if (wb2_registers[0]) { /* M200_WB2_REG_SOURCE_SELECT register */
+               mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+       }
+
+       /* Enable HW performance counters if a source is configured for this sub job */
+       if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+               mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
+               mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+       }
+       if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+               mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
+               mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+       }
+
+#ifdef CONFIG_MALI400_HEATMAPS_ENABLED
+       /* Heatmap base address must be 8-byte aligned; low bits are masked off */
+       if(job->uargs.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_HEATMAP_ENABLE) {
+               mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERFMON_CONTR, ((job->uargs.tilesx & 0x3FF) << 16) | 1);
+               mali_hw_core_register_write_relaxed(&core->hw_core,  MALI200_REG_ADDR_MGMT_PERFMON_BASE, job->uargs.heatmap_mem & 0xFFFFFFF8);
+       }
+#endif /* CONFIG_MALI400_HEATMAPS_ENABLED */
+
+       /* NOTE(review): "job" is a pointer printed with %08X; fine on a 32-bit
+        * target, would truncate/warn on 64-bit -- confirm target arch or use %p. */
+       MALI_DEBUG_PRINT(3, ("Mali PP: Starting job 0x%08X part %u/%u on PP core %s\n", job, sub_job + 1, mali_pp_job_get_sub_job_count(job), core->hw_core.description));
+
+       /* Adding barrier to make sure all register writes are finished */
+       _mali_osk_write_mem_barrier();
+
+       /* This is the command that starts the core. */
+       mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
+
+       /* Adding barrier to make sure previous register writes are finished */
+       _mali_osk_write_mem_barrier();
+}
+
+/* @return Raw contents of the core's VERSION register (product/version ID). */
+u32 mali_pp_core_get_version(struct mali_pp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+       return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION);
+}
+
+/* Look up a registered PP core by index.
+ * @return The core at "index", or NULL when index is out of range. */
+struct mali_pp_core* mali_pp_get_global_pp_core(u32 index)
+{
+       return (index < mali_global_num_pp_cores) ? mali_global_pp_cores[index] : NULL;
+}
+
+/* @return Number of PP cores currently registered in the global array. */
+u32 mali_pp_get_glob_num_pp_cores(void)
+{
+       return mali_global_num_pp_cores;
+}
+
+/* ------------- interrupt handling below ------------------ */
+/* IRQ-probe callback: unmask the used IRQs and force a FORCE_HANG interrupt
+ * via RAWSTAT so the OSK IRQ layer can verify the interrupt line is wired up. */
+static void mali_pp_irq_probe_trigger(void *data)
+{
+       struct mali_pp_core *core = (struct mali_pp_core *)data;
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_FORCE_HANG);
+       _mali_osk_mem_barrier();
+}
+
+/* IRQ-probe callback: acknowledge the FORCE_HANG interrupt raised by
+ * mali_pp_irq_probe_trigger().
+ * @return _MALI_OSK_ERR_OK if the forced interrupt was pending (and is now
+ *         cleared), _MALI_OSK_ERR_FAULT otherwise. */
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data)
+{
+       struct mali_pp_core *core = (struct mali_pp_core *)data;
+       u32 irq_readout;
+
+       irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+       if (MALI200_REG_VAL_IRQ_FORCE_HANG & irq_readout) {
+               mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_FORCE_HANG);
+               _mali_osk_mem_barrier();
+               return _MALI_OSK_ERR_OK;
+       }
+
+       return _MALI_OSK_ERR_FAULT;
+}
+
+
+#if 0
+static void mali_pp_print_registers(struct mali_pp_core *core)
+{
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_VERSION = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_MASK = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE)));
+}
+#endif
+
+#if 0
+void mali_pp_print_state(struct mali_pp_core *core)
+{
+       MALI_DEBUG_PRINT(2, ("Mali PP: State: 0x%08x\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) ));
+}
+#endif
+
+/* Read back HW performance counter values from "child" into the job object
+ * (and report them to the profiling layer when CONFIG_MALI400_PROFILING).
+ * NOTE(review): the "parent" parameter is not referenced in this body; the
+ * enable decision is derived from the job's counter sources instead --
+ * presumably kept for API symmetry with the header docs; confirm. */
+void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob)
+{
+       u32 val0 = 0;
+       u32 val1 = 0;
+       u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, subjob);
+       u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, subjob);
+#if defined(CONFIG_MALI400_PROFILING)
+       /* Two profiling counters per core, indexed from the child's core ID */
+       int counter_index = COUNTER_FP_0_C0 + (2 * child->core_id);
+#endif
+
+       if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+               val0 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+               mali_pp_job_set_perf_counter_value0(job, subjob, val0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+               _mali_osk_profiling_report_hw_counter(counter_index, val0);
+#endif
+       }
+
+       if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+               val1 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+               mali_pp_job_set_perf_counter_value1(job, subjob, val1);
+
+#if defined(CONFIG_MALI400_PROFILING)
+               _mali_osk_profiling_report_hw_counter(counter_index + 1, val1);
+#endif
+       }
+}
+
+#if MALI_STATE_TRACKING
+/* Append a one-line description of this PP core to "buf" (bounded by "size").
+ * @return Number of characters written. */
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size)
+{
+       int n = 0;
+
+       n += _mali_osk_snprintf(buf + n, size - n, "\tPP #%d: %s\n", core->core_id, core->hw_core.description);
+
+       return n;
+}
+#endif
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pp.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pp.h
new file mode 100644 (file)
index 0000000..3a5c1ad
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_H__
+#define __MALI_PP_H__
+
+#include "mali_osk.h"
+#include "mali_pp_job.h"
+#include "mali_hw_core.h"
+#include "mali_dma.h"
+
+struct mali_group;
+
+#define MALI_MAX_NUMBER_OF_PP_CORES        9
+
+/**
+ * Definition of the PP core struct
+ * Used to track a PP core in the system.
+ */
+struct mali_pp_core {
+       struct mali_hw_core  hw_core;           /**< Common for all HW cores */
+       _mali_osk_irq_t     *irq;               /**< IRQ handler */
+       u32                  core_id;           /**< Unique core ID */
+       u32                  bcast_id;          /**< The "flag" value used by the Mali-450 broadcast and DLBU unit */
+};
+
+_mali_osk_errcode_t mali_pp_initialize(void);
+void mali_pp_terminate(void);
+
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t * resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id);
+void mali_pp_delete(struct mali_pp_core *core);
+
+void mali_pp_stop_bus(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core);
+void mali_pp_reset_async(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core);
+
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual);
+
+/**
+ * @brief Add commands to DMA command buffer to start PP job on core.
+ */
+void mali_pp_job_dma_cmd_prepare(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job,
+                                 mali_bool restart_virtual, mali_dma_cmd_buf *buf);
+
+u32 mali_pp_core_get_version(struct mali_pp_core *core);
+
+/* @return The unique, sequential core ID assigned when the core was created. */
+MALI_STATIC_INLINE u32 mali_pp_core_get_id(struct mali_pp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+       return core->core_id;
+}
+
+/* @return The "flag" value used by the Mali-450 broadcast/DLBU unit for this core. */
+MALI_STATIC_INLINE u32 mali_pp_core_get_bcast_id(struct mali_pp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+       return core->bcast_id;
+}
+
+struct mali_pp_core* mali_pp_get_global_pp_core(u32 index);
+u32 mali_pp_get_glob_num_pp_cores(void);
+
+/* Debug */
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size);
+
+/**
+ * Put instrumented HW counters from the core(s) to the job object (if enabled)
+ *
+ * parent and child is always the same, except for virtual jobs on Mali-450.
+ * In this case, the counters will be enabled on the virtual core (parent),
+ * but values need to be read from the child cores.
+ *
+ * @param parent The core used to see if the counters was enabled
+ * @param child The core to actually read the values from
+ * @job Job object to update with counter values (if enabled)
+ * @subjob Which subjob the counters are applicable for (core ID for virtual jobs)
+ */
+void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob);
+
+/* @return Human-readable description string of the underlying HW core. */
+MALI_STATIC_INLINE const char *mali_pp_get_hw_core_desc(struct mali_pp_core *core)
+{
+       return core->hw_core.description;
+}
+
+/*** Register reading/writing functions ***/
+/* @return The masked interrupt status register (INT_STATUS). */
+MALI_STATIC_INLINE u32 mali_pp_get_int_stat(struct mali_pp_core *core)
+{
+       return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+}
+
+/* @return Raw interrupt status, filtered down to the IRQ bits the driver uses. */
+MALI_STATIC_INLINE u32 mali_pp_read_rawstat(struct mali_pp_core *core)
+{
+       return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;
+}
+
+/* @return The core STATUS register (e.g. the RENDERING_ACTIVE busy bit). */
+MALI_STATIC_INLINE u32 mali_pp_read_status(struct mali_pp_core *core)
+{
+       return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+}
+
+/* Disable all interrupt sources on this core (writes IRQ_MASK_NONE). */
+MALI_STATIC_INLINE void mali_pp_mask_all_interrupts(struct mali_pp_core *core)
+{
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+}
+
+/* Acknowledge (clear) a pending HANG interrupt on this core. */
+MALI_STATIC_INLINE void mali_pp_clear_hang_interrupt(struct mali_pp_core *core)
+{
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_HANG);
+}
+
+/* Enable the interrupt sources the driver handles (writes IRQ_MASK_USED). */
+MALI_STATIC_INLINE void mali_pp_enable_interrupts(struct mali_pp_core *core)
+{
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+}
+
+/* Write this core's FS stack address for "job" directly (relaxed write).
+ * NOTE(review): core_id is used as the sub-job index here -- presumably only
+ * valid for physical cores participating in a virtual job; confirm callers. */
+MALI_STATIC_INLINE void mali_pp_write_addr_stack(struct mali_pp_core *core, struct mali_pp_job *job)
+{
+       u32 addr = mali_pp_job_get_addr_stack(job, core->core_id);
+       mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_STACK, addr);
+}
+
+#endif /* __MALI_PP_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pp_job.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pp_job.c
new file mode 100644 (file)
index 0000000..2fc5bb7
--- /dev/null
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_dma.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_pp_scheduler.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#include "linux/mali_memory_dma_buf.h"
+#endif
+
+static u32 pp_counter_src0 = MALI_HW_CORE_NO_COUNTER;   /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 pp_counter_src1 = MALI_HW_CORE_NO_COUNTER;   /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+static _mali_osk_atomic_t pp_counter_per_sub_job_count; /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */
+/* NOTE(review): the 8 explicit initializers below assume _MALI_PP_MAX_SUB_JOBS == 8.
+ * If the limit grows, the extra elements would be zero-initialized (0 is a valid
+ * counter source, not MALI_HW_CORE_NO_COUNTER) -- confirm the constant. */
+static u32 pp_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS] = { MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER };
+static u32 pp_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS] = { MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER };
+
+/* One-time module init: create the atomic counting enabled per-sub-job counters. */
+void mali_pp_job_initialize(void)
+{
+       _mali_osk_atomic_init(&pp_counter_per_sub_job_count, 0);
+}
+
+/* Module teardown counterpart of mali_pp_job_initialize(). */
+void mali_pp_job_terminate(void)
+{
+       _mali_osk_atomic_term(&pp_counter_per_sub_job_count);
+}
+
+/* Allocate and initialize a PP job from user-space start-job arguments.
+ *
+ * Copies uargs from user space, validates the sub-job count, optionally
+ * attaches the debugfs/DS-5 global perf-counter settings (when user space
+ * supplied none), copies the memory-cookie array, and for virtual jobs
+ * pre-builds the Mali-450 DMA command buffer.
+ *
+ * @return New job on success, NULL on any failure. A partially constructed
+ *         job is released via mali_pp_job_delete(); _mali_osk_calloc zeroes
+ *         the struct so teardown of untouched members is safe. */
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id)
+{
+       struct mali_pp_job *job;
+       u32 perf_counter_flag;
+
+       job = _mali_osk_calloc(1, sizeof(struct mali_pp_job));
+       if (NULL != job) {
+               if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) {
+                       goto fail;
+               }
+
+               if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS) {
+                       MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
+                       goto fail;
+               }
+
+               if (!mali_pp_job_use_no_notification(job)) {
+                       job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
+                       if (NULL == job->finished_notification) goto fail;
+               }
+
+               perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);
+
+               /* case when no counters came from user space
+                * so pass the debugfs / DS-5 provided global ones to the job object */
+               if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
+                     (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
+                       u32 sub_job_count = _mali_osk_atomic_read(&pp_counter_per_sub_job_count);
+
+                       /* These counters apply for all virtual jobs, and where no per sub job counter is specified */
+                       job->uargs.perf_counter_src0 = pp_counter_src0;
+                       job->uargs.perf_counter_src1 = pp_counter_src1;
+
+                       /* We only copy the per sub job array if it is enabled with at least one counter */
+                       if (0 < sub_job_count) {
+                               job->perf_counter_per_sub_job_count = sub_job_count;
+                               _mali_osk_memcpy(job->perf_counter_per_sub_job_src0, pp_counter_per_sub_job_src0, sizeof(pp_counter_per_sub_job_src0));
+                               _mali_osk_memcpy(job->perf_counter_per_sub_job_src1, pp_counter_per_sub_job_src1, sizeof(pp_counter_per_sub_job_src1));
+                       }
+               }
+
+               _mali_osk_list_init(&job->list);
+               job->session = session;
+               _mali_osk_list_init(&job->session_list);
+               job->id = id;
+
+               /* num_cores == 0 means a single (non-split) job */
+               job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
+               job->pid = _mali_osk_get_pid();
+               job->tid = _mali_osk_get_tid();
+
+               job->num_memory_cookies = job->uargs.num_memory_cookies;
+               if (job->num_memory_cookies > 0) {
+                       u32 size;
+
+                       /* Sanity bound: a job cannot reference more cookies than the session has mappings */
+                       if (job->uargs.num_memory_cookies > session->descriptor_mapping->current_nr_mappings) {
+                               MALI_PRINT_ERROR(("Mali PP job: Too many memory cookies specified in job object\n"));
+                               goto fail;
+                       }
+
+                       size = sizeof(*job->uargs.memory_cookies) * job->num_memory_cookies;
+
+                       job->memory_cookies = _mali_osk_malloc(size);
+                       if (NULL == job->memory_cookies) {
+                               MALI_PRINT_ERROR(("Mali PP job: Failed to allocate %d bytes of memory cookies!\n", size));
+                               goto fail;
+                       }
+
+                       if (0 != _mali_osk_copy_from_user(job->memory_cookies, job->uargs.memory_cookies, size)) {
+                               MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size));
+                               goto fail;
+                       }
+
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+                       /* One dma-buf attachment slot per memory cookie; filled in at map time */
+                       job->num_dma_bufs = job->num_memory_cookies;
+                       job->dma_bufs = _mali_osk_calloc(job->num_dma_bufs, sizeof(struct mali_dma_buf_attachment *));
+                       if (NULL == job->dma_bufs) {
+                               MALI_PRINT_ERROR(("Mali PP job: Failed to allocate dma_bufs array!\n"));
+                               goto fail;
+                       }
+#endif
+               }
+
+               /* Prepare DMA command buffer to start job, if it is virtual. */
+               if (mali_pp_job_is_virtual(job)) {
+                       struct mali_pp_core *core;
+                       _mali_osk_errcode_t err =  mali_dma_get_cmd_buf(&job->dma_cmd_buf);
+
+                       if (_MALI_OSK_ERR_OK != err) {
+                               MALI_PRINT_ERROR(("Mali PP job: Failed to allocate DMA command buffer\n"));
+                               goto fail;
+                       }
+
+                       core = mali_pp_scheduler_get_virtual_pp();
+                       MALI_DEBUG_ASSERT_POINTER(core);
+
+                       mali_pp_job_dma_cmd_prepare(core, job, 0, MALI_FALSE, &job->dma_cmd_buf);
+               }
+
+               if (_MALI_OSK_ERR_OK != mali_pp_job_check(job)) {
+                       /* Not a valid job. */
+                       goto fail;
+               }
+
+               mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_PP, NULL, job);
+               mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));
+
+               return job;
+       }
+
+fail:
+       if (NULL != job) {
+               mali_pp_job_delete(job);
+       }
+
+       return NULL;
+}
+
+/* Release all resources owned by a PP job. Also used as the error-path
+ * cleanup for mali_pp_job_create(): the job struct is calloc'd there, so
+ * members never set are NULL/0 and each teardown step below is safe. */
+void mali_pp_job_delete(struct mali_pp_job *job)
+{
+       mali_dma_put_cmd_buf(&job->dma_cmd_buf);
+       if (NULL != job->finished_notification) {
+               _mali_osk_notification_delete(job->finished_notification);
+       }
+
+       _mali_osk_free(job->memory_cookies);
+
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+       /* Unmap buffers attached to job */
+       if (0 < job->num_dma_bufs) {
+               mali_dma_buf_unmap_job(job);
+       }
+
+       _mali_osk_free(job->dma_bufs);
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
+       _mali_osk_free(job);
+}
+
+/* Resolve the effective performance counter 0 source for a sub job:
+ * per-sub-job setting if configured, otherwise the job-global setting. */
+u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job)
+{
+       /* Virtual jobs always use the global job counter; likewise when no per sub job counters are configured */
+       if (mali_pp_job_is_virtual(job) || 0 == job->perf_counter_per_sub_job_count) {
+               return job->uargs.perf_counter_src0;
+       }
+
+       /* Use per sub job counter if enabled... */
+       if (MALI_HW_CORE_NO_COUNTER != job->perf_counter_per_sub_job_src0[sub_job]) {
+               return job->perf_counter_per_sub_job_src0[sub_job];
+       }
+
+       /* ...else default to global job counter */
+       return job->uargs.perf_counter_src0;
+}
+
+/* Resolve the effective performance counter 1 source for a sub job:
+ * per-sub-job setting if configured, otherwise the job-global setting.
+ * Mirrors mali_pp_job_get_perf_counter_src0(). */
+u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job)
+{
+       /* Virtual jobs always use the global job counter; likewise when no per sub job counters are configured */
+       if (mali_pp_job_is_virtual(job) || 0 == job->perf_counter_per_sub_job_count) {
+               return job->uargs.perf_counter_src1;
+       }
+
+       /* Use per sub job counter if enabled... */
+       if (MALI_HW_CORE_NO_COUNTER != job->perf_counter_per_sub_job_src1[sub_job]) {
+               return job->perf_counter_per_sub_job_src1[sub_job];
+       }
+
+       /* ...else default to global job counter */
+       return job->uargs.perf_counter_src1;
+}
+
+/* Set the global (debugfs/DS-5 provided) performance counter 0 source. */
+void mali_pp_job_set_pp_counter_global_src0(u32 counter)
+{
+       pp_counter_src0 = counter;
+}
+
+/* Set the global (debugfs/DS-5 provided) performance counter 1 source. */
+void mali_pp_job_set_pp_counter_global_src1(u32 counter)
+{
+       pp_counter_src1 = counter;
+}
+
+/* Set the per-sub-job performance counter 0 source for "sub_job", keeping
+ * pp_counter_per_sub_job_count equal to the number of enabled entries across
+ * both src0/src1 arrays (the count gates copying the arrays into new jobs). */
+void mali_pp_job_set_pp_counter_sub_job_src0(u32 sub_job, u32 counter)
+{
+       MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+
+       if (MALI_HW_CORE_NO_COUNTER == pp_counter_per_sub_job_src0[sub_job]) {
+               /* increment count since existing counter was disabled */
+               _mali_osk_atomic_inc(&pp_counter_per_sub_job_count);
+       }
+
+       if (MALI_HW_CORE_NO_COUNTER == counter) {
+               /* decrement count since new counter is disabled */
+               _mali_osk_atomic_dec(&pp_counter_per_sub_job_count);
+       }
+
+       /* PS: A change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER will inc and dec, result will be 0 change */
+
+       pp_counter_per_sub_job_src0[sub_job] = counter;
+}
+
+/* Set the per-sub-job performance counter 1 source for "sub_job".
+ * Mirrors mali_pp_job_set_pp_counter_sub_job_src0(), including the shared
+ * enabled-entry count bookkeeping. */
+void mali_pp_job_set_pp_counter_sub_job_src1(u32 sub_job, u32 counter)
+{
+       MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+
+       if (MALI_HW_CORE_NO_COUNTER == pp_counter_per_sub_job_src1[sub_job]) {
+               /* increment count since existing counter was disabled */
+               _mali_osk_atomic_inc(&pp_counter_per_sub_job_count);
+       }
+
+       if (MALI_HW_CORE_NO_COUNTER == counter) {
+               /* decrement count since new counter is disabled */
+               _mali_osk_atomic_dec(&pp_counter_per_sub_job_count);
+       }
+
+       /* PS: A change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER will inc and dec, result will be 0 change */
+
+       pp_counter_per_sub_job_src1[sub_job] = counter;
+}
+
+/* @return The global performance counter 0 source (MALI_HW_CORE_NO_COUNTER if disabled). */
+u32 mali_pp_job_get_pp_counter_global_src0(void)
+{
+       return pp_counter_src0;
+}
+
+/* @return The global performance counter 1 source (MALI_HW_CORE_NO_COUNTER if disabled). */
+u32 mali_pp_job_get_pp_counter_global_src1(void)
+{
+       return pp_counter_src1;
+}
+
+/* @return The configured per-sub-job performance counter 0 source for "sub_job". */
+u32 mali_pp_job_get_pp_counter_sub_job_src0(u32 sub_job)
+{
+       MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+       return pp_counter_per_sub_job_src0[sub_job];
+}
+
+/* @return The configured per-sub-job performance counter 1 source for "sub_job". */
+u32 mali_pp_job_get_pp_counter_sub_job_src1(u32 sub_job)
+{
+       MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+       return pp_counter_per_sub_job_src1[sub_job];
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pp_job.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pp_job.h
new file mode 100644 (file)
index 0000000..2f2fea4
--- /dev/null
@@ -0,0 +1,384 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_JOB_H__
+#define __MALI_PP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_core.h"
+#include "mali_dma.h"
+#include "mali_dlbu.h"
+#include "mali_timeline.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#include "linux/mali_memory_dma_buf.h"
+#endif
+
+/**
+ * The structure represents a PP job, including all sub-jobs.
+ * A job is split into up to _MALI_PP_MAX_SUB_JOBS sub-jobs (see sub_jobs_num).
+ * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
+ * mechanism works)
+ */
+struct mali_pp_job {
+       _mali_osk_list_t list;                             /**< Used to link jobs together in the scheduler queue */
+       struct mali_session_data *session;                 /**< Session which submitted this job */
+       _mali_osk_list_t session_list;                     /**< Used to link jobs together in the session job list */
+       _mali_osk_list_t session_fb_lookup_list;           /**< Used to link jobs together from the same frame builder in the session */
+       _mali_uk_pp_start_job_s uargs;                     /**< Arguments from user space */
+       mali_dma_cmd_buf dma_cmd_buf;                      /**< Command buffer for starting job using Mali-450 DMA unit */
+       u32 id;                                            /**< Identifier for this job in kernel space (sequential numbering) */
+       u32 cache_order;                                   /**< Cache order used for L2 cache flushing (sequential numbering) */
+       u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS];    /**< Value of performance counter 0 (to be returned to user space), one for each sub job */
+       u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS];    /**< Value of performance counter 1 (to be returned to user space), one for each sub job */
+       u32 sub_jobs_num;                                  /**< Number of subjobs; set to 1 for Mali-450 if DLBU is used, otherwise equals number of PP cores */
+       u32 sub_jobs_started;                              /**< Total number of sub-jobs started (always started in ascending order) */
+       u32 sub_jobs_completed;                            /**< Number of completed sub-jobs in this superjob */
+       u32 sub_job_errors;                                /**< Bitfield with errors (errors for each single sub-job is or'ed together) */
+       u32 pid;                                           /**< Process ID of submitting process */
+       u32 tid;                                           /**< Thread ID of submitting thread */
+       _mali_osk_notification_t *finished_notification;   /**< Notification sent back to userspace on job complete */
+       u32 num_memory_cookies;                            /**< Number of memory cookies attached to job */
+       u32 *memory_cookies;                               /**< Memory cookies attached to job */
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+       struct mali_dma_buf_attachment **dma_bufs;         /**< Array of DMA-bufs used by job */
+       u32 num_dma_bufs;                                  /**< Number of DMA-bufs used by job */
+#endif
+       struct mali_timeline_tracker tracker;              /**< Timeline tracker for this job */
+       u32 perf_counter_per_sub_job_count;                /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */
+       u32 perf_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src0 */
+       u32 perf_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src1 */
+};
+
+void mali_pp_job_initialize(void);
+void mali_pp_job_terminate(void);
+
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id);
+void mali_pp_job_delete(struct mali_pp_job *job);
+
+u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job);
+u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job);
+
+void mali_pp_job_set_pp_counter_global_src0(u32 counter);
+void mali_pp_job_set_pp_counter_global_src1(u32 counter);
+void mali_pp_job_set_pp_counter_sub_job_src0(u32 sub_job, u32 counter);
+void mali_pp_job_set_pp_counter_sub_job_src1(u32 sub_job, u32 counter);
+
+u32 mali_pp_job_get_pp_counter_global_src0(void);
+u32 mali_pp_job_get_pp_counter_global_src1(void);
+u32 mali_pp_job_get_pp_counter_sub_job_src0(u32 sub_job);
+u32 mali_pp_job_get_pp_counter_sub_job_src1(u32 sub_job);
+
+/* Kernel-space job identifier (sequential numbering); 0 for a NULL job. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_id(struct mali_pp_job *job)
+{
+       return (NULL == job) ? 0 : job->id;
+}
+
+/* Cache order used for L2 cache flushing; 0 for a NULL job. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_cache_order(struct mali_pp_job *job)
+{
+       return (NULL == job) ? 0 : job->cache_order;
+}
+
+/* User-space job identifier (the user_job_ptr passed in from user space). */
+MALI_STATIC_INLINE u32 mali_pp_job_get_user_id(struct mali_pp_job *job)
+{
+       return job->uargs.user_job_ptr;
+}
+
+/* Frame builder id supplied by user space for this job. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_frame_builder_id(struct mali_pp_job *job)
+{
+       return job->uargs.frame_builder_id;
+}
+
+/* Flush id supplied by user space for this job. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_flush_id(struct mali_pp_job *job)
+{
+       return job->uargs.flush_id;
+}
+
+/* Process ID of the submitting process. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_pid(struct mali_pp_job *job)
+{
+       return job->pid;
+}
+
+/* Thread ID of the submitting thread. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_tid(struct mali_pp_job *job)
+{
+       return job->tid;
+}
+
+/* Raw frame register block from user space. */
+MALI_STATIC_INLINE u32* mali_pp_job_get_frame_registers(struct mali_pp_job *job)
+{
+       return job->uargs.frame_registers;
+}
+
+/* Raw DLBU register block from user space (Mali-450 load balancing unit). */
+MALI_STATIC_INLINE u32* mali_pp_job_get_dlbu_registers(struct mali_pp_job *job)
+{
+       return job->uargs.dlbu_registers;
+}
+
+/* A job is "virtual" when user space requested 0 cores; only possible on
+ * Mali-450 builds, always MALI_FALSE otherwise. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_virtual(struct mali_pp_job *job)
+{
+#if defined(CONFIG_MALI450)
+       return 0 == job->uargs.num_cores;
+#else
+       return MALI_FALSE;
+#endif
+}
+
+/* Frame address for the given sub-job: the DLBU virtual address for virtual
+ * jobs, the main frame register block for sub-job 0, the per-sub-job array
+ * for the rest; 0 if sub_job is out of range. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_frame(struct mali_pp_job *job, u32 sub_job)
+{
+       if (mali_pp_job_is_virtual(job)) {
+               return MALI_DLBU_VIRT_ADDR;
+       } else if (0 == sub_job) {
+               return job->uargs.frame_registers[MALI200_REG_ADDR_FRAME / sizeof(u32)];
+       } else if (sub_job < _MALI_PP_MAX_SUB_JOBS) {
+               return job->uargs.frame_registers_addr_frame[sub_job - 1];
+       }
+
+       return 0;
+}
+
+/* Stack address for the given sub-job (same lookup scheme as the frame
+ * address, minus the virtual-job case); 0 if sub_job is out of range. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_stack(struct mali_pp_job *job, u32 sub_job)
+{
+       if (0 == sub_job) {
+               return job->uargs.frame_registers[MALI200_REG_ADDR_STACK / sizeof(u32)];
+       } else if (sub_job < _MALI_PP_MAX_SUB_JOBS) {
+               return job->uargs.frame_registers_addr_stack[sub_job - 1];
+       }
+
+       return 0;
+}
+
+/* Write-back unit 0 register block from user space. */
+MALI_STATIC_INLINE u32* mali_pp_job_get_wb0_registers(struct mali_pp_job *job)
+{
+       return job->uargs.wb0_registers;
+}
+
+/* Write-back unit 1 register block from user space. */
+MALI_STATIC_INLINE u32* mali_pp_job_get_wb1_registers(struct mali_pp_job *job)
+{
+       return job->uargs.wb1_registers;
+}
+
+/* Write-back unit 2 register block from user space. */
+MALI_STATIC_INLINE u32* mali_pp_job_get_wb2_registers(struct mali_pp_job *job)
+{
+       return job->uargs.wb2_registers;
+}
+
+/* Disable write-back unit 0 by clearing its source select entry.
+ * NOTE(review): the frame registers above are indexed with "addr / sizeof(u32)"
+ * while the WB registers use the constant directly — confirm that
+ * MALI200_REG_ADDR_WB_SOURCE_SELECT is already a word index. */
+MALI_STATIC_INLINE void mali_pp_job_disable_wb0(struct mali_pp_job *job)
+{
+       job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+/* Disable write-back unit 1 by clearing its source select entry. */
+MALI_STATIC_INLINE void mali_pp_job_disable_wb1(struct mali_pp_job *job)
+{
+       job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+/* Disable write-back unit 2 by clearing its source select entry. */
+MALI_STATIC_INLINE void mali_pp_job_disable_wb2(struct mali_pp_job *job)
+{
+       job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+/* MALI_TRUE when all three write-back units have source select 0, i.e. the
+ * job produces no output and can be aborted; MALI_FALSE otherwise. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_all_writeback_unit_disabled(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       if ( job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] ||
+            job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] ||
+            job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT]
+          ) {
+               /* At least one output unit active */
+               return MALI_FALSE;
+       }
+
+       /* All outputs are disabled - we can abort the job */
+       return MALI_TRUE;
+}
+
+/* Hash-bucket index (masked frame builder id) for the session's frame
+ * builder lookup list. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_fb_lookup_id(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       return MALI_PP_JOB_FB_LOOKUP_LIST_MASK & job->uargs.frame_builder_id;
+}
+
+/* Session which submitted this job. */
+MALI_STATIC_INLINE struct mali_session_data *mali_pp_job_get_session(struct mali_pp_job *job)
+{
+       return job->session;
+}
+
+/* MALI_TRUE while at least one sub-job has not yet been started. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_unstarted_sub_jobs(struct mali_pp_job *job)
+{
+       return (job->sub_jobs_started < job->sub_jobs_num) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Used when terminating a session that still has jobs: marks every unstarted
+   sub-job as started, completed and failed, which guarantees that no new
+   sub-jobs will be started for this job. */
+MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_failed(struct mali_pp_job *job)
+{
+       u32 jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
+       job->sub_jobs_started   += jobs_remaining;
+       job->sub_jobs_completed += jobs_remaining;
+       job->sub_job_errors     += jobs_remaining;
+}
+
+/* Like mali_pp_job_mark_unstarted_failed(), but the skipped sub-jobs are
+   counted as successful (no errors recorded). */
+MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_success(struct mali_pp_job *job)
+{
+       u32 jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
+       job->sub_jobs_started   += jobs_remaining;
+       job->sub_jobs_completed += jobs_remaining;
+}
+
+/* MALI_TRUE when every sub-job has completed. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_complete(struct mali_pp_job *job)
+{
+       return (job->sub_jobs_num == job->sub_jobs_completed) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Index of the next sub-job to start (sub-jobs always start in ascending order). */
+MALI_STATIC_INLINE u32 mali_pp_job_get_first_unstarted_sub_job(struct mali_pp_job *job)
+{
+       return job->sub_jobs_started;
+}
+
+/* Total number of sub-jobs in this job. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_sub_job_count(struct mali_pp_job *job)
+{
+       return job->sub_jobs_num;
+}
+
+/* MALI_TRUE when the job carries memory cookies (non-zero num_memory_cookies),
+ * i.e. dma-buf mapping work is needed before the job can run. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_needs_dma_buf_mapping(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT(job);
+
+       if (0 != job->num_memory_cookies) {
+               return MALI_TRUE;
+       }
+
+       return MALI_FALSE;
+}
+
+/* Record that one more sub-job has been handed to a core. */
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_started(struct mali_pp_job *job, u32 sub_job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       /* Assert that we are marking the "first unstarted sub job" as started */
+       MALI_DEBUG_ASSERT(job->sub_jobs_started == sub_job);
+
+       job->sub_jobs_started++;
+}
+
+/* Record that one sub-job finished; failures accumulate in sub_job_errors. */
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_completed(struct mali_pp_job *job, mali_bool success)
+{
+       job->sub_jobs_completed++;
+       if ( MALI_FALSE == success ) {
+               job->sub_job_errors++;
+       }
+}
+
+/* MALI_TRUE when no sub-job has recorded an error. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_was_success(struct mali_pp_job *job)
+{
+       if ( 0 == job->sub_job_errors ) {
+               return MALI_TRUE;
+       }
+       return MALI_FALSE;
+}
+
+/* MALI_TRUE when user space asked to skip the completion notification. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_use_no_notification(struct mali_pp_job *job)
+{
+       return job->uargs.flags & _MALI_PP_JOB_FLAG_NO_NOTIFICATION ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Performance counter flags supplied by user space. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_flag(struct mali_pp_job *job)
+{
+       return job->uargs.perf_counter_flag;
+}
+
+
+/* Captured value of performance counter 0 for one sub-job. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value0(struct mali_pp_job *job, u32 sub_job)
+{
+       return job->perf_counter_value0[sub_job];
+}
+
+/* Captured value of performance counter 1 for one sub-job. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value1(struct mali_pp_job *job, u32 sub_job)
+{
+       return job->perf_counter_value1[sub_job];
+}
+
+/* Store the captured value of performance counter 0 for one sub-job. */
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value0(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+       job->perf_counter_value0[sub_job] = value;
+}
+
+/* Store the captured value of performance counter 1 for one sub-job. */
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value1(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+       job->perf_counter_value1[sub_job] = value;
+}
+
+/* Sanity check: a virtual job must consist of exactly one sub-job. */
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_pp_job_check(struct mali_pp_job *job)
+{
+       if (mali_pp_job_is_virtual(job) && job->sub_jobs_num != 1) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Returns MALI_TRUE if first job should be started after second job.
+ *
+ * @param first First job.
+ * @param second Second job.
+ * @return MALI_TRUE if first job should be started after second job, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_pp_job_should_start_after(struct mali_pp_job *first, struct mali_pp_job *second)
+{
+       MALI_DEBUG_ASSERT_POINTER(first);
+       MALI_DEBUG_ASSERT_POINTER(second);
+
+       /* First job should be started after second job if second job is in progress. */
+       if (0 < second->sub_jobs_started) {
+               return MALI_TRUE;
+       }
+
+       /* First job should be started after second job if first job has a higher job id.  A span is
+          used to handle job id wrapping (the subtraction relies on unsigned u32 arithmetic). */
+       if ((mali_pp_job_get_id(first) - mali_pp_job_get_id(second)) < MALI_SCHEDULER_JOB_ID_SPAN) {
+               return MALI_TRUE;
+       }
+
+       /* Second job should be started after first job. */
+       return MALI_FALSE;
+}
+
+/**
+ * Returns MALI_TRUE if this job has more than two sub jobs and all sub jobs are unstarted.
+ * Only valid for physical jobs (asserts that the job is not virtual).
+ *
+ * @param job Job to check.
+ * @return MALI_TRUE if job has more than two sub jobs and all sub jobs are unstarted, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_large_and_unstarted(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual(job));
+
+       return (0 == job->sub_jobs_started && 2 < job->sub_jobs_num);
+}
+
+/**
+ * Get PP job's Timeline tracker.
+ *
+ * @param job PP job.
+ * @return Pointer to Timeline tracker for the job.
+ */
+MALI_STATIC_INLINE struct mali_timeline_tracker *mali_pp_job_get_tracker(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return &(job->tracker);
+}
+
+#endif /* __MALI_PP_JOB_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pp_scheduler.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pp_scheduler.c
new file mode 100644 (file)
index 0000000..846f800
--- /dev/null
@@ -0,0 +1,2066 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_pp_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_scheduler.h"
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_timeline.h"
+#include "mali_osk_profiling.h"
+#include "mali_kernel_utilization.h"
+#include "mali_session.h"
+#include "mali_pm_domain.h"
+#include "linux/mali/mali_utgard.h"
+
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_dma_buf.h"
+#endif
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+
+/* Queue type used for both the physical and the virtual (Mali-450) job
+ * queues; jobs sit on one of two priority lists. */
+struct mali_pp_scheduler_job_queue {
+       _MALI_OSK_LIST_HEAD(normal_pri); /* List of jobs with some unscheduled work. */
+       _MALI_OSK_LIST_HEAD(high_pri);   /* List of high priority jobs with some unscheduled work. */
+       u32 depth;                       /* Depth of combined queues. */
+};
+
+/* If dma_buf with map on demand is used, we defer job deletion and job queue if in atomic context,
+ * since both might sleep. */
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#define MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE 1
+#define MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE 1
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) */
+
+static void mali_pp_scheduler_job_queued(void);
+static void mali_pp_scheduler_job_completed(void);
+
+/* Maximum of 9 groups: up to 8 physical groups (a group can only have maximum of 1 PP core) plus the virtual group */
+#define MALI_MAX_NUMBER_OF_PP_GROUPS 9
+
+static mali_bool mali_pp_scheduler_is_suspended(void *data);
+
+static u32 pp_version = 0;
+
+/* Physical job queue */
+static struct mali_pp_scheduler_job_queue job_queue;
+
+/* Physical groups */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_working);     /* List of physical groups with working jobs on the pp core */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_idle);        /* List of physical groups with idle jobs on the pp core */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_disabled);    /* List of disabled physical groups */
+
+/* Virtual job queue (Mali-450 only) */
+static struct mali_pp_scheduler_job_queue virtual_job_queue;
+
+/**
+ * Add job to scheduler queue.
+ *
+ * @param job Job to queue.
+ * @return Schedule mask.
+ */
+static mali_scheduler_mask mali_pp_scheduler_queue_job(struct mali_pp_job *job);
+
+/* Virtual group (Mali-450 only) */
+static struct mali_group *virtual_group = NULL;                 /* Virtual group (if any) */
+static enum {
+       VIRTUAL_GROUP_IDLE,
+       VIRTUAL_GROUP_WORKING,
+       VIRTUAL_GROUP_DISABLED,
+}
+virtual_group_state = VIRTUAL_GROUP_IDLE;            /* Flag which indicates whether the virtual group is working or idle */
+
+/* Number of physical cores */
+static u32 num_cores = 0;
+
+/* Number of physical cores which are enabled */
+static u32 enabled_cores = 0;
+
+/* Enable or disable core scaling */
+static mali_bool core_scaling_enabled = MALI_TRUE;
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *pp_scheduler_working_wait_queue = NULL;
+static u32 pause_count = 0;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+static _mali_osk_spinlock_irq_t *pp_scheduler_lock = NULL;
+#else
+static _mali_osk_spinlock_t *pp_scheduler_lock = NULL;
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+
+/* Take the PP scheduler lock; an IRQ-safe spinlock is used when
+ * MALI_UPPER_HALF_SCHEDULING is defined, a plain spinlock otherwise. */
+MALI_STATIC_INLINE void mali_pp_scheduler_lock(void)
+{
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       _mali_osk_spinlock_irq_lock(pp_scheduler_lock);
+#else
+       _mali_osk_spinlock_lock(pp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+       MALI_DEBUG_PRINT(5, ("Mali PP scheduler: PP scheduler lock taken.\n"));
+}
+
+/* Release the PP scheduler lock; counterpart of mali_pp_scheduler_lock(). */
+MALI_STATIC_INLINE void mali_pp_scheduler_unlock(void)
+{
+       MALI_DEBUG_PRINT(5, ("Mali PP scheduler: Releasing PP scheduler lock.\n"));
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       _mali_osk_spinlock_irq_unlock(pp_scheduler_lock);
+#else
+       _mali_osk_spinlock_unlock(pp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+}
+
+#if defined(DEBUG)
+#define MALI_ASSERT_PP_SCHEDULER_LOCKED() MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock)
+#else
+#define MALI_ASSERT_PP_SCHEDULER_LOCKED() do {} while (0)
+#endif /* defined(DEBUG) */
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+
+static _mali_osk_wq_work_t *pp_scheduler_wq_job_delete = NULL;
+static _mali_osk_spinlock_irq_t *pp_scheduler_job_delete_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(pp_scheduler_job_deletion_queue);
+
+/* Hand a job over to the deletion work queue; used because deleting a job
+ * might sleep (see comment above the MALI_PP_SCHEDULER_USE_DEFERRED_* defines)
+ * and so must not be done from atomic context. */
+static void mali_pp_scheduler_deferred_job_delete(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       _mali_osk_spinlock_irq_lock(pp_scheduler_job_delete_lock);
+
+       /* This job object should not be on any lists. */
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_list));
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
+
+       _mali_osk_list_addtail(&job->list, &pp_scheduler_job_deletion_queue);
+
+       _mali_osk_spinlock_irq_unlock(pp_scheduler_job_delete_lock);
+
+       _mali_osk_wq_schedule_work(pp_scheduler_wq_job_delete);
+}
+
+/* Work queue handler: deletes every job currently on the deletion queue. */
+static void mali_pp_scheduler_do_job_delete(void *arg)
+{
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+       struct mali_pp_job *job;
+       struct mali_pp_job *tmp;
+
+       MALI_IGNORE(arg);
+
+       _mali_osk_spinlock_irq_lock(pp_scheduler_job_delete_lock);
+
+       /*
+        * Quickly "unhook" the jobs pending to be deleted, so we can release the lock before
+        * we start deleting the job objects (without any locks held)
+        */
+       _mali_osk_list_move_list(&pp_scheduler_job_deletion_queue, &list);
+
+       _mali_osk_spinlock_irq_unlock(pp_scheduler_job_delete_lock);
+
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list, struct mali_pp_job, list) {
+               mali_pp_job_delete(job); /* delete the job object itself */
+       }
+}
+
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
+
+static _mali_osk_wq_work_t *pp_scheduler_wq_job_queue = NULL;
+static _mali_osk_spinlock_irq_t *pp_scheduler_job_queue_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(pp_scheduler_job_queue_list);
+
+/* Hand a job over to the queueing work queue; used because queueing a job
+ * might sleep (see comment above the MALI_PP_SCHEDULER_USE_DEFERRED_* defines)
+ * and so must not be done from atomic context. */
+static void mali_pp_scheduler_deferred_job_queue(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       _mali_osk_spinlock_irq_lock(pp_scheduler_job_queue_lock);
+       _mali_osk_list_addtail(&job->list, &pp_scheduler_job_queue_list);
+       _mali_osk_spinlock_irq_unlock(pp_scheduler_job_queue_lock);
+
+       _mali_osk_wq_schedule_work(pp_scheduler_wq_job_queue);
+}
+
+/* Work queue handler: moves all pending jobs into the scheduler queues and
+ * then kicks the scheduler with the accumulated schedule mask. */
+static void mali_pp_scheduler_do_job_queue(void *arg)
+{
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+       struct mali_pp_job *job;
+       struct mali_pp_job *tmp;
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_IGNORE(arg);
+
+       _mali_osk_spinlock_irq_lock(pp_scheduler_job_queue_lock);
+
+       /*
+        * Quickly "unhook" the jobs pending to be queued, so we can release the lock before
+        * we start queueing the job objects (without any locks held)
+        */
+       _mali_osk_list_move_list(&pp_scheduler_job_queue_list, &list);
+
+       _mali_osk_spinlock_irq_unlock(pp_scheduler_job_queue_lock);
+
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list, struct mali_pp_job, list) {
+               _mali_osk_list_delinit(&job->list);
+               schedule_mask |= mali_pp_scheduler_queue_job(job);
+       }
+
+       mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
+
+/* MALI_TRUE when a virtual group exists; only possible on Mali-450 builds. */
+MALI_STATIC_INLINE mali_bool mali_pp_scheduler_has_virtual_group(void)
+{
+#if defined(CONFIG_MALI450)
+       return NULL != virtual_group;
+#else
+       return MALI_FALSE;
+#endif /* defined(CONFIG_MALI450) */
+}
+
+/* Set up the PP scheduler: job queues, scheduler lock, "working" wait queue
+ * and, when deferred deletion/queueing is compiled in, the work queue items
+ * and their locks. Returns _MALI_OSK_ERR_OK on success; on any allocation
+ * failure everything created so far is released and _MALI_OSK_ERR_NOMEM is
+ * returned. */
+_mali_osk_errcode_t mali_pp_scheduler_initialize(void)
+{
+       _MALI_OSK_INIT_LIST_HEAD(&job_queue.normal_pri);
+       _MALI_OSK_INIT_LIST_HEAD(&job_queue.high_pri);
+       job_queue.depth = 0;
+
+       _MALI_OSK_INIT_LIST_HEAD(&virtual_job_queue.normal_pri);
+       _MALI_OSK_INIT_LIST_HEAD(&virtual_job_queue.high_pri);
+       virtual_job_queue.depth = 0;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       pp_scheduler_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER)
+#else
+       pp_scheduler_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+       if (NULL == pp_scheduler_lock) goto cleanup;
+
+       pp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
+       if (NULL == pp_scheduler_working_wait_queue) goto cleanup;
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+       pp_scheduler_wq_job_delete = _mali_osk_wq_create_work(mali_pp_scheduler_do_job_delete, NULL);
+       if (NULL == pp_scheduler_wq_job_delete) goto cleanup;
+
+       pp_scheduler_job_delete_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
+       if (NULL == pp_scheduler_job_delete_lock) goto cleanup;
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
+       pp_scheduler_wq_job_queue = _mali_osk_wq_create_work(mali_pp_scheduler_do_job_queue, NULL);
+       if (NULL == pp_scheduler_wq_job_queue) goto cleanup;
+
+       pp_scheduler_job_queue_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
+       if (NULL == pp_scheduler_job_queue_lock) goto cleanup;
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
+
+       return _MALI_OSK_ERR_OK;
+
+cleanup:
+       /* Release in reverse creation order; every pointer is NULL-checked and
+        * reset, so a partially completed init is torn down safely. */
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
+       if (NULL != pp_scheduler_job_queue_lock) {
+               _mali_osk_spinlock_irq_term(pp_scheduler_job_queue_lock);
+               pp_scheduler_job_queue_lock = NULL;
+       }
+
+       if (NULL != pp_scheduler_wq_job_queue) {
+               _mali_osk_wq_delete_work(pp_scheduler_wq_job_queue);
+               pp_scheduler_wq_job_queue = NULL;
+       }
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+       if (NULL != pp_scheduler_job_delete_lock) {
+               _mali_osk_spinlock_irq_term(pp_scheduler_job_delete_lock);
+               pp_scheduler_job_delete_lock = NULL;
+       }
+
+       if (NULL != pp_scheduler_wq_job_delete) {
+               _mali_osk_wq_delete_work(pp_scheduler_wq_job_delete);
+               pp_scheduler_wq_job_delete = NULL;
+       }
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
+
+       if (NULL != pp_scheduler_working_wait_queue) {
+               _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
+               pp_scheduler_working_wait_queue = NULL;
+       }
+
+       if (NULL != pp_scheduler_lock) {
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+               _mali_osk_spinlock_irq_term(pp_scheduler_lock);
+#else
+               _mali_osk_spinlock_term(pp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+               pp_scheduler_lock = NULL;
+       }
+
+       return _MALI_OSK_ERR_NOMEM;
+}
+
+/* Tear down everything created by mali_pp_scheduler_initialize(), in
+ * reverse order of creation. */
+void mali_pp_scheduler_terminate(void)
+{
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
+       _mali_osk_spinlock_irq_term(pp_scheduler_job_queue_lock);
+       _mali_osk_wq_delete_work(pp_scheduler_wq_job_queue);
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+       _mali_osk_spinlock_irq_term(pp_scheduler_job_delete_lock);
+       _mali_osk_wq_delete_work(pp_scheduler_wq_job_delete);
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
+
+       _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       _mali_osk_spinlock_irq_term(pp_scheduler_lock);
+#else
+       _mali_osk_spinlock_term(pp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+}
+
+/* Discover all groups: remember the virtual group (if any), read the PP
+ * version from the first physical PP core found, then either join every
+ * physical PP core into the virtual group or put it on the idle list.
+ * Also establishes num_cores / enabled_cores. */
+void mali_pp_scheduler_populate(void)
+{
+       struct mali_group *group;
+       struct mali_pp_core *pp_core;
+       u32 num_groups;
+       u32 i;
+
+       num_groups = mali_group_get_glob_num_groups();
+
+       /* Do we have a virtual group? */
+       for (i = 0; i < num_groups; i++) {
+               group = mali_group_get_glob_group(i);
+
+               if (mali_group_is_virtual(group)) {
+                       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Found virtual group %p.\n", group));
+
+                       virtual_group = group;
+                       break;
+               }
+       }
+
+       /* Find all the available PP cores */
+       for (i = 0; i < num_groups; i++) {
+               group = mali_group_get_glob_group(i);
+               pp_core = mali_group_get_pp_core(group);
+
+               if (NULL != pp_core && !mali_group_is_virtual(group)) {
+                       if (0 == pp_version) {
+                               /* Retrieve PP version from the first available PP core */
+                               pp_version = mali_pp_core_get_version(pp_core);
+                       }
+
+                       if (mali_pp_scheduler_has_virtual_group()) {
+                               /* Add all physical PP cores to the virtual group */
+                               mali_group_lock(virtual_group);
+                               group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
+                               mali_group_add_group(virtual_group, group, MALI_TRUE);
+                               mali_group_unlock(virtual_group);
+                       } else {
+                               _mali_osk_list_add(&group->pp_scheduler_list, &group_list_idle);
+                       }
+
+                       num_cores++;
+               }
+       }
+
+       enabled_cores = num_cores;
+}
+
+/* Delete all groups owned by the scheduler; must only be called when no
+ * group (physical or virtual) is working. */
+void mali_pp_scheduler_depopulate(void)
+{
+       struct mali_group *group, *temp;
+
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+       MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
+
+       /* Delete all groups owned by scheduler */
+       if (mali_pp_scheduler_has_virtual_group()) {
+               mali_group_delete(virtual_group);
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list) {
+               mali_group_delete(group);
+       }
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, pp_scheduler_list) {
+               mali_group_delete(group);
+       }
+}
+
+/* Flag the virtual group as disabled if it has become empty; the caller
+ * must hold the virtual group lock. */
+MALI_STATIC_INLINE void mali_pp_scheduler_disable_empty_virtual(void)
+{
+       MALI_ASSERT_GROUP_LOCKED(virtual_group);
+
+       if (mali_group_virtual_disable_if_empty(virtual_group)) {
+               MALI_DEBUG_PRINT(4, ("Disabling empty virtual group\n"));
+
+               MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
+
+               virtual_group_state = VIRTUAL_GROUP_DISABLED;
+       }
+}
+
+/* Flag the (still empty) virtual group as idle again; the caller must hold
+ * the virtual group lock. */
+MALI_STATIC_INLINE void mali_pp_scheduler_enable_empty_virtual(void)
+{
+       MALI_ASSERT_GROUP_LOCKED(virtual_group);
+
+       if (mali_group_virtual_enable_if_empty(virtual_group)) {
+               MALI_DEBUG_PRINT(4, ("Re-enabling empty virtual group\n"));
+
+               MALI_DEBUG_ASSERT(VIRTUAL_GROUP_DISABLED == virtual_group_state);
+
+               virtual_group_state = VIRTUAL_GROUP_IDLE;
+       }
+}
+
+/* Pick the next job to run from a queue, without dequeuing it: a normal
+ * priority job that is already in progress wins; otherwise a queued high
+ * priority job; otherwise the first normal priority job. Returns NULL when
+ * both lists are empty. The scheduler lock must be held. */
+static struct mali_pp_job *mali_pp_scheduler_get_job(struct mali_pp_scheduler_job_queue *queue)
+{
+       struct mali_pp_job *job = NULL;
+
+       MALI_ASSERT_PP_SCHEDULER_LOCKED();
+       MALI_DEBUG_ASSERT_POINTER(queue);
+
+       /* Check if we have a normal priority job. */
+       if (!_mali_osk_list_empty(&queue->normal_pri)) {
+               MALI_DEBUG_ASSERT(queue->depth > 0);
+               job = _MALI_OSK_LIST_ENTRY(queue->normal_pri.next, struct mali_pp_job, list);
+       }
+
+       /* Prefer normal priority job if it is in progress. */
+       if (NULL != job && 0 < job->sub_jobs_started) {
+               return job;
+       }
+
+       /* Check if we have a high priority job. */
+       if (!_mali_osk_list_empty(&queue->high_pri)) {
+               MALI_DEBUG_ASSERT(queue->depth > 0);
+               job = _MALI_OSK_LIST_ENTRY(queue->high_pri.next, struct mali_pp_job, list);
+       }
+
+       return job;
+}
+
+/**
+ * Returns a physical job if a physical job is ready to run
+ */
+MALI_STATIC_INLINE struct mali_pp_job *mali_pp_scheduler_get_physical_job(void)
+{
+       struct mali_pp_job *job;
+
+       MALI_ASSERT_PP_SCHEDULER_LOCKED();
+
+       /* Physical jobs live in the common job_queue. */
+       job = mali_pp_scheduler_get_job(&job_queue);
+       return job;
+}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_dequeue_physical_job(struct mali_pp_job *job)
+{
+       MALI_ASSERT_PP_SCHEDULER_LOCKED();
+       MALI_DEBUG_ASSERT(job_queue.depth > 0);
+
+       if (!mali_pp_job_has_unstarted_sub_jobs(job)) {
+               /* Every sub job has now been handed out, so the job leaves
+                * both the scheduling queue and the FB lookup list. */
+               _mali_osk_list_delinit(&job->list);
+               _mali_osk_list_delinit(&job->session_fb_lookup_list);
+       }
+
+       job_queue.depth--;
+}
+
+/**
+ * Returns a virtual job if a virtual job is ready to run
+ */
+MALI_STATIC_INLINE struct mali_pp_job *mali_pp_scheduler_get_virtual_job(void)
+{
+       struct mali_pp_job *job;
+
+       MALI_ASSERT_PP_SCHEDULER_LOCKED();
+       MALI_DEBUG_ASSERT_POINTER(virtual_group);
+
+       /* Virtual jobs have their own dedicated queue. */
+       job = mali_pp_scheduler_get_job(&virtual_job_queue);
+       return job;
+}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_dequeue_virtual_job(struct mali_pp_job *job)
+{
+       MALI_ASSERT_PP_SCHEDULER_LOCKED();
+       MALI_DEBUG_ASSERT(virtual_job_queue.depth > 0);
+
+       /* A virtual job always leaves the queue in one step. */
+       _mali_osk_list_delinit(&job->list);
+       _mali_osk_list_delinit(&job->session_fb_lookup_list);
+       virtual_job_queue.depth--;
+}
+
+/**
+ * Checks if the criteria is met for removing a physical core from virtual group
+ */
+MALI_STATIC_INLINE mali_bool mali_pp_scheduler_can_move_virtual_to_physical(void)
+{
+       MALI_ASSERT_PP_SCHEDULER_LOCKED();
+       MALI_DEBUG_ASSERT(mali_pp_scheduler_has_virtual_group());
+       MALI_ASSERT_GROUP_LOCKED(virtual_group);
+
+       /*
+        * A physical core may be taken out of the virtual group only when:
+        * - the virtual group is idle,
+        * - there are currently no physical groups (neither idle nor working),
+        * - there is at least one physical job waiting to be scheduled.
+        */
+       if (VIRTUAL_GROUP_IDLE != virtual_group_state) {
+               return MALI_FALSE;
+       }
+       if (!_mali_osk_list_empty(&group_list_idle)) {
+               return MALI_FALSE;
+       }
+       if (!_mali_osk_list_empty(&group_list_working)) {
+               return MALI_FALSE;
+       }
+
+       return (NULL != mali_pp_scheduler_get_physical_job()) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE struct mali_group *mali_pp_scheduler_acquire_physical_group(void)
+{
+       struct mali_group *group;
+
+       MALI_ASSERT_PP_SCHEDULER_LOCKED();
+
+       /* Fast path: take the first group off the idle list. */
+       if (!_mali_osk_list_empty(&group_list_idle)) {
+               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquiring physical group from idle list.\n"));
+               return _MALI_OSK_LIST_ENTRY(group_list_idle.next, struct mali_group, pp_scheduler_list);
+       }
+
+       /* Slow path: try to peel a physical core off the virtual group. */
+       if (!mali_pp_scheduler_has_virtual_group()) {
+               return NULL;
+       }
+
+       MALI_ASSERT_GROUP_LOCKED(virtual_group);
+
+       if (!mali_pp_scheduler_can_move_virtual_to_physical()) {
+               return NULL;
+       }
+
+       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquiring physical group from virtual group.\n"));
+       group = mali_group_acquire_group(virtual_group);
+
+       if (mali_pp_scheduler_has_virtual_group()) {
+               mali_pp_scheduler_disable_empty_virtual();
+       }
+
+       return group;
+}
+
+static void mali_pp_scheduler_return_job_to_user(struct mali_pp_job *job, mali_bool deferred)
+{
+       if (MALI_FALSE == mali_pp_job_use_no_notification(job)) {
+               u32 i;
+               u32 num_counters_to_copy;
+               mali_bool success = mali_pp_job_was_success(job);
+
+               _mali_uk_pp_job_finished_s *jobres = job->finished_notification->result_buffer;
+               _mali_osk_memset(jobres, 0, sizeof(_mali_uk_pp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
+               jobres->user_job_ptr = mali_pp_job_get_user_id(job);
+               if (MALI_TRUE == success) {
+                       jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+               } else {
+                       jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+               }
+
+               if (mali_pp_job_is_virtual(job)) {
+                       num_counters_to_copy = num_cores; /* Number of physical cores available */
+               } else {
+                       num_counters_to_copy = mali_pp_job_get_sub_job_count(job);
+               }
+
+               for (i = 0; i < num_counters_to_copy; i++) {
+                       jobres->perf_counter0[i] = mali_pp_job_get_perf_counter_value0(job, i);
+                       jobres->perf_counter1[i] = mali_pp_job_get_perf_counter_value1(job, i);
+                       jobres->perf_counter_src0 = mali_pp_job_get_pp_counter_global_src0();
+                       jobres->perf_counter_src1 = mali_pp_job_get_pp_counter_global_src1();
+               }
+
+               mali_session_send_notification(mali_pp_job_get_session(job), job->finished_notification);
+               job->finished_notification = NULL;
+       }
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+       if (MALI_TRUE == deferred) {
+               /* The deletion of the job object (releasing sync refs etc) must be done in a different context */
+               mali_pp_scheduler_deferred_job_delete(job);
+       } else {
+               mali_pp_job_delete(job);
+       }
+#else
+       MALI_DEBUG_ASSERT(MALI_FALSE == deferred); /* no use cases need this in this configuration */
+       mali_pp_job_delete(job);
+#endif
+}
+
+/**
+ * Finalize a fully completed job: account it, notify user space, delete it.
+ *
+ * Must be called after the job has been unlinked from all scheduler lists.
+ *
+ * @param job Completed job; deleted (directly or deferred) before return.
+ */
+static void mali_pp_scheduler_finalize_job(struct mali_pp_job * job)
+{
+       /* This job object should not be on any lists. */
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_list));
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+       /* Window-surface accounting must happen BEFORE the job is returned to
+        * user space: mali_pp_scheduler_return_job_to_user() deletes the job
+        * object (immediately, in the non-deferred configuration), so reading
+        * job->uargs.flags / job->session after it is a use-after-free. */
+       if (_MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE & job->uargs.flags) {
+               _mali_osk_atomic_inc(&job->session->number_of_window_jobs);
+       }
+#endif
+
+       /* Send notification back to user space */
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+       mali_pp_scheduler_return_job_to_user(job, MALI_TRUE);
+#else
+       mali_pp_scheduler_return_job_to_user(job, MALI_FALSE);
+#endif
+
+       mali_pp_scheduler_job_completed();
+}
+
+/**
+ * Dispatch queued PP jobs onto available groups.
+ *
+ * Lock order: the virtual group lock (when a virtual group exists) is taken
+ * before the PP scheduler lock.  While the locks are held only bookkeeping is
+ * done for physical jobs (sub jobs marked started, groups moved to the
+ * working list); the jobs themselves are started after the scheduler lock has
+ * been released, because writing the job registers is slow and must not block
+ * new submissions or completion processing.  A virtual job, by contrast, is
+ * started while the virtual group lock is still held.
+ */
+void mali_pp_scheduler_schedule(void)
+{
+       struct mali_group* physical_groups_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS - 1];
+       struct mali_pp_job* physical_jobs_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS - 1];
+       u32 physical_sub_jobs_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS - 1];
+       int num_physical_jobs_to_start = 0;
+       int i;
+
+       if (mali_pp_scheduler_has_virtual_group()) {
+               /* Lock the virtual group since we might have to grab physical groups. */
+               mali_group_lock(virtual_group);
+       }
+
+       mali_pp_scheduler_lock();
+       if (pause_count > 0) {
+               /* Scheduler is suspended, don't schedule any jobs. */
+               mali_pp_scheduler_unlock();
+               if (mali_pp_scheduler_has_virtual_group()) {
+                       mali_group_unlock(virtual_group);
+               }
+               return;
+       }
+
+       /* Find physical job(s) to schedule first. */
+       while (1) {
+               struct mali_group *group;
+               struct mali_pp_job *job;
+               u32 sub_job;
+
+               job = mali_pp_scheduler_get_physical_job();
+               if (NULL == job) {
+                       break; /* No job, early out. */
+               }
+
+               if (mali_scheduler_hint_is_enabled(MALI_SCHEDULER_HINT_GP_BOUND) &&
+                   mali_pp_job_is_large_and_unstarted(job) && !_mali_osk_list_empty(&group_list_working)) {
+                       /* Since not all groups are idle, don't schedule yet. */
+                       break;
+               }
+
+               MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual(job));
+               MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
+               MALI_DEBUG_ASSERT(1 <= mali_pp_job_get_sub_job_count(job));
+
+               /* Acquire a physical group, either from the idle list or from the virtual group.
+                * In case the group was acquired from the virtual group, it's state will be
+                * LEAVING_VIRTUAL and must be set to IDLE before it can be used. */
+               group = mali_pp_scheduler_acquire_physical_group();
+               if (NULL == group) {
+                       /* Could not get a group to run the job on, early out. */
+                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: No more physical groups available.\n"));
+                       break;
+               }
+
+               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquired physical group %p.\n", group));
+
+               /* Mark sub job as started. */
+               sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
+               mali_pp_job_mark_sub_job_started(job, sub_job);
+
+               /* Remove job from queue (if this was the last sub job). */
+               mali_pp_scheduler_dequeue_physical_job(job);
+
+               /* Move group to working list. */
+               _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_working);
+
+               /* Keep track of this group, so that we actually can start the job once we are done with the scheduler lock we are now holding. */
+               physical_groups_to_start[num_physical_jobs_to_start] = group;
+               physical_jobs_to_start[num_physical_jobs_to_start] = job;
+               physical_sub_jobs_to_start[num_physical_jobs_to_start] = sub_job;
+               ++num_physical_jobs_to_start;
+
+               MALI_DEBUG_ASSERT(num_physical_jobs_to_start < MALI_MAX_NUMBER_OF_PP_GROUPS);
+       }
+
+       if (mali_pp_scheduler_has_virtual_group()) {
+               if (VIRTUAL_GROUP_IDLE == virtual_group_state) {
+                       /* We have a virtual group and it is idle. */
+
+                       struct mali_pp_job *job;
+
+                       /* Find a virtual job we can start. */
+                       job = mali_pp_scheduler_get_virtual_job();
+
+                       if (NULL != job) {
+                               MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
+                               MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
+                               MALI_DEBUG_ASSERT(1 == mali_pp_job_get_sub_job_count(job));
+
+                               /* Mark the one and only sub job as started. */
+                               mali_pp_job_mark_sub_job_started(job, 0);
+
+                               /* Remove job from queue. */
+                               mali_pp_scheduler_dequeue_virtual_job(job);
+
+                               /* Virtual group is now working. */
+                               virtual_group_state = VIRTUAL_GROUP_WORKING;
+
+                               /* We no longer need the scheduler lock, but we still need the virtual lock
+                                * in order to start the virtual job. */
+                               mali_pp_scheduler_unlock();
+
+                               /* Start job. */
+                               mali_group_start_pp_job(virtual_group, job, 0);
+
+                               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Virtual job %u (0x%08X) part %u/%u started (from schedule).\n",
+                                                    mali_pp_job_get_id(job), job, 1,
+                                                    mali_pp_job_get_sub_job_count(job)));
+
+                               mali_group_unlock(virtual_group);
+                       } else {
+                               /* No virtual job to start. */
+                               mali_pp_scheduler_unlock();
+                               mali_group_unlock(virtual_group);
+                       }
+               } else {
+                       /* We have a virtual group, but it is busy or disabled. */
+                       MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE != virtual_group_state);
+
+                       mali_pp_scheduler_unlock();
+                       mali_group_unlock(virtual_group);
+               }
+       } else {
+               /* There is no virtual group. */
+               mali_pp_scheduler_unlock();
+       }
+
+       /* We have now released the scheduler lock, and we are ready to start the physical jobs.
+        * The reason we want to wait until we have released the scheduler lock is that job start
+        * may take quite a bit of time (many registers have to be written). This will allow new
+        * jobs from user space to come in, and post-processing of other PP jobs to happen at the
+        * same time as we start jobs. */
+       for (i = 0; i < num_physical_jobs_to_start; i++) {
+               struct mali_group *group = physical_groups_to_start[i];
+               struct mali_pp_job *job  = physical_jobs_to_start[i];
+               u32 sub_job              = physical_sub_jobs_to_start[i];
+
+               MALI_DEBUG_ASSERT_POINTER(group);
+               MALI_DEBUG_ASSERT_POINTER(job);
+               MALI_DEBUG_ASSERT(!mali_group_is_virtual(group));
+               MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual(job));
+
+               mali_group_lock(group);
+
+               /* Set state to IDLE if group was acquired from the virtual group. */
+               group->state = MALI_GROUP_STATE_IDLE;
+
+               mali_group_start_pp_job(group, job, sub_job);
+
+               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from schedule).\n",
+                                    mali_pp_job_get_id(job), job, sub_job + 1,
+                                    mali_pp_job_get_sub_job_count(job)));
+
+               mali_group_unlock(group);
+       }
+}
+
+/**
+ * Set group idle.
+ *
+ * If @ref group is the virtual group, nothing is done since the virtual group should be idle
+ * already.
+ *
+ * If @ref group is a physical group we rejoin the virtual group, if it exists.  If not, we move the
+ * physical group to the idle list.
+ *
+ * @note The group and the scheduler must both be locked when entering this function.  Both will be
+ * unlocked before exiting.
+ *
+ * @param group The group to set idle.
+ */
+static void mali_pp_scheduler_set_group_idle_and_unlock(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       MALI_ASSERT_GROUP_LOCKED(group);
+       MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);
+
+       if (mali_group_is_virtual(group)) {
+               /* The virtual group should have been set to non-working already. */
+               MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
+
+               mali_pp_scheduler_unlock();
+               mali_group_unlock(group);
+
+               return;
+       } else {
+               if (mali_pp_scheduler_has_virtual_group()) {
+                       /* Rejoin virtual group. */
+
+                       /* We're no longer needed on the scheduler list. */
+                       _mali_osk_list_delinit(&(group->pp_scheduler_list));
+
+                       /* Make sure no interrupts are handled for this group during the transition
+                        * from physical to virtual. */
+                       group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
+
+                       /* Drop both locks before taking the virtual group lock; the
+                        * virtual group lock must be taken before the scheduler lock,
+                        * so it cannot be acquired while these are held. */
+                       mali_pp_scheduler_unlock();
+                       mali_group_unlock(group);
+
+                       mali_group_lock(virtual_group);
+
+                       if (mali_pp_scheduler_has_virtual_group()) {
+                               mali_pp_scheduler_enable_empty_virtual();
+                       }
+
+                       /* We need to recheck the group state since it is possible that someone has
+                        * modified the group before we locked the virtual group. */
+                       if (MALI_GROUP_STATE_JOINING_VIRTUAL == group->state) {
+                               mali_group_add_group(virtual_group, group, MALI_TRUE);
+                       }
+
+                       mali_group_unlock(virtual_group);
+               } else {
+                       /* Move physical group back to idle list. */
+                       _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+                       trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), 0, 0, 0);
+#endif
+
+                       mali_pp_scheduler_unlock();
+                       mali_group_unlock(group);
+               }
+       }
+}
+
+/**
+ * Schedule job on locked group.
+ *
+ * @note The group and the scheduler must both be locked when entering this function.  Both will be
+ * unlocked before exiting.
+ *
+ * @param group The group to schedule on.
+ */
+static void mali_pp_scheduler_schedule_on_group_and_unlock(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       MALI_ASSERT_GROUP_LOCKED(group);
+       MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);
+
+       if (mali_group_is_virtual(group)) {
+               /* Now that the virtual group is idle, check if we should reconfigure. */
+
+               struct mali_pp_job *virtual_job = NULL;
+               struct mali_pp_job *physical_job = NULL;
+               struct mali_group *physical_group = NULL;
+               u32 physical_sub_job = 0;
+
+               MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
+
+               if (mali_pp_scheduler_can_move_virtual_to_physical()) {
+                       /* There is a runnable physical job and we can acquire a physical group. */
+                       physical_job = mali_pp_scheduler_get_physical_job();
+                       MALI_DEBUG_ASSERT_POINTER(physical_job);
+                       MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(physical_job));
+
+                       /* Mark sub job as started. */
+                       physical_sub_job = mali_pp_job_get_first_unstarted_sub_job(physical_job);
+                       mali_pp_job_mark_sub_job_started(physical_job, physical_sub_job);
+
+                       /* Remove job from queue (if this was the last sub job). */
+                       mali_pp_scheduler_dequeue_physical_job(physical_job);
+
+                       /* Acquire a physical group from the virtual group.  Its state will
+                        * be LEAVING_VIRTUAL and must be set to IDLE before it can be
+                        * used. */
+                       physical_group = mali_group_acquire_group(virtual_group);
+
+                       /* Move physical group to the working list, as we will soon start a job on it. */
+                       _mali_osk_list_move(&(physical_group->pp_scheduler_list), &group_list_working);
+
+                       mali_pp_scheduler_disable_empty_virtual();
+               }
+
+               /* Get next virtual job. */
+               virtual_job = mali_pp_scheduler_get_virtual_job();
+               /* The group may just have been disabled above (all cores taken for
+                * physical jobs), in which case no virtual job can be started now. */
+               if (NULL != virtual_job && VIRTUAL_GROUP_IDLE == virtual_group_state) {
+                       /* There is a runnable virtual job. */
+
+                       MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(virtual_job));
+                       MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(virtual_job));
+                       MALI_DEBUG_ASSERT(1 == mali_pp_job_get_sub_job_count(virtual_job));
+
+                       mali_pp_job_mark_sub_job_started(virtual_job, 0);
+
+                       /* Remove job from queue. */
+                       mali_pp_scheduler_dequeue_virtual_job(virtual_job);
+
+                       /* Virtual group is now working. */
+                       virtual_group_state = VIRTUAL_GROUP_WORKING;
+
+                       mali_pp_scheduler_unlock();
+
+                       /* Start job. */
+                       mali_group_start_pp_job(group, virtual_job, 0);
+
+                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Virtual job %u (0x%08X) part %u/%u started (from job_done).\n",
+                                            mali_pp_job_get_id(virtual_job), virtual_job, 1,
+                                            mali_pp_job_get_sub_job_count(virtual_job)));
+               } else {
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+                       trace_gpu_sched_switch("Mali_Virtual_PP", sched_clock(), 0, 0, 0);
+#endif
+
+                       mali_pp_scheduler_unlock();
+               }
+
+               /* Releasing the virtual group lock that was held when entering the function. */
+               mali_group_unlock(group);
+
+               /* Start a physical job (if we acquired a physical group earlier). */
+               if (NULL != physical_job && NULL != physical_group) {
+                       mali_group_lock(physical_group);
+
+                       /* Change the group state from LEAVING_VIRTUAL to IDLE to complete the transition. */
+                       physical_group->state = MALI_GROUP_STATE_IDLE;
+
+                       /* Start job. */
+                       mali_group_start_pp_job(physical_group, physical_job, physical_sub_job);
+
+                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from job_done).\n",
+                                            mali_pp_job_get_id(physical_job), physical_job, physical_sub_job + 1,
+                                            mali_pp_job_get_sub_job_count(physical_job)));
+
+                       mali_group_unlock(physical_group);
+               }
+       } else {
+               /* Physical group. */
+               struct mali_pp_job *job = NULL;
+               u32 sub_job = 0;
+
+               job = mali_pp_scheduler_get_physical_job();
+               if (NULL != job) {
+                       /* There is a runnable physical job. */
+                       MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
+
+                       /* Mark sub job as started. */
+                       sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
+                       mali_pp_job_mark_sub_job_started(job, sub_job);
+
+                       /* Remove job from queue (if this was the last sub job). */
+                       mali_pp_scheduler_dequeue_physical_job(job);
+
+                       mali_pp_scheduler_unlock();
+
+                       /* Group is already on the working list, so start the new job. */
+                       mali_group_start_pp_job(group, job, sub_job);
+
+                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from job_done).\n",
+                                            mali_pp_job_get_id(job), job, sub_job + 1, mali_pp_job_get_sub_job_count(job)));
+
+                       mali_group_unlock(group);
+               } else {
+                       mali_pp_scheduler_set_group_idle_and_unlock(group);
+               }
+       }
+}
+
+/**
+ * Handle completion of a (sub) job on @group.
+ *
+ * Called with the group lock held; the lock is released before returning.
+ * Marks the sub job completed and, once every sub job of the job is done,
+ * releases the job's timeline tracker (which may activate waiting trackers —
+ * the scheduler lock is dropped around the release to allow this).  The group
+ * is then either given a new job directly or moved back to idle, and any
+ * schedulers indicated by the returned mask are kicked.
+ *
+ * @param group         Group that finished the (sub) job (locked on entry).
+ * @param job           The job that (partially) completed.
+ * @param sub_job       Index of the completed sub job.
+ * @param success       MALI_TRUE if the sub job completed successfully.
+ * @param in_upper_half Forwarded to mali_scheduler_schedule_from_mask();
+ *                      indicates interrupt (upper half) context.
+ */
+void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success, mali_bool in_upper_half)
+{
+       mali_bool job_is_done = MALI_FALSE;
+       mali_bool schedule_on_group = MALI_FALSE;
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) part %u/%u completed (%s).\n",
+                            mali_pp_job_is_virtual(job) ? "Virtual" : "Physical",
+                            mali_pp_job_get_id(job),
+                            job, sub_job + 1,
+                            mali_pp_job_get_sub_job_count(job),
+                            success ? "success" : "failure"));
+
+       MALI_ASSERT_GROUP_LOCKED(group);
+       mali_pp_scheduler_lock();
+
+       mali_pp_job_mark_sub_job_completed(job, success);
+
+       MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job) == mali_group_is_virtual(group));
+
+       job_is_done = mali_pp_job_is_complete(job);
+
+       if (job_is_done) {
+               /* Job is removed from these lists when the last sub job is scheduled. */
+               MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+               MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
+
+               /* Remove job from session list. */
+               _mali_osk_list_delinit(&job->session_list);
+
+               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: All parts completed for %s job %u (0x%08X).\n",
+                                    mali_pp_job_is_virtual(job) ? "virtual" : "physical",
+                                    mali_pp_job_get_id(job), job));
+
+               /* Drop the scheduler lock while releasing the tracker, then
+                * re-take it for the rescheduling decisions below. */
+               mali_pp_scheduler_unlock();
+
+               /* Release tracker.  If other trackers are waiting on this tracker, this could
+                * trigger activation.  The returned scheduling mask can be used to determine if we
+                * have to schedule GP, PP or both. */
+               schedule_mask = mali_timeline_tracker_release(&job->tracker);
+
+               mali_pp_scheduler_lock();
+       }
+
+       if (mali_group_is_virtual(group)) {
+               /* Obey the policy. */
+               virtual_group_state = VIRTUAL_GROUP_IDLE;
+       }
+
+       /* If paused, then this was the last job, so wake up sleeping workers and return. */
+       if (pause_count > 0) {
+               /* Wake up sleeping workers. Their wake-up condition is that
+                * num_slots == num_slots_idle, so unless we are done working, no
+                * threads will actually be woken up.
+                */
+               if (!mali_group_is_virtual(group)) {
+                       /* Move physical group to idle list. */
+                       _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
+               }
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+               trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), 0, 0, 0);
+#endif
+
+               _mali_osk_wait_queue_wake_up(pp_scheduler_working_wait_queue);
+
+               mali_pp_scheduler_unlock();
+               mali_group_unlock(group);
+
+               if (job_is_done) {
+                       /* Return job to user and delete it. */
+                       mali_pp_scheduler_finalize_job(job);
+               }
+
+               /* A GP job might be queued by tracker release above,
+                * make sure GP scheduler gets a chance to schedule this (if possible)
+                */
+               mali_scheduler_schedule_from_mask(schedule_mask & ~MALI_SCHEDULER_MASK_PP, in_upper_half);
+
+               return;
+       }
+
+       /* Since this group just finished running a job, we can reschedule a new job on it
+        * immediately. */
+
+       /* By default, don't schedule on group. */
+       schedule_on_group = MALI_FALSE;
+
+       if (mali_group_is_virtual(group)) {
+               /* Always schedule immediately on virtual group. */
+               schedule_mask &= ~MALI_SCHEDULER_MASK_PP;
+               schedule_on_group = MALI_TRUE;
+       } else if (0 < job_queue.depth && (!mali_scheduler_mask_is_set(schedule_mask, MALI_SCHEDULER_MASK_PP) || _mali_osk_list_empty(&group_list_idle))) {
+               struct mali_pp_job *next_job = NULL;
+
+               next_job = mali_pp_scheduler_get_physical_job();
+               MALI_DEBUG_ASSERT_POINTER(next_job);
+
+               /* If no new jobs have been queued or if this group is the only idle group, we can
+                * schedule immediately on this group, unless we are GP bound and the next job would
+                * benefit from all its sub jobs being started concurrently. */
+
+               if (mali_scheduler_hint_is_enabled(MALI_SCHEDULER_HINT_GP_BOUND) && mali_pp_job_is_large_and_unstarted(next_job)) {
+                       /* We are GP bound and the job would benefit from all sub jobs being started
+                        * concurrently.  Postpone scheduling until after group has been unlocked. */
+                       schedule_mask |= MALI_SCHEDULER_MASK_PP;
+                       schedule_on_group = MALI_FALSE;
+               } else {
+                       /* Schedule job immediately since we are not GP bound. */
+                       schedule_mask &= ~MALI_SCHEDULER_MASK_PP;
+                       schedule_on_group = MALI_TRUE;
+               }
+       }
+
+       /* Both branches below release the group and scheduler locks. */
+       if (schedule_on_group) {
+               /* Schedule a new job on this group. */
+               mali_pp_scheduler_schedule_on_group_and_unlock(group);
+       } else {
+               /* Set group idle.  Will rejoin virtual group, under appropriate conditions. */
+               mali_pp_scheduler_set_group_idle_and_unlock(group);
+       }
+
+       if (!schedule_on_group || MALI_SCHEDULER_MASK_EMPTY != schedule_mask) {
+               if (MALI_SCHEDULER_MASK_PP & schedule_mask) {
+                       /* Schedule PP directly. */
+                       mali_pp_scheduler_schedule();
+                       schedule_mask &= ~MALI_SCHEDULER_MASK_PP;
+               }
+
+               /* Schedule other jobs that were activated. */
+               mali_scheduler_schedule_from_mask(schedule_mask, in_upper_half);
+       }
+
+       if (job_is_done) {
+               /* Return job to user and delete it. */
+               mali_pp_scheduler_finalize_job(job);
+       }
+}
+
+void mali_pp_scheduler_suspend(void)
+{
+       /* Block further scheduling by raising the pause count... */
+       mali_pp_scheduler_lock();
+       ++pause_count;
+       mali_pp_scheduler_unlock();
+
+       /* ...then sleep until everything has drained.  The wait condition
+        * (mali_pp_scheduler_is_suspended) is re-evaluated when
+        * mali_pp_scheduler_job_done() wakes this queue; it only holds when
+        * the scheduler is idle while pause_count > 0, so an active core
+        * keeps us sleeping. */
+       _mali_osk_wait_queue_wait_event(pp_scheduler_working_wait_queue, mali_pp_scheduler_is_suspended, NULL);
+}
+
+/**
+ * Re-enable scheduling after a matching mali_pp_scheduler_suspend() call.
+ *
+ * Decrements the pause count and, when this was the last outstanding
+ * suspend, kicks the scheduler so jobs queued while paused get dispatched.
+ */
+void mali_pp_scheduler_resume(void)
+{
+       mali_bool schedule_now;
+
+       mali_pp_scheduler_lock();
+       pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+       /* Sample pause_count while still holding the scheduler lock; reading
+        * it again after unlock (as the old code did) races with concurrent
+        * suspend/resume callers. */
+       schedule_now = (0 == pause_count) ? MALI_TRUE : MALI_FALSE;
+       mali_pp_scheduler_unlock();
+
+       if (MALI_TRUE == schedule_now) {
+               mali_pp_scheduler_schedule();
+       }
+}
+
+/* Register @job for the session and hand it to the Timeline system.
+ * Returns the timeline point identifying the job. */
+mali_timeline_point mali_pp_scheduler_submit_job(struct mali_session_data *session, struct mali_pp_job *job)
+{
+       u32 fb_lookup_id;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       /* Put the job in the session's framebuffer lookup table, which is used
+        * to quickly discard writeback units of queued jobs. */
+       mali_pp_scheduler_lock();
+       fb_lookup_id = mali_pp_job_get_fb_lookup_id(job);
+       MALI_DEBUG_ASSERT(MALI_PP_JOB_FB_LOOKUP_LIST_SIZE > fb_lookup_id);
+       _mali_osk_list_addtail(&job->session_fb_lookup_list, &session->pp_job_fb_lookup_list[fb_lookup_id]);
+       mali_pp_scheduler_unlock();
+
+       /* We hold a PM reference for every job we hold queued (and running) */
+       _mali_osk_pm_dev_ref_add();
+
+       /* Add job to Timeline system. */
+       return mali_timeline_system_add_tracker(session->timeline_system, &job->tracker, MALI_TIMELINE_PP);
+}
+
+/* User-kernel entry point: create a PP job from @uargs, submit it, and write
+ * the resulting timeline point back to user space. */
+_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, _mali_uk_pp_start_job_s *uargs)
+{
+       struct mali_session_data *session;
+       struct mali_pp_job *job;
+       mali_timeline_point point;
+       u32 __user *timeline_point_ptr;
+
+       MALI_DEBUG_ASSERT_POINTER(uargs);
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+
+       session = (struct mali_session_data *)ctx;
+
+       job = mali_pp_job_create(session, uargs, mali_scheduler_get_new_id());
+       if (NULL == job) {
+               MALI_PRINT_ERROR(("Failed to create PP job.\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       /* Fetch the user space destination before submitting: ownership of the
+        * job is transferred by the submit call. */
+       timeline_point_ptr = (u32 __user *)job->uargs.timeline_point_ptr;
+
+       point = mali_pp_scheduler_submit_job(session, job);
+       job = NULL; /* Must not be touched again; the scheduler owns it now. */
+
+       if (0 != _mali_osk_put_user(((u32)point), timeline_point_ptr)) {
+               /* Let user space know that something failed after the job was started. */
+               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* UK entry point: create and submit a linked GP+PP job pair.  The GP job
+ * is chained to the PP job's tracker, and the PP job's timeline point is
+ * reported back to user space. */
+_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, _mali_uk_pp_and_gp_start_job_s *uargs)
+{
+       struct mali_session_data *session;
+       _mali_uk_pp_and_gp_start_job_s args;
+       struct mali_pp_job *pp_job;
+       struct mali_gp_job *gp_job;
+       u32 __user *point_ptr = NULL;
+       mali_timeline_point point;
+
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+       MALI_DEBUG_ASSERT_POINTER(uargs);
+
+       session = (struct mali_session_data *) ctx;
+
+       /* Copy the combined argument struct in from user space. */
+       if (0 != _mali_osk_copy_from_user(&args, uargs, sizeof(_mali_uk_pp_and_gp_start_job_s))) {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       pp_job = mali_pp_job_create(session, args.pp_args, mali_scheduler_get_new_id());
+       if (NULL == pp_job) {
+               MALI_PRINT_ERROR(("Failed to create PP job.\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       /* The GP job is created with a reference to the PP job's tracker. */
+       gp_job = mali_gp_job_create(session, args.gp_args, mali_scheduler_get_new_id(), mali_pp_job_get_tracker(pp_job));
+       if (NULL == gp_job) {
+               MALI_PRINT_ERROR(("Failed to create GP job.\n"));
+               mali_pp_job_delete(pp_job);
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       /* Remember where to report the timeline point before ownership of
+        * the jobs is handed over to the schedulers. */
+       point_ptr = (u32 __user *) pp_job->uargs.timeline_point_ptr;
+
+       mali_gp_scheduler_submit_job(session, gp_job);
+       gp_job = NULL; /* GP scheduler now owns the job. */
+
+       point = mali_pp_scheduler_submit_job(session, pp_job);
+       pp_job = NULL; /* PP scheduler now owns the job. */
+
+       if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+               /* Let user space know that something failed after the jobs were started. */
+               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* UK call: report the total and currently enabled number of PP cores to
+ * user space. */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT_POINTER(args->ctx);
+       args->number_of_total_cores = num_cores;
+       args->number_of_enabled_cores = enabled_cores;
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Total number of PP cores known to the scheduler, including disabled ones. */
+u32 mali_pp_scheduler_get_num_cores_total(void)
+{
+       return num_cores;
+}
+
+/* Number of PP cores currently enabled for scheduling (<= total). */
+u32 mali_pp_scheduler_get_num_cores_enabled(void)
+{
+       return enabled_cores;
+}
+
+/* UK call: report the PP core version (file-scope pp_version) to user space. */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT_POINTER(args->ctx);
+       args->version = pp_version;
+       return _MALI_OSK_ERR_OK;
+}
+
+/* UK call: disable writeback units of already-queued PP jobs.
+ *
+ * Walks the session's frame-builder lookup bucket for @args->fb_id and, for
+ * every queued job whose frame builder id matches exactly, disables each WB
+ * unit whose source address matches the corresponding wbX_memory argument.
+ * Jobs are found via the lookup list populated in
+ * mali_pp_scheduler_submit_job(); the scheduler lock protects the walk. */
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
+{
+       struct mali_session_data *session;
+       struct mali_pp_job *job;
+       struct mali_pp_job *tmp;
+       u32 fb_lookup_id;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT_POINTER(args->ctx);
+
+       session = (struct mali_session_data*)args->ctx;
+
+       /* Hash the frame builder id into its lookup bucket. */
+       fb_lookup_id = args->fb_id & MALI_PP_JOB_FB_LOOKUP_LIST_MASK;
+
+       mali_pp_scheduler_lock();
+
+       /* Iterate over all jobs for given frame builder_id. */
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &session->pp_job_fb_lookup_list[fb_lookup_id], struct mali_pp_job, session_fb_lookup_list) {
+               /* disable_mask is debug-only bookkeeping for the print below. */
+               MALI_DEBUG_CODE(u32 disable_mask = 0);
+
+               if (mali_pp_job_get_frame_builder_id(job) == (u32) args->fb_id) {
+                       MALI_DEBUG_CODE(disable_mask |= 0xD<<(4*3));
+                       if (args->wb0_memory == job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR/sizeof(u32)]) {
+                               MALI_DEBUG_CODE(disable_mask |= 0x1<<(4*1));
+                               mali_pp_job_disable_wb0(job);
+                       }
+                       if (args->wb1_memory == job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR/sizeof(u32)]) {
+                               MALI_DEBUG_CODE(disable_mask |= 0x2<<(4*2));
+                               mali_pp_job_disable_wb1(job);
+                       }
+                       /* NOTE(review): the wb2 debug nibble (0x3<<(4*3)) overlaps the
+                        * 0xD<<(4*3) marker set above — looks unintended (wb0/wb1 use
+                        * nibbles 1 and 2); debug-print-only, but worth confirming. */
+                       if (args->wb2_memory == job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR/sizeof(u32)]) {
+                               MALI_DEBUG_CODE(disable_mask |= 0x3<<(4*3));
+                               mali_pp_job_disable_wb2(job);
+                       }
+                       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disable WB: 0x%X.\n", disable_mask));
+               } else {
+                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Disable WB mismatching FB.\n"));
+               }
+       }
+
+       mali_pp_scheduler_unlock();
+}
+
+/* Abort all PP jobs belonging to @session (which must be flagged as
+ * aborting).
+ *
+ * Unstarted queued jobs are marked failed and removed from the queues
+ * (with the queue depth counters adjusted to match); fully accounted jobs
+ * are moved to a local list and deleted after the lock is dropped.  Any
+ * jobs still running are aborted on their groups afterwards. */
+void mali_pp_scheduler_abort_session(struct mali_session_data *session)
+{
+       u32 i = 0;
+       struct mali_pp_job *job, *tmp_job;
+       struct mali_group *group, *tmp_group;
+       struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs);
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT(session->is_aborting);
+
+       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Aborting all jobs from session 0x%08X.\n", session));
+
+       mali_pp_scheduler_lock();
+
+       /* Find all jobs from the aborting session. */
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp_job, &session->pp_job_list, struct mali_pp_job, session_list) {
+               /* Remove job from queue: undo the depth accounting done when
+                * the job was queued, counting only unstarted sub jobs. */
+               if (mali_pp_job_is_virtual(job)) {
+                       MALI_DEBUG_ASSERT(1 == mali_pp_job_get_sub_job_count(job));
+                       if (0 == mali_pp_job_get_first_unstarted_sub_job(job)) {
+                               --virtual_job_queue.depth;
+                       }
+               } else {
+                       job_queue.depth -= mali_pp_job_get_sub_job_count(job) - mali_pp_job_get_first_unstarted_sub_job(job);
+               }
+
+               _mali_osk_list_delinit(&job->list);
+               _mali_osk_list_delinit(&job->session_fb_lookup_list);
+
+               mali_pp_job_mark_unstarted_failed(job);
+
+               if (mali_pp_job_is_complete(job)) {
+                       /* Job is complete, remove from session list. */
+                       _mali_osk_list_delinit(&job->session_list);
+
+                       /* Move job to local list for release and deletion. */
+                       _mali_osk_list_add(&job->list, &removed_jobs);
+
+                       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Aborted PP job %u (0x%08X).\n", mali_pp_job_get_id(job), job));
+               } else {
+                       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Keeping partially started PP job %u (0x%08X) in session.\n", mali_pp_job_get_id(job), job));
+               }
+       }
+
+       /* Snapshot working and idle groups so they can be aborted after the
+        * scheduler lock is dropped. */
+       _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working, struct mali_group, pp_scheduler_list) {
+               groups[i++] = group;
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_idle, struct mali_group, pp_scheduler_list) {
+               groups[i++] = group;
+       }
+
+       mali_pp_scheduler_unlock();
+
+       /* Release and delete all found jobs from the aborting session. */
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp_job, &removed_jobs, struct mali_pp_job, list) {
+               mali_timeline_tracker_release(&job->tracker);
+               mali_pp_job_delete(job);
+               mali_pp_scheduler_job_completed();
+       }
+
+       /* Abort any running jobs from the session. */
+       while (i > 0) {
+               mali_group_abort_session(groups[--i], session);
+       }
+
+       if (mali_pp_scheduler_has_virtual_group()) {
+               mali_group_abort_session(virtual_group, session);
+       }
+}
+
+/* Wait-queue predicate used by mali_pp_scheduler_suspend(): MALI_TRUE when
+ * the scheduler is paused and neither a physical nor the virtual group is
+ * running a job. */
+static mali_bool mali_pp_scheduler_is_suspended(void *data)
+{
+       mali_bool suspended;
+
+       /* Callback signature requires the argument; it is unused here. */
+       MALI_IGNORE(data);
+
+       mali_pp_scheduler_lock();
+       suspended = (pause_count > 0) &&
+                   _mali_osk_list_empty(&group_list_working) &&
+                   (VIRTUAL_GROUP_WORKING != virtual_group_state);
+       mali_pp_scheduler_unlock();
+
+       return suspended;
+}
+
+/* Return the PP core of the virtual group, or NULL when this GPU has no
+ * virtual group. */
+struct mali_pp_core *mali_pp_scheduler_get_virtual_pp(void)
+{
+       if (!mali_pp_scheduler_has_virtual_group()) {
+               return NULL;
+       }
+
+       return mali_group_get_pp_core(virtual_group);
+}
+
+#if MALI_STATE_TRACKING
+/* Dump a human-readable snapshot of the PP scheduler state (queues plus
+ * every working/idle/disabled group and the virtual group) into @buf of
+ * @size bytes.  Returns the number of characters written.
+ *
+ * NOTE(review): "size - n" is u32 arithmetic and wraps if n ever exceeds
+ * size — presumably callers size the buffer generously; verify. */
+u32 mali_pp_scheduler_dump_state(char *buf, u32 size)
+{
+       int n = 0;
+       struct mali_group *group;
+       struct mali_group *temp;
+
+       n += _mali_osk_snprintf(buf + n, size - n, "PP:\n");
+       n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue.normal_pri) ? "empty" : "not empty");
+       n += _mali_osk_snprintf(buf + n, size - n, "\tHigh priority queue is %s\n", _mali_osk_list_empty(&job_queue.high_pri) ? "empty" : "not empty");
+       n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, pp_scheduler_list) {
+               n += mali_group_dump_state(group, buf + n, size - n);
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list) {
+               n += mali_group_dump_state(group, buf + n, size - n);
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, pp_scheduler_list) {
+               n += mali_group_dump_state(group, buf + n, size - n);
+       }
+
+       if (mali_pp_scheduler_has_virtual_group()) {
+               n += mali_group_dump_state(virtual_group, buf + n, size -n);
+       }
+
+       n += _mali_osk_snprintf(buf + n, size - n, "\n");
+       return n;
+}
+#endif
+
+/* Reset every PP group after power on (and on init).
+ *
+ * The idle list is iterated without the scheduler lock while groups are
+ * reset; that is only safe because the scheduler is paused and all cores
+ * are idle, which is always the case on init and power on. */
+void mali_pp_scheduler_reset_all_groups(void)
+{
+       struct mali_group *iter, *iter_tmp;
+       struct mali_group *local_groups[MALI_MAX_NUMBER_OF_GROUPS];
+       s32 num_found = 0;
+
+       if (mali_pp_scheduler_has_virtual_group()) {
+               mali_group_lock(virtual_group);
+               mali_group_reset(virtual_group);
+               mali_group_unlock(virtual_group);
+       }
+
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+       MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
+
+       /* Snapshot the idle list under the scheduler lock... */
+       mali_pp_scheduler_lock();
+       _MALI_OSK_LIST_FOREACHENTRY(iter, iter_tmp, &group_list_idle, struct mali_group, pp_scheduler_list) {
+               local_groups[num_found++] = iter;
+       }
+       mali_pp_scheduler_unlock();
+
+       /* ...then reset each group outside of it. */
+       while (num_found > 0) {
+               struct mali_group *group = local_groups[--num_found];
+
+               mali_group_lock(group);
+               mali_group_reset(group);
+               mali_group_unlock(group);
+       }
+}
+
+/* Zap @session's state on every currently working group, including the
+ * virtual group when present. */
+void mali_pp_scheduler_zap_all_active(struct mali_session_data *session)
+{
+       struct mali_group *iter, *iter_tmp;
+       struct mali_group *active[MALI_MAX_NUMBER_OF_GROUPS];
+       s32 count = 0;
+
+       if (mali_pp_scheduler_has_virtual_group()) {
+               mali_group_zap_session(virtual_group, session);
+       }
+
+       /* Snapshot the working list under the scheduler lock, then zap each
+        * group outside of it. */
+       mali_pp_scheduler_lock();
+       _MALI_OSK_LIST_FOREACHENTRY(iter, iter_tmp, &group_list_working, struct mali_group, pp_scheduler_list) {
+               active[count++] = iter;
+       }
+       mali_pp_scheduler_unlock();
+
+       while (count > 0) {
+               mali_group_zap_session(active[--count], session);
+       }
+}
+
+/* A pm reference must be taken with _mali_osk_pm_dev_ref_add_no_power_on
+ * before calling this function to avoid Mali powering down as HW is accessed.
+ */
+/* Move a DISABLED @group back into active use: either join it into the
+ * virtual group (Mali-450) or put it on the idle list.  No-op when the
+ * group is not disabled.  Increments enabled_cores. */
+static void mali_pp_scheduler_enable_group_internal(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       mali_group_lock(group);
+
+       if (MALI_GROUP_STATE_DISABLED != group->state) {
+               mali_group_unlock(group);
+               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: PP group %p already enabled.\n", group));
+               return;
+       }
+
+       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Enabling PP group %p.\n", group));
+
+       mali_pp_scheduler_lock();
+
+       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
+       ++enabled_cores;
+
+       if (mali_pp_scheduler_has_virtual_group()) {
+               mali_bool update_hw;
+
+               /* Add group to virtual group. */
+               _mali_osk_list_delinit(&(group->pp_scheduler_list));
+               group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
+
+               /* Drop the scheduler and group locks before taking the virtual
+                * group lock — NOTE(review): presumably required by lock
+                * ordering; confirm against the mali_group locking rules. */
+               mali_pp_scheduler_unlock();
+               mali_group_unlock(group);
+
+               mali_group_lock(virtual_group);
+
+               /* HW state can only be touched while the GPU is powered. */
+               update_hw = mali_pm_is_power_on();
+               /* Get ref of group domain */
+               mali_group_get_pm_domain_ref(group);
+
+               MALI_DEBUG_ASSERT(NULL == group->pm_domain ||
+                                 MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(group->pm_domain));
+
+               if (update_hw) {
+                       mali_group_lock(group);
+                       mali_group_power_on_group(group);
+                       mali_group_reset(group);
+                       mali_group_unlock(group);
+               }
+
+               mali_pp_scheduler_enable_empty_virtual();
+               mali_group_add_group(virtual_group, group, update_hw);
+               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Done enabling group %p. Added to virtual group.\n", group));
+
+               mali_group_unlock(virtual_group);
+       } else {
+               /* Get ref of group domain */
+               mali_group_get_pm_domain_ref(group);
+
+               MALI_DEBUG_ASSERT(NULL == group->pm_domain ||
+                                 MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(group->pm_domain));
+
+               /* Put group on idle list. */
+               if (mali_pm_is_power_on()) {
+                       mali_group_power_on_group(group);
+                       mali_group_reset(group);
+               }
+
+               _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
+               group->state = MALI_GROUP_STATE_IDLE;
+
+               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Done enabling group %p. Now on idle list.\n", group));
+               mali_pp_scheduler_unlock();
+               mali_group_unlock(group);
+       }
+}
+
+/* Public wrapper around mali_pp_scheduler_enable_group_internal(): holds a
+ * no-power-on PM reference across the enable, then kicks the scheduler. */
+void mali_pp_scheduler_enable_group(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       /* Keep a PM reference (without forcing power on) while HW state is
+        * touched by the enable path. */
+       _mali_osk_pm_dev_ref_add_no_power_on();
+       mali_pp_scheduler_enable_group_internal(group);
+       _mali_osk_pm_dev_ref_dec_no_power_on();
+
+       /* Pick up any jobs that might have been queued if all PP groups were disabled. */
+       mali_pp_scheduler_schedule();
+}
+
+/* Remove @group from active use: take it out of the virtual group if it is
+ * joining/in it, move it to the disabled list, power it off and drop its PM
+ * domain reference.  Decrements enabled_cores.  No-op when already disabled.
+ *
+ * The caller must have paused the scheduler (the group must be idle) and
+ * must hold a PM reference (see mali_pp_scheduler_disable_group()). */
+static void mali_pp_scheduler_disable_group_internal(struct mali_group *group)
+{
+       /* Assert added for parity with mali_pp_scheduler_enable_group_internal(). */
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       if (mali_pp_scheduler_has_virtual_group()) {
+               mali_group_lock(virtual_group);
+
+               MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
+               if (MALI_GROUP_STATE_JOINING_VIRTUAL == group->state) {
+                       /* The group was in the process of being added to the virtual group.  We
+                        * only need to change the state to reverse this. */
+                       group->state = MALI_GROUP_STATE_LEAVING_VIRTUAL;
+               } else if (MALI_GROUP_STATE_IN_VIRTUAL == group->state) {
+                       /* Remove group from virtual group.  The state of the group will be
+                        * LEAVING_VIRTUAL and the group will not be on any scheduler list. */
+                       mali_group_remove_group(virtual_group, group);
+
+                       mali_pp_scheduler_disable_empty_virtual();
+               }
+
+               mali_group_unlock(virtual_group);
+       }
+
+       mali_group_lock(group);
+       mali_pp_scheduler_lock();
+
+       MALI_DEBUG_ASSERT(   MALI_GROUP_STATE_IDLE            == group->state
+                            || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
+                            || MALI_GROUP_STATE_DISABLED        == group->state);
+
+       if (MALI_GROUP_STATE_DISABLED == group->state) {
+               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: PP group %p already disabled.\n", group));
+       } else {
+               MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disabling PP group %p.\n", group));
+
+               --enabled_cores;
+               _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_disabled);
+               group->state = MALI_GROUP_STATE_DISABLED;
+
+               mali_group_power_off_group(group, MALI_TRUE);
+               mali_group_put_pm_domain_ref(group);
+       }
+
+       mali_pp_scheduler_unlock();
+       mali_group_unlock(group);
+}
+
+/* Public wrapper around mali_pp_scheduler_disable_group_internal(): pauses
+ * scheduling so the group is guaranteed idle, holds a no-power-on PM
+ * reference across the disable, then resumes scheduling. */
+void mali_pp_scheduler_disable_group(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       mali_pp_scheduler_suspend();
+       _mali_osk_pm_dev_ref_add_no_power_on();
+
+       mali_pp_scheduler_disable_group_internal(group);
+
+       _mali_osk_pm_dev_ref_dec_no_power_on();
+       mali_pp_scheduler_resume();
+}
+
+/* Notify every session that the number of enabled PP cores changed to
+ * @new_nr_of_cores (skipped on Mali-450).
+ *
+ * The parameter is deliberately not named num_cores: a file-scope variable
+ * of that name exists and shadowing it is error prone.
+ *
+ * Notification objects cannot be allocated while the session lock is held,
+ * so they are allocated up front; if sessions were created in the meantime
+ * the whole round is retried. */
+static void mali_pp_scheduler_notify_core_change(u32 new_nr_of_cores)
+{
+       mali_bool done = MALI_FALSE;
+
+       if (mali_is_mali450()) {
+               return;
+       }
+
+       while (!done) {
+               u32 i;
+               u32 num_sessions_alloc;
+               u32 num_sessions_with_lock;
+               u32 used_notification_objects = 0;
+               _mali_osk_notification_t **notobjs;
+
+               /* Pre allocate the number of notifications objects we need right now (might change after lock has been taken) */
+               num_sessions_alloc = mali_session_get_count();
+               if (0 == num_sessions_alloc) {
+                       /* No sessions to report to */
+                       return;
+               }
+
+               notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
+               if (NULL == notobjs) {
+                       MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n"));
+                       /* there is probably no point in trying again, system must be really low on memory and probably unusable now anyway */
+                       return;
+               }
+
+               for (i = 0; i < num_sessions_alloc; i++) {
+                       notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_NUM_CORE_CHANGE, sizeof(_mali_uk_pp_num_cores_changed_s));
+                       if (NULL != notobjs[i]) {
+                               _mali_uk_pp_num_cores_changed_s *data = notobjs[i]->result_buffer;
+                               data->number_of_enabled_cores = new_nr_of_cores;
+                       } else {
+                               MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure %u)\n", i));
+                       }
+               }
+
+               mali_session_lock();
+
+               /* number of sessions will not change while we hold the lock */
+               num_sessions_with_lock = mali_session_get_count();
+
+               if (num_sessions_alloc >= num_sessions_with_lock) {
+                       /* We have allocated enough notification objects for all the sessions atm */
+                       struct mali_session_data *session, *tmp;
+                       MALI_SESSION_FOREACH(session, tmp, link) {
+                               MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
+                               if (NULL != notobjs[used_notification_objects]) {
+                                       mali_session_send_notification(session, notobjs[used_notification_objects]);
+                                       notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
+                               }
+                               used_notification_objects++;
+                       }
+                       done = MALI_TRUE;
+               }
+
+               mali_session_unlock();
+
+               /* Delete any remaining/unused notification objects */
+               for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
+                       if (NULL != notobjs[used_notification_objects]) {
+                               _mali_osk_notification_delete(notobjs[used_notification_objects]);
+                       }
+               }
+
+               _mali_osk_free(notobjs);
+       }
+}
+
+/* Enable disabled PP groups one by one until @target_core_nr cores are
+ * enabled or the disabled list is empty.  A PM reference is held so the
+ * cores can be powered up and reset while being enabled; the scheduler is
+ * kicked at the end to pick up queued work. */
+static void mali_pp_scheduler_core_scale_up(unsigned int target_core_nr)
+{
+       MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - enabled_cores));
+
+       _mali_osk_pm_dev_ref_add_no_power_on();
+       _mali_osk_pm_dev_barrier();
+
+       while (target_core_nr > enabled_cores) {
+               /*
+                * If there are any cores which do not belong to any domain,
+                * then these will always be found at the head of the list and
+                * we'll thus enabled these first.
+                */
+
+               mali_pp_scheduler_lock();
+
+               if (!_mali_osk_list_empty(&group_list_disabled)) {
+                       struct mali_group *group;
+
+                       group = _MALI_OSK_LIST_ENTRY(group_list_disabled.next, struct mali_group, pp_scheduler_list);
+
+                       MALI_DEBUG_ASSERT_POINTER(group);
+                       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
+
+                       /* Drop the scheduler lock; the enable path takes group
+                        * and scheduler locks itself. */
+                       mali_pp_scheduler_unlock();
+
+                       mali_pp_scheduler_enable_group_internal(group);
+               } else {
+                       mali_pp_scheduler_unlock();
+                       break; /* no more groups on disabled list */
+               }
+       }
+
+       _mali_osk_pm_dev_ref_dec_no_power_on();
+
+       mali_pp_scheduler_schedule();
+}
+
+/* Disable enabled PP groups until only @target_core_nr cores remain.
+ * Scheduling is suspended for the duration so all groups are idle.
+ * Preference order for which cores to disable:
+ *   1) groups belonging to a PMU power domain (can actually be powered off),
+ *   2) physical groups on the scheduler's idle list,
+ *   3) physical groups inside the virtual group. */
+static void mali_pp_scheduler_core_scale_down(unsigned int target_core_nr)
+{
+       MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, enabled_cores - target_core_nr));
+
+       mali_pp_scheduler_suspend();
+
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+
+       _mali_osk_pm_dev_ref_add_no_power_on();
+
+       if (NULL != mali_pmu_get_global_pmu_core()) {
+               int i;
+
+               /* Walk domains from the highest index down, disabling PP
+                * groups until the target is reached. */
+               for (i = MALI_MAX_NUMBER_OF_DOMAINS - 1; i >= 0; i--) {
+                       if (target_core_nr < enabled_cores) {
+                               struct mali_pm_domain *domain;
+
+                               domain = mali_pm_domain_get_from_index(i);
+
+                               /* Domain is valid and has pp cores */
+                               if ((NULL != domain) && (NULL != domain->group_list)) {
+                                       struct mali_group *group;
+
+                                       MALI_PM_DOMAIN_FOR_EACH_GROUP(group, domain) {
+                                               /* If group is pp core */
+                                               if (NULL != mali_group_get_pp_core(group)) {
+                                                       mali_pp_scheduler_disable_group_internal(group);
+                                                       if (target_core_nr >= enabled_cores) {
+                                                               break;
+                                                       }
+                                               }
+                                       }
+                               }
+                       } else {
+                               break;
+                       }
+               }
+       }
+
+       /*
+        * Didn't find enough cores associated with a power domain,
+        * so we need to disable cores which we can't power off with the PMU.
+        * Start with physical groups used by the scheduler,
+        * then remove physical from virtual if even more groups are needed.
+        */
+
+       while (target_core_nr < enabled_cores) {
+               mali_pp_scheduler_lock();
+               if (!_mali_osk_list_empty(&group_list_idle)) {
+                       struct mali_group *group;
+
+                       group = _MALI_OSK_LIST_ENTRY(group_list_idle.next, struct mali_group, pp_scheduler_list);
+                       MALI_DEBUG_ASSERT_POINTER(group);
+
+                       /* Drop the scheduler lock; the disable path takes group
+                        * and scheduler locks itself. */
+                       mali_pp_scheduler_unlock();
+
+                       mali_pp_scheduler_disable_group_internal(group);
+               } else {
+                       mali_pp_scheduler_unlock();
+                       break; /* No more physical groups */
+               }
+       }
+
+       if (mali_pp_scheduler_has_virtual_group()) {
+               while (target_core_nr < enabled_cores) {
+                       mali_group_lock(virtual_group);
+                       if (!_mali_osk_list_empty(&virtual_group->group_list)) {
+                               struct mali_group *group;
+
+                               group = _MALI_OSK_LIST_ENTRY(virtual_group->group_list.next, struct mali_group, group_list);
+                               MALI_DEBUG_ASSERT_POINTER(group);
+
+                               mali_group_unlock(virtual_group);
+
+                               mali_pp_scheduler_disable_group_internal(group);
+                       } else {
+                               mali_group_unlock(virtual_group);
+                               break; /* No more physical groups in virtual group */
+                       }
+               }
+       }
+
+       _mali_osk_pm_dev_ref_dec_no_power_on();
+
+       mali_pp_scheduler_resume();
+}
+
+/* Scale the number of enabled PP cores to @target_core_nr.
+ *
+ * Returns 0 on success (or when already at the target), -EPERM when core
+ * scaling is disabled and @override is not set, -EINVAL for a target of 0
+ * or above the total core count.  All sessions are notified of the
+ * resulting core count. */
+int mali_pp_scheduler_set_perf_level(unsigned int target_core_nr, mali_bool override)
+{
+       if (target_core_nr == enabled_cores) {
+               return 0; /* Already at the requested level. */
+       }
+       if (MALI_FALSE == core_scaling_enabled && MALI_FALSE == override) {
+               return -EPERM;
+       }
+       if (0 == target_core_nr || target_core_nr > num_cores) {
+               return -EINVAL;
+       }
+
+       if (target_core_nr > enabled_cores) {
+               mali_pp_scheduler_core_scale_up(target_core_nr);
+       } else {
+               mali_pp_scheduler_core_scale_down(target_core_nr);
+       }
+
+       if (target_core_nr != enabled_cores) {
+               MALI_DEBUG_PRINT(2, ("Core scaling failed, target number: %d, actual number: %d\n", target_core_nr, enabled_cores));
+       }
+
+       mali_pp_scheduler_notify_core_change(enabled_cores);
+
+       return 0;
+}
+
+/* Allow mali_pp_scheduler_set_perf_level() to change the number of enabled
+ * cores without the override flag. */
+void mali_pp_scheduler_core_scaling_enable(void)
+{
+       /* PS: Core scaling is by default enabled */
+       core_scaling_enabled = MALI_TRUE;
+}
+
+/* Block core scaling requests unless the caller passes the override flag
+ * (see mali_pp_scheduler_set_perf_level()). */
+void mali_pp_scheduler_core_scaling_disable(void)
+{
+       core_scaling_enabled = MALI_FALSE;
+}
+
+/* MALI_TRUE when core scaling requests without override are allowed. */
+mali_bool mali_pp_scheduler_core_scaling_is_enabled(void)
+{
+       return core_scaling_enabled;
+}
+
+/* Bookkeeping for a newly queued PP job: start utilization accounting. */
+static void mali_pp_scheduler_job_queued(void)
+{
+       if (!mali_utilization_enabled()) {
+               return;
+       }
+
+       /*
+        * We cheat a little bit by counting the PP as busy from the time a PP job is queued.
+        * This will be fine because we only loose the tiny idle gap between jobs, but
+        * we will instead get less utilization work to do (less locks taken)
+        */
+       mali_utilization_pp_start();
+}
+
+/* Bookkeeping when a PP job leaves the scheduler: drop its PM reference and
+ * end utilization accounting. */
+static void mali_pp_scheduler_job_completed(void)
+{
+       /* Release the PM reference taken for the job when it was submitted
+        * (see _mali_osk_pm_dev_ref_add() in mali_pp_scheduler_submit_job()). */
+       _mali_osk_pm_dev_ref_dec();
+
+       if (mali_utilization_enabled()) {
+               mali_utilization_pp_end();
+       }
+}
+
+/* Detach @job from the writeback lookup list and release its Timeline
+ * tracker.  Must be entered with the scheduler lock held; the lock is
+ * RELEASED before the tracker is released (hence the name).  The job itself
+ * is not freed here — callers delete or finalize it afterwards. */
+static void mali_pp_scheduler_abort_job_and_unlock_scheduler(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);
+
+       /* This job should not be on any lists. */
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_list));
+
+       _mali_osk_list_delinit(&job->session_fb_lookup_list);
+
+       mali_pp_scheduler_unlock();
+
+       /* Release tracker. */
+       mali_timeline_tracker_release(&job->tracker);
+}
+
+/* Put an activated PP job on the correct scheduler queue (physical or
+ * virtual, normal or high priority), ordered by cache order via
+ * mali_pp_job_should_start_after().
+ *
+ * Jobs from an aborting session are failed and deleted instead of queued.
+ * Returns a scheduling bitmask: MALI_SCHEDULER_MASK_PP is set when a core
+ * is available to run the job right away, so the caller should schedule. */
+static mali_scheduler_mask mali_pp_scheduler_queue_job(struct mali_pp_job *job)
+{
+       _mali_osk_list_t *queue = NULL;
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+       struct mali_pp_job *iter, *tmp;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_POINTER(job->session);
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
+       if (mali_pp_job_needs_dma_buf_mapping(job)) {
+               mali_dma_buf_map_job(job);
+       }
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
+
+       mali_pp_scheduler_lock();
+
+       if (unlikely(job->session->is_aborting)) {
+               /* Before checking if the session is aborting, the scheduler must be locked. */
+               MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);
+
+               MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while session is aborting.\n", mali_pp_job_get_id(job), job));
+
+               mali_pp_scheduler_abort_job_and_unlock_scheduler(job);
+
+               /* Delete job. */
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+               mali_pp_scheduler_deferred_job_delete(job);
+#else
+               mali_pp_job_delete(job);
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
+
+               /* Since we are aborting we ignore the scheduler mask. */
+               return MALI_SCHEDULER_MASK_EMPTY;
+       }
+
+       mali_pp_scheduler_job_queued();
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+       trace_gpu_job_enqueue(mali_pp_job_get_tid(job), mali_pp_job_get_id(job), "PP");
+#endif
+
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE, job->pid, job->tid, job->uargs.frame_builder_id, job->uargs.flush_id, 0);
+
+       job->cache_order = mali_scheduler_get_new_cache_order();
+
+       /* Determine which queue the job should be added to. */
+       if (mali_pp_job_is_virtual(job)) {
+               if (job->session->use_high_priority_job_queue) {
+                       queue = &virtual_job_queue.high_pri;
+               } else {
+                       queue = &virtual_job_queue.normal_pri;
+               }
+
+               virtual_job_queue.depth += 1;
+
+               /* Set schedule bitmask if the virtual group is idle. */
+               if (VIRTUAL_GROUP_IDLE == virtual_group_state) {
+                       schedule_mask |= MALI_SCHEDULER_MASK_PP;
+               }
+       } else {
+               if (job->session->use_high_priority_job_queue) {
+                       queue = &job_queue.high_pri;
+               } else {
+                       queue = &job_queue.normal_pri;
+               }
+
+               job_queue.depth += mali_pp_job_get_sub_job_count(job);
+
+               /* Set schedule bitmask if there are physical PP cores available, or if there is an
+                * idle virtual group. */
+               if (!_mali_osk_list_empty(&group_list_idle)
+                   || (mali_pp_scheduler_has_virtual_group()
+                       && (VIRTUAL_GROUP_IDLE == virtual_group_state))) {
+                       schedule_mask |= MALI_SCHEDULER_MASK_PP;
+               }
+       }
+
+       /* Find position in queue where job should be added (scan from the
+        * tail for the first job this one should start after). */
+       _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, queue, struct mali_pp_job, list) {
+               if (mali_pp_job_should_start_after(job, iter)) {
+                       break;
+               }
+       }
+
+       /* Add job to queue. */
+       _mali_osk_list_add(&job->list, &iter->list);
+
+       /* Add job to session list. */
+       _mali_osk_list_addtail(&job->session_list, &(job->session->pp_job_list));
+
+       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) with %u parts queued.\n",
+                            mali_pp_job_is_virtual(job) ? "Virtual" : "Physical",
+                            mali_pp_job_get_id(job), job, mali_pp_job_get_sub_job_count(job)));
+
+       mali_pp_scheduler_unlock();
+
+       return schedule_mask;
+}
+
+/* Timeline activation entry point for PP jobs: called once all of @job's
+ * dependencies are met.  On fatal activation error the job is aborted and
+ * finalized; otherwise it is queued for execution (possibly deferred for
+ * dma-buf mapping).  Returns the scheduling bitmask from queueing. */
+mali_scheduler_mask mali_pp_scheduler_activate_job(struct mali_pp_job *job)
+{
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_POINTER(job->session);
+
+       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Timeline activation for job %u (0x%08X).\n", mali_pp_job_get_id(job), job));
+
+       if (MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT & job->tracker.activation_error) {
+               MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) activated with error, aborting.\n", mali_pp_job_get_id(job), job));
+
+               mali_pp_scheduler_lock();
+               mali_pp_scheduler_abort_job_and_unlock_scheduler(job);
+
+               mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+               mali_pp_scheduler_finalize_job(job);
+
+               return MALI_SCHEDULER_MASK_EMPTY;
+       }
+
+       /* PP job is ready to run, queue it. */
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
+       if (mali_pp_job_needs_dma_buf_mapping(job)) {
+               /* Queueing is deferred to a worker so dma-buf mapping does not
+                * happen in this context. */
+               mali_pp_scheduler_deferred_job_queue(job);
+
+               return MALI_SCHEDULER_MASK_EMPTY;
+       }
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
+
+       schedule_mask = mali_pp_scheduler_queue_job(job);
+
+       return schedule_mask;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pp_scheduler.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_pp_scheduler.h
new file mode 100644 (file)
index 0000000..944caad
--- /dev/null
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_SCHEDULER_H__
+#define __MALI_PP_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "linux/mali/mali_utgard.h"
+
+/** Initialize the HW independent parts of the PP scheduler
+ */
+_mali_osk_errcode_t mali_pp_scheduler_initialize(void);
+void mali_pp_scheduler_terminate(void);
+
+/** Populate the PP scheduler with groups
+ */
+void mali_pp_scheduler_populate(void);
+void mali_pp_scheduler_depopulate(void);
+
+/**
+ * @brief Handle job completion.
+ *
+ * Will attempt to start a new job on the locked group.
+ *
+ * If all sub jobs have completed the job's tracker will be released, any other resources associated
+ * with the job will be freed.  A notification will also be sent to user space.
+ *
+ * Releasing the tracker might activate other jobs, so if appropriate we also schedule them.
+ *
+ * @note Group must be locked when entering this function.  Will be unlocked before exiting.
+ *
+ * @param group The group that completed the job.
+ * @param job The job that is done.
+ * @param sub_job Sub job of job.
+ * @param success MALI_TRUE if job completed successfully, MALI_FALSE if not.
+ * @param in_upper_half MALI_TRUE if called from upper half, MALI_FALSE if not.
+ */
+void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success, mali_bool in_upper_half);
+
+void mali_pp_scheduler_suspend(void);
+void mali_pp_scheduler_resume(void);
+
+/**
+ * @brief Abort all running and queued PP jobs from session.
+ *
+ * This functions aborts all PP jobs from the specified session. Queued jobs are removed from the
+ * queue and jobs currently running on a core will be aborted.
+ *
+ * @param session Session that is aborting.
+ */
+void mali_pp_scheduler_abort_session(struct mali_session_data *session);
+
+/**
+ * @brief Reset all groups
+ *
+ * This function resets all groups known by the PP scheduler. This must be
+ * called after the Mali HW has been powered on in order to reset the HW.
+ *
+ * This function is intended for power on reset of all cores.
+ * No locking is done, which can only be safe if the scheduler is paused and
+ * all cores idle. That is always the case on init and power on.
+ */
+void mali_pp_scheduler_reset_all_groups(void);
+
+/**
+ * @brief Zap TLB on all groups with \a session active
+ *
+ * The scheduler will zap the session on all groups it owns.
+ */
+void mali_pp_scheduler_zap_all_active(struct mali_session_data *session);
+
+/**
+ * @brief Get the virtual PP core
+ *
+ * The returned PP core may only be used to prepare DMA command buffers for the
+ * PP core. Other actions must go through the PP scheduler, or the virtual
+ * group.
+ *
+ * @return Pointer to the virtual PP core, NULL if this doesn't exist
+ */
+struct mali_pp_core *mali_pp_scheduler_get_virtual_pp(void);
+
+u32 mali_pp_scheduler_dump_state(char *buf, u32 size);
+
+void mali_pp_scheduler_enable_group(struct mali_group *group);
+void mali_pp_scheduler_disable_group(struct mali_group *group);
+
+/**
+ * @brief Used by the Timeline system to queue a PP job.
+ *
+ * @note @ref mali_scheduler_schedule_from_mask() should be called if this function returns non-zero.
+ *
+ * @param job The PP job that is being activated.
+ *
+ * @return A scheduling bitmask that can be used to decide if scheduling is necessary after this
+ * call.
+ */
+mali_scheduler_mask mali_pp_scheduler_activate_job(struct mali_pp_job *job);
+
+/**
+ * @brief Schedule queued jobs on idle cores.
+ */
+void mali_pp_scheduler_schedule(void);
+
+int mali_pp_scheduler_set_perf_level(u32 cores, mali_bool override);
+
+void mali_pp_scheduler_core_scaling_enable(void);
+void mali_pp_scheduler_core_scaling_disable(void);
+mali_bool mali_pp_scheduler_core_scaling_is_enabled(void);
+
+/**
+ * @brief Returns the number of Pixel Processors in the system irrespective of the context
+ *
+ * @return number of physical Pixel Processor cores in the system
+ */
+u32 mali_pp_scheduler_get_num_cores_total(void);
+u32 mali_pp_scheduler_get_num_cores_enabled(void);
+
+#endif /* __MALI_PP_SCHEDULER_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_scheduler.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_scheduler.c
new file mode 100644 (file)
index 0000000..5b04601
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_scheduler.h"
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+mali_bool mali_scheduler_hints[MALI_SCHEDULER_HINT_MAX];
+
+static _mali_osk_atomic_t mali_job_id_autonumber;
+static _mali_osk_atomic_t mali_job_cache_order_autonumber;
+
+static _mali_osk_wq_work_t *pp_scheduler_wq_high_pri = NULL;
+static _mali_osk_wq_work_t *gp_scheduler_wq_high_pri = NULL;
+
+/* High-priority work queue callback: run PP scheduling in process context. */
+static void mali_scheduler_wq_schedule_pp(void *arg)
+{
+       MALI_IGNORE(arg);
+
+       mali_pp_scheduler_schedule();
+}
+
+/* High-priority work queue callback: run GP scheduling in process context. */
+static void mali_scheduler_wq_schedule_gp(void *arg)
+{
+       MALI_IGNORE(arg);
+
+       mali_gp_scheduler_schedule();
+}
+
+/* Set up the global job id/cache-order counters and the two high-priority
+ * work items used for deferred GP/PP scheduling.  On any failure, everything
+ * acquired so far is released via the goto cleanup chain.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if a counter
+ *         could not be initialized, _MALI_OSK_ERR_NOMEM if a work item
+ *         could not be created. */
+_mali_osk_errcode_t mali_scheduler_initialize(void)
+{
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+
+       if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_id_autonumber, 0)) {
+               MALI_DEBUG_PRINT(1,  ("Initialization of atomic job id counter failed.\n"));
+               goto err_out;
+       }
+
+       if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_cache_order_autonumber, 0)) {
+               MALI_DEBUG_PRINT(1,  ("Initialization of atomic job cache order counter failed.\n"));
+               goto err_term_id;
+       }
+
+       err = _MALI_OSK_ERR_NOMEM;
+
+       pp_scheduler_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_scheduler_wq_schedule_pp, NULL);
+       if (NULL == pp_scheduler_wq_high_pri) {
+               goto err_term_cache_order;
+       }
+
+       gp_scheduler_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_scheduler_wq_schedule_gp, NULL);
+       if (NULL == gp_scheduler_wq_high_pri) {
+               goto err_delete_pp_wq;
+       }
+
+       return _MALI_OSK_ERR_OK;
+
+err_delete_pp_wq:
+       _mali_osk_wq_delete_work(pp_scheduler_wq_high_pri);
+err_term_cache_order:
+       _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
+err_term_id:
+       _mali_osk_atomic_term(&mali_job_id_autonumber);
+err_out:
+       return err;
+}
+
+/* Tear down scheduler resources in reverse order of mali_scheduler_initialize(). */
+void mali_scheduler_terminate(void)
+{
+       _mali_osk_wq_delete_work(gp_scheduler_wq_high_pri);
+       _mali_osk_wq_delete_work(pp_scheduler_wq_high_pri);
+       _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
+       _mali_osk_atomic_term(&mali_job_id_autonumber);
+}
+
+/* Hand out the next job id from the global atomic counter. */
+u32 mali_scheduler_get_new_id(void)
+{
+       return _mali_osk_atomic_inc_return(&mali_job_id_autonumber);
+}
+
+/* Hand out the next cache order number from the global atomic counter. */
+u32 mali_scheduler_get_new_cache_order(void)
+{
+       return _mali_osk_atomic_inc_return(&mali_job_cache_order_autonumber);
+}
+
+/* Kick the GP and/or PP scheduler as requested by @mask, either inline or,
+ * when @deferred_schedule is set, via the high-priority work queues. */
+void mali_scheduler_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule)
+{
+       if (mali_scheduler_mask_is_set(mask, MALI_SCHEDULER_MASK_GP)) {
+               if (deferred_schedule) {
+                       _mali_osk_wq_schedule_work_high_pri(gp_scheduler_wq_high_pri);
+               } else {
+                       mali_gp_scheduler_schedule();
+               }
+       }
+
+       if (mali_scheduler_mask_is_set(mask, MALI_SCHEDULER_MASK_PP)) {
+               if (deferred_schedule) {
+                       _mali_osk_wq_schedule_work_high_pri(pp_scheduler_wq_high_pri);
+               } else {
+                       mali_pp_scheduler_schedule();
+               }
+       }
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_scheduler.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_scheduler.h
new file mode 100644 (file)
index 0000000..90a5ca3
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_SCHEDULER_H__
+#define __MALI_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_scheduler_types.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+
+_mali_osk_errcode_t mali_scheduler_initialize(void);
+void mali_scheduler_terminate(void);
+
+/* Globally unique, atomically generated id and cache-order counters for new jobs. */
+u32 mali_scheduler_get_new_id(void);
+u32 mali_scheduler_get_new_cache_order(void);
+
+/**
+ * @brief Reset all groups
+ *
+ * This function resets all groups known by both the GP and PP schedulers.
+ * This must be called after the Mali HW has been powered on in order to reset
+ * the HW.
+ */
+MALI_STATIC_INLINE void mali_scheduler_reset_all_groups(void)
+{
+       mali_gp_scheduler_reset_all_groups();
+       mali_pp_scheduler_reset_all_groups();
+}
+
+/**
+ * @brief Zap TLB on all active groups running \a session
+ *
+ * @param session Pointer to the session to zap
+ */
+MALI_STATIC_INLINE void mali_scheduler_zap_all_active(struct mali_session_data *session)
+{
+       mali_gp_scheduler_zap_all_active(session);
+       mali_pp_scheduler_zap_all_active(session);
+}
+
+/**
+ * Check if bit is set in scheduler mask.
+ *
+ * @param mask Scheduler mask to check.
+ * @param bit Bit to check.
+ * @return MALI_TRUE if bit is set in scheduler mask, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_scheduler_mask_is_set(mali_scheduler_mask mask, mali_scheduler_mask bit)
+{
+       return MALI_SCHEDULER_MASK_EMPTY != (bit & mask);
+}
+
+/**
+ * Schedule GP and PP according to bitmask.
+ *
+ * @param mask A scheduling bitmask.
+ * @param deferred_schedule MALI_TRUE if schedule should be deferred, MALI_FALSE if not.
+ */
+void mali_scheduler_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule);
+
+/* Enable or disable scheduler hint. */
+extern mali_bool mali_scheduler_hints[MALI_SCHEDULER_HINT_MAX];
+
+MALI_STATIC_INLINE void mali_scheduler_hint_enable(mali_scheduler_hint hint)
+{
+       MALI_DEBUG_ASSERT(hint < MALI_SCHEDULER_HINT_MAX);
+       mali_scheduler_hints[hint] = MALI_TRUE;
+}
+
+MALI_STATIC_INLINE void mali_scheduler_hint_disable(mali_scheduler_hint hint)
+{
+       MALI_DEBUG_ASSERT(hint < MALI_SCHEDULER_HINT_MAX);
+       mali_scheduler_hints[hint] = MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_scheduler_hint_is_enabled(mali_scheduler_hint hint)
+{
+       MALI_DEBUG_ASSERT(hint < MALI_SCHEDULER_HINT_MAX);
+       return mali_scheduler_hints[hint];
+}
+
+#endif /* __MALI_SCHEDULER_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_scheduler_types.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_scheduler_types.h
new file mode 100644 (file)
index 0000000..7643c99
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_SCHEDULER_TYPES_H__
+#define __MALI_SCHEDULER_TYPES_H__
+
+#include "mali_osk.h"
+
+/* Span of valid job ids; presumably used for wrap-around comparison of job
+ * ids — confirm at call sites. */
+#define MALI_SCHEDULER_JOB_ID_SPAN 65535
+
+/**
+ * Bitmask used for deferred scheduling of subsystems.
+ */
+typedef u32 mali_scheduler_mask;
+
+#define MALI_SCHEDULER_MASK_GP (1<<0)
+#define MALI_SCHEDULER_MASK_PP (1<<1)
+
+#define MALI_SCHEDULER_MASK_EMPTY 0
+#define MALI_SCHEDULER_MASK_ALL (MALI_SCHEDULER_MASK_GP | MALI_SCHEDULER_MASK_PP)
+
+/* Scheduler behaviour hints; MALI_SCHEDULER_HINT_MAX counts the entries. */
+typedef enum {
+       MALI_SCHEDULER_HINT_GP_BOUND = 0
+#define MALI_SCHEDULER_HINT_MAX        1
+} mali_scheduler_hint;
+
+#endif /* __MALI_SCHEDULER_TYPES_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_session.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_session.c
new file mode 100644 (file)
index 0000000..39ed8a0
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_session.h"
+
+_MALI_OSK_LIST_HEAD(mali_sessions);
+static u32 mali_session_count = 0;
+
+_mali_osk_spinlock_irq_t *mali_sessions_lock;
+
+/* Set up the global session list and the IRQ spinlock protecting it.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM if the lock
+ *         could not be created. */
+_mali_osk_errcode_t mali_session_initialize(void)
+{
+       _MALI_OSK_INIT_LIST_HEAD(&mali_sessions);
+
+       mali_sessions_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SESSIONS);
+       if (NULL == mali_sessions_lock) {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Release the session list lock created by mali_session_initialize(). */
+void mali_session_terminate(void)
+{
+       _mali_osk_spinlock_irq_term(mali_sessions_lock);
+}
+
+/* Add a session to the global session list (takes the sessions lock). */
+void mali_session_add(struct mali_session_data *session)
+{
+       mali_session_lock();
+       _mali_osk_list_add(&session->link, &mali_sessions);
+       mali_session_count++;
+       mali_session_unlock();
+}
+
+/* Remove a session from the global session list (takes the sessions lock). */
+void mali_session_remove(struct mali_session_data *session)
+{
+       mali_session_lock();
+       _mali_osk_list_delinit(&session->link);
+       mali_session_count--;
+       mali_session_unlock();
+}
+
+/* Return the current number of sessions.  Reads the counter without taking
+ * the sessions lock, so the value may be momentarily stale. */
+u32 mali_session_get_count(void)
+{
+       return mali_session_count;
+}
+
+/*
+ * Return the maximum number of completed window jobs across all active
+ * sessions, used by the window-render frames-per-second calculation.
+ * Note: the atomic exchange also resets each session's counter to zero,
+ * so this measures completions per sampling period.
+ */
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+u32 mali_session_max_window_num(void)
+{
+       struct mali_session_data *session, *tmp;
+       u32 max_window_num = 0;
+       u32 tmp_number = 0;
+
+       mali_session_lock();
+
+       MALI_SESSION_FOREACH(session, tmp, link) {
+               tmp_number = _mali_osk_atomic_xchg(&session->number_of_window_jobs, 0);
+               if (max_window_num < tmp_number) {
+                       max_window_num = tmp_number;
+               }
+       }
+
+       mali_session_unlock();
+
+       return max_window_num;
+}
+#endif
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_session.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_session.h
new file mode 100644 (file)
index 0000000..8318a8c
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_SESSION_H__
+#define __MALI_SESSION_H__
+
+#include "mali_mmu_page_directory.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
+struct mali_timeline_system;
+struct mali_soft_system;
+
+/* Number of frame builder job lists per session. */
+#define MALI_PP_JOB_FB_LOOKUP_LIST_SIZE 16
+#define MALI_PP_JOB_FB_LOOKUP_LIST_MASK (MALI_PP_JOB_FB_LOOKUP_LIST_SIZE - 1)
+
+/* Per-open-file driver context: all state owned by one user-space session. */
+struct mali_session_data {
+       _mali_osk_notification_queue_t * ioctl_queue;
+
+       _mali_osk_mutex_t *memory_lock; /**< Lock protecting the vm manipulation */
+       mali_descriptor_mapping * descriptor_mapping; /**< Mapping between userspace descriptors and our pointers */
+       _mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */
+
+       struct mali_page_directory *page_directory; /**< MMU page directory for this session */
+
+       _MALI_OSK_LIST_HEAD(link); /**< Link for list of all sessions */
+       _MALI_OSK_LIST_HEAD(pp_job_list); /**< List of all PP jobs on this session */
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+       _mali_osk_atomic_t number_of_window_jobs; /**< Record the window jobs completed on this session in a period */
+#endif
+
+       _mali_osk_list_t pp_job_fb_lookup_list[MALI_PP_JOB_FB_LOOKUP_LIST_SIZE]; /**< List of PP job lists per frame builder id.  Used to link jobs from same frame builder. */
+
+       struct mali_soft_job_system *soft_job_system; /**< Soft job system for this session. */
+       struct mali_timeline_system *timeline_system; /**< Timeline system for this session. */
+
+       mali_bool is_aborting; /**< MALI_TRUE if the session is aborting, MALI_FALSE if not. */
+       mali_bool use_high_priority_job_queue; /**< If MALI_TRUE, jobs added from this session will use the high priority job queues. */
+};
+
+_mali_osk_errcode_t mali_session_initialize(void);
+void mali_session_terminate(void);
+
+/* List of all sessions. Actual list head is defined in mali_session.c. */
+extern _mali_osk_list_t mali_sessions;
+/* Lock to protect modification and access to the mali_sessions list */
+extern _mali_osk_spinlock_irq_t *mali_sessions_lock;
+
+MALI_STATIC_INLINE void mali_session_lock(void)
+{
+       _mali_osk_spinlock_irq_lock(mali_sessions_lock);
+}
+
+MALI_STATIC_INLINE void mali_session_unlock(void)
+{
+       _mali_osk_spinlock_irq_unlock(mali_sessions_lock);
+}
+
+void mali_session_add(struct mali_session_data *session);
+void mali_session_remove(struct mali_session_data *session);
+u32 mali_session_get_count(void);
+
+/* Iterate over all sessions; caller must hold the sessions lock. */
+#define MALI_SESSION_FOREACH(session, tmp, link) \
+       _MALI_OSK_LIST_FOREACHENTRY(session, tmp, &mali_sessions, struct mali_session_data, link)
+
+MALI_STATIC_INLINE struct mali_page_directory *mali_session_get_page_directory(struct mali_session_data *session)
+{
+       return session->page_directory;
+}
+
+MALI_STATIC_INLINE void mali_session_send_notification(struct mali_session_data *session, _mali_osk_notification_t *object)
+{
+       _mali_osk_notification_queue_send(session->ioctl_queue, object);
+}
+
+/*
+ * Get the max completed window jobs from all active sessions,
+ * which is used in the window-render frames-per-second calculation.
+ */
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+u32 mali_session_max_window_num(void);
+#endif
+
+#endif /* __MALI_SESSION_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_soft_job.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_soft_job.c
new file mode 100644 (file)
index 0000000..b03c083
--- /dev/null
@@ -0,0 +1,464 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_soft_job.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_timeline.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_scheduler.h"
+
+/* Take the soft job system spinlock; debug builds also record the owning
+ * thread id so MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED can verify ownership. */
+MALI_STATIC_INLINE void mali_soft_job_system_lock(struct mali_soft_job_system *system)
+{
+       MALI_DEBUG_ASSERT_POINTER(system);
+       _mali_osk_spinlock_irq_lock(system->lock);
+       MALI_DEBUG_PRINT(5, ("Mali Soft Job: soft system %p lock taken\n", system));
+       MALI_DEBUG_ASSERT(0 == system->lock_owner);
+       MALI_DEBUG_CODE(system->lock_owner = _mali_osk_get_tid());
+}
+
+/* Release the soft job system spinlock; debug builds first verify the
+ * calling thread is the recorded owner, then clear the owner field. */
+MALI_STATIC_INLINE void mali_soft_job_system_unlock(struct mali_soft_job_system *system)
+{
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_PRINT(5, ("Mali Soft Job: releasing soft system %p lock\n", system));
+       MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner);
+       MALI_DEBUG_CODE(system->lock_owner = 0);
+       _mali_osk_spinlock_irq_unlock(system->lock);
+}
+
+/* Debug-only assertion that the calling thread holds the soft job system
+ * lock; expands to nothing in release builds. */
+#if defined(DEBUG)
+MALI_STATIC_INLINE void mali_soft_job_system_assert_locked(struct mali_soft_job_system *system)
+{
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner);
+}
+#define MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system) mali_soft_job_system_assert_locked(system)
+#else
+#define MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system)
+#endif /* defined(DEBUG) */
+
+/* Allocate and initialize a per-session soft job system.
+ *
+ * All MALI_MAX_NUM_SOFT_JOBS job slots are embedded in the system struct and
+ * placed on the free list up front; no per-job allocation happens later.
+ *
+ * @param session Owning session; stored in the system.
+ * @return New system, or NULL on allocation/lock failure (partially
+ *         constructed state is cleaned up via mali_soft_job_system_destroy). */
+struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session)
+{
+       u32 i;
+       struct mali_soft_job_system *system;
+       struct mali_soft_job *job;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       system = (struct mali_soft_job_system *) _mali_osk_calloc(1, sizeof(struct mali_soft_job_system));
+       if (NULL == system) {
+               return NULL;
+       }
+
+       system->session = session;
+
+       system->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+       if (NULL == system->lock) {
+               mali_soft_job_system_destroy(system);
+               return NULL;
+       }
+       system->lock_owner = 0;
+
+       _MALI_OSK_INIT_LIST_HEAD(&(system->jobs_free));
+       _MALI_OSK_INIT_LIST_HEAD(&(system->jobs_used));
+
+       /* Put every slot on the free list and give it a fixed id equal to its
+        * array index (used by mali_soft_job_system_lookup_job). */
+       for (i = 0; i < MALI_MAX_NUM_SOFT_JOBS; ++i) {
+               job = &(system->jobs[i]);
+               _mali_osk_list_add(&(job->system_list), &(system->jobs_free));
+               job->system = system;
+               job->state = MALI_SOFT_JOB_STATE_FREE;
+               job->id = i;
+       }
+
+       return system;
+}
+
+/* Destroy a soft job system created by mali_soft_job_system_create().
+ *
+ * Tolerates a NULL or partially constructed system (NULL lock), so it can be
+ * used for cleanup on a failed create.
+ *
+ * Fix: the original ran the debug job-state walk before its NULL check on
+ * @system, dereferencing a potentially NULL pointer in debug builds; the
+ * guard now comes first.  Release-build behavior is unchanged. */
+void mali_soft_job_system_destroy(struct mali_soft_job_system *system)
+{
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       if (NULL == system) {
+               return;
+       }
+
+       /* All jobs should be free at this point. */
+       MALI_DEBUG_CODE( {
+               u32 i;
+               struct mali_soft_job *job;
+
+               for (i = 0; i < MALI_MAX_NUM_SOFT_JOBS; ++i)
+               {
+                       job = &(system->jobs[i]);
+                       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE == job->state);
+               }
+       });
+
+       if (NULL != system->lock) {
+               _mali_osk_spinlock_irq_term(system->lock);
+       }
+       _mali_osk_free(system);
+}
+
+/* Pop a job slot from the free list and move it to the used list.
+ *
+ * Caller must hold the system lock.
+ *
+ * @return Job in state ALLOCATED, or NULL if the free list is empty. */
+static struct mali_soft_job *mali_soft_job_system_alloc_job(struct mali_soft_job_system *system)
+{
+       struct mali_soft_job *job;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system);
+
+       if (_mali_osk_list_empty(&(system->jobs_free))) {
+               /* No jobs available. */
+               return NULL;
+       }
+
+       /* Grab first job and move it to the used list. */
+       job = _MALI_OSK_LIST_ENTRY(system->jobs_free.next, struct mali_soft_job, system_list);
+       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE == job->state);
+
+       _mali_osk_list_move(&(job->system_list), &(system->jobs_used));
+       job->state = MALI_SOFT_JOB_STATE_ALLOCATED;
+
+       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
+       MALI_DEBUG_ASSERT(system == job->system);
+
+       return job;
+}
+
+/* Return a job slot to the free list.
+ *
+ * Takes and releases the system lock itself, so the caller must NOT hold it. */
+static void mali_soft_job_system_free_job(struct mali_soft_job_system *system, struct mali_soft_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       mali_soft_job_system_lock(job->system);
+
+       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE != job->state);
+       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
+       MALI_DEBUG_ASSERT(system == job->system);
+
+       job->state = MALI_SOFT_JOB_STATE_FREE;
+       _mali_osk_list_move(&(job->system_list), &(system->jobs_free));
+
+       mali_soft_job_system_unlock(job->system);
+}
+
+/* Translate a job id into its slot in the embedded job array.
+ *
+ * Caller must hold the system lock.
+ *
+ * @return Job slot for @job_id, or NULL if the id is out of range. */
+MALI_STATIC_INLINE struct mali_soft_job *mali_soft_job_system_lookup_job(struct mali_soft_job_system *system, u32 job_id)
+{
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system);
+
+       if (job_id >= MALI_MAX_NUM_SOFT_JOBS) {
+               return NULL;
+       }
+
+       return &(system->jobs[job_id]);
+}
+
+/* Drop one reference to a soft job; the last reference releases the
+ * refcount, deletes any unsent activated-notification, and returns the job
+ * slot to the free list.
+ *
+ * Fix: the original dereferenced @job (debug print of job->id and the assert
+ * on job->system) before its NULL check; the guard now comes first.
+ * Release-build behavior is unchanged. */
+void mali_soft_job_destroy(struct mali_soft_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       if (NULL == job) {
+               return;
+       }
+
+       MALI_DEBUG_ASSERT_POINTER(job->system);
+
+       MALI_DEBUG_PRINT(4, ("Mali Soft Job: destroying soft job %u (0x%08X)\n", job->id, job));
+
+       if (0 < _mali_osk_atomic_dec_return(&job->refcount)) return;
+
+       _mali_osk_atomic_term(&job->refcount);
+
+       if (NULL != job->activated_notification) {
+               _mali_osk_notification_delete(job->activated_notification);
+               job->activated_notification = NULL;
+       }
+
+       mali_soft_job_system_free_job(job->system, job);
+}
+
+/* Allocate a soft job from the system's free list.
+ *
+ * For user-signaled jobs an activation notification is pre-allocated before
+ * taking the lock; it is deleted again if no job slot is available.
+ *
+ * @param system Soft job system to allocate from.
+ * @param type Job type; must be <= MALI_SOFT_JOB_TYPE_USER_SIGNALED.
+ * @param user_job User-space cookie echoed back in the activation notification.
+ * @return New job with refcount 1, or NULL on failure. */
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u32 user_job)
+{
+       struct mali_soft_job *job;
+       _mali_osk_notification_t *notification = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_TYPE_USER_SIGNALED >= type);
+
+       if (MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) {
+               notification = _mali_osk_notification_create(_MALI_NOTIFICATION_SOFT_ACTIVATED, sizeof(_mali_uk_soft_job_activated_s));
+               if (unlikely(NULL == notification)) {
+                       MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate notification"));
+                       return NULL;
+               }
+       }
+
+       mali_soft_job_system_lock(system);
+
+       job = mali_soft_job_system_alloc_job(system);
+       if (NULL == job) {
+               mali_soft_job_system_unlock(system);
+               MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate job"));
+               _mali_osk_notification_delete(notification);
+               return NULL;
+       }
+
+       job->type = type;
+       job->user_job = user_job;
+       job->activated = MALI_FALSE;
+
+       if (MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) {
+               /* Ownership of the notification moves to the job; it is sent or
+                * deleted in activation/destroy paths. */
+               job->activated_notification = notification;
+       }
+
+       _mali_osk_atomic_init(&job->refcount, 1);
+
+       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state);
+       MALI_DEBUG_ASSERT(system == job->system);
+       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
+
+       mali_soft_job_system_unlock(system);
+
+       return job;
+}
+
+/* Start an allocated soft job: mark it STARTED and register its tracker with
+ * the session's Timeline system using the given fence.
+ *
+ * @param job Job in state ALLOCATED.
+ * @param fence Timeline fence the job depends on.
+ * @return Timeline point for the added tracker. */
+mali_timeline_point mali_soft_job_start(struct mali_soft_job *job, struct mali_timeline_fence *fence)
+{
+       mali_timeline_point point;
+       struct mali_soft_job_system *system;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_POINTER(fence);
+
+       MALI_DEBUG_ASSERT_POINTER(job->system);
+       system = job->system;
+
+       MALI_DEBUG_ASSERT_POINTER(system->session);
+       MALI_DEBUG_ASSERT_POINTER(system->session->timeline_system);
+
+       mali_soft_job_system_lock(system);
+
+       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state);
+       job->state = MALI_SOFT_JOB_STATE_STARTED;
+
+       mali_soft_job_system_unlock(system);
+
+       MALI_DEBUG_PRINT(4, ("Mali Soft Job: starting soft job %u (0x%08X)\n", job->id, job));
+
+       /* The tracker is added outside the system lock; the Timeline system
+        * takes over activation/timeout handling from here. */
+       mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_SOFT, fence, job);
+       point = mali_timeline_system_add_tracker(system->session->timeline_system, &job->tracker, MALI_TIMELINE_SOFT);
+
+       return point;
+}
+
+/* Wait queue predicate: true once the job has been activated.
+ * @param data Opaque pointer to a struct mali_soft_job. */
+static mali_bool mali_soft_job_is_activated(void *data)
+{
+       struct mali_soft_job *job = (struct mali_soft_job *) data;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       return job->activated;
+}
+
+/* Signal a user-signaled soft job by id.
+ *
+ * Moves the job to SIGNALED, blocks on the Timeline wait queue until the job
+ * has been activated, then releases its tracker and drops the reference.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND for an
+ *         unknown id or wrong state, _MALI_OSK_ERR_TIMEOUT if the job had
+ *         already timed out. */
+_mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system *system, u32 job_id)
+{
+       struct mali_soft_job *job;
+       struct mali_timeline_system *timeline_system;
+       mali_scheduler_mask schedule_mask;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       mali_soft_job_system_lock(system);
+
+       job = mali_soft_job_system_lookup_job(system, job_id);
+
+       /* Only STARTED or TIMED_OUT jobs may be signaled. */
+       if (NULL == job || !(MALI_SOFT_JOB_STATE_STARTED == job->state || MALI_SOFT_JOB_STATE_TIMED_OUT == job->state)) {
+               mali_soft_job_system_unlock(system);
+               MALI_PRINT_ERROR(("Mali Soft Job: invalid soft job id %u", job_id));
+               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       }
+
+       if (MALI_SOFT_JOB_STATE_TIMED_OUT == job->state) {
+               /* Timeout already released the tracker; just drop our reference. */
+               job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+               mali_soft_job_system_unlock(system);
+
+               MALI_DEBUG_ASSERT(MALI_TRUE == job->activated);
+               MALI_DEBUG_PRINT(4, ("Mali Soft Job: soft job %u (0x%08X) was timed out\n", job->id, job));
+               mali_soft_job_destroy(job);
+
+               return _MALI_OSK_ERR_TIMEOUT;
+       }
+
+       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+       job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+       mali_soft_job_system_unlock(system);
+
+       /* Since the job now is in signaled state, timeouts from the timeline system will be
+        * ignored, and it is not possible to signal this job again. */
+
+       timeline_system = system->session->timeline_system;
+       MALI_DEBUG_ASSERT_POINTER(timeline_system);
+
+       /* Wait until activated. */
+       _mali_osk_wait_queue_wait_event(timeline_system->wait_queue, mali_soft_job_is_activated, (void *) job);
+
+       MALI_DEBUG_PRINT(4, ("Mali Soft Job: signaling soft job %u (0x%08X)\n", job->id, job));
+
+       /* Releasing the tracker may make dependent jobs runnable. */
+       schedule_mask = mali_timeline_tracker_release(&job->tracker);
+       mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+
+       mali_soft_job_destroy(job);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Send the job's pre-allocated activation notification (if any) to the
+ * session's ioctl queue.  Ownership transfers to the queue, so the job's
+ * pointer is cleared afterwards. */
+static void mali_soft_job_send_activated_notification(struct mali_soft_job *job)
+{
+       if (NULL != job->activated_notification) {
+               _mali_uk_soft_job_activated_s *res = job->activated_notification->result_buffer;
+               res->user_job = job->user_job;
+               mali_session_send_notification(job->system->session, job->activated_notification);
+       }
+       job->activated_notification = NULL;
+}
+
+/* Called by the Timeline system when all of a soft job's dependencies have
+ * been resolved.  Sends the ACTIVATED notification to user-space and wakes
+ * any thread sleeping in mali_soft_job_system_signal_job() waiting for
+ * activation.  If the session is aborting, the tracker is released and the
+ * job destroyed here instead. */
+void mali_soft_job_system_activate_job(struct mali_soft_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_POINTER(job->system);
+       MALI_DEBUG_ASSERT_POINTER(job->system->session);
+
+       MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeline activation for soft job %u (0x%08X).\n", job->id, job));
+
+       mali_soft_job_system_lock(job->system);
+
+       if (unlikely(job->system->session->is_aborting)) {
+               MALI_DEBUG_PRINT(3, ("Mali Soft Job: Soft job %u (0x%08X) activated while session is aborting.\n", job->id, job));
+
+               mali_soft_job_system_unlock(job->system);
+
+               /* Since we are in shutdown, we can ignore the scheduling bitmask. */
+               mali_timeline_tracker_release(&job->tracker);
+               mali_soft_job_destroy(job);
+               return;
+       }
+
+       /* Send activated notification. */
+       mali_soft_job_send_activated_notification(job);
+
+       /* Wake up sleeping signaler. */
+       job->activated = MALI_TRUE;
+       _mali_osk_wait_queue_wake_up(job->tracker.system->wait_queue);
+
+       mali_soft_job_system_unlock(job->system);
+}
+
+/* Called by the Timeline system when a soft job has been active too long
+ * without being signaled.  Moves a STARTED job to the TIMED_OUT state and
+ * releases its timeline tracker; returns the resulting scheduling bitmask.
+ * Timeouts on jobs whose session is aborting, or that are already signaled,
+ * are ignored. */
+mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job)
+{
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_POINTER(job->system);
+       MALI_DEBUG_ASSERT_POINTER(job->system->session);
+       MALI_DEBUG_ASSERT(MALI_TRUE == job->activated);
+
+       MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeline timeout for soft job %u (0x%08X).\n", job->id, job));
+
+       mali_soft_job_system_lock(job->system);
+
+       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED  == job->state ||
+                         MALI_SOFT_JOB_STATE_SIGNALED == job->state);
+
+       if (unlikely(job->system->session->is_aborting)) {
+               /* The session is aborting.  This job will be released and destroyed by @ref
+                * mali_soft_job_system_abort(). */
+               mali_soft_job_system_unlock(job->system);
+
+               return MALI_SCHEDULER_MASK_EMPTY;
+       }
+
+       if (MALI_SOFT_JOB_STATE_STARTED != job->state) {
+               MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_SIGNALED == job->state);
+
+               /* The job is about to be signaled, ignore timeout. */
+               MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeout on soft job %u (0x%08X) in signaled state.\n", job->id, job));
+               mali_soft_job_system_unlock(job->system);
+               return schedule_mask;
+       }
+
+       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+       job->state = MALI_SOFT_JOB_STATE_TIMED_OUT;
+       /* Extra reference keeps the job alive until user-space signals it; the
+        * MALI_SOFT_JOB_STATE_TIMED_OUT branch of mali_soft_job_system_signal_job()
+        * destroys the job once it has been signaled. */
+       _mali_osk_atomic_inc(&job->refcount);
+
+       mali_soft_job_system_unlock(job->system);
+
+       schedule_mask = mali_timeline_tracker_release(&job->tracker);
+
+       /* mali_soft_job_destroy() presumably drops one reference (body not in
+        * view); the reference taken above keeps the job valid for the signaler.
+        * TODO(review): confirm against mali_soft_job_destroy(). */
+       mali_soft_job_destroy(job);
+
+       return schedule_mask;
+}
+
+/* Abort all soft jobs in the system as part of session teardown.
+ *
+ * Started jobs that have already been activated get their trackers released
+ * and are destroyed here.  Started jobs not yet activated are left alone:
+ * mali_soft_job_system_activate_job() releases and destroys them when the
+ * Timeline system activates them (it checks session->is_aborting).  Timed-out
+ * jobs are destroyed here too; their trackers were already released by
+ * mali_soft_job_system_timeout_job(). */
+void mali_soft_job_system_abort(struct mali_soft_job_system *system)
+{
+       u32 i;
+       struct mali_soft_job *job, *tmp;
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(jobs);
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(system->session);
+       MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+       MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting soft job system for session 0x%08X.\n", system->session));
+
+       mali_soft_job_system_lock(system);
+
+       /* Phase 1: while holding the lock, collect jobs needing cleanup on a
+        * local list; trackers are released after the lock is dropped. */
+       for (i = 0; i < MALI_MAX_NUM_SOFT_JOBS; ++i) {
+               job = &(system->jobs[i]);
+
+               MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE      == job->state ||
+                                 MALI_SOFT_JOB_STATE_STARTED   == job->state ||
+                                 MALI_SOFT_JOB_STATE_TIMED_OUT == job->state);
+
+               if (MALI_SOFT_JOB_STATE_STARTED == job->state) {
+                       /* If the job has been activated, we have to release the tracker and destroy
+                        * the job.  If not, the tracker will be released and the job destroyed when
+                        * it is activated. */
+                       if (MALI_TRUE == job->activated) {
+                               MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting unsignaled soft job %u (0x%08X).\n", job->id, job));
+
+                               job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+                               _mali_osk_list_move(&job->system_list, &jobs);
+                       }
+               } else if (MALI_SOFT_JOB_STATE_TIMED_OUT == job->state) {
+                       MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting timed out soft job %u (0x%08X).\n", job->id, job));
+
+                       /* We need to destroy this soft job. */
+                       _mali_osk_list_move(&job->system_list, &jobs);
+               }
+       }
+
+       mali_soft_job_system_unlock(system);
+
+       /* Phase 2: release and destroy the collected jobs outside the lock. */
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &jobs, struct mali_soft_job, system_list) {
+               MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_SIGNALED  == job->state ||
+                                 MALI_SOFT_JOB_STATE_TIMED_OUT == job->state);
+
+               if (MALI_SOFT_JOB_STATE_SIGNALED == job->state) {
+                       mali_timeline_tracker_release(&job->tracker);
+               }
+
+               /* Move job back to used list before destroying. */
+               _mali_osk_list_move(&job->system_list, &system->jobs_used);
+
+               mali_soft_job_destroy(job);
+       }
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_soft_job.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_soft_job.h
new file mode 100644 (file)
index 0000000..dd0df68
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_SOFT_JOB_H__
+#define __MALI_SOFT_JOB_H__
+
+#include "mali_osk.h"
+
+#include "mali_timeline.h"
+
+struct mali_timeline_fence;
+struct mali_session_data;
+struct mali_soft_job;
+struct mali_soft_job_system;
+
+/**
+ * Soft job types.
+ *
+ * Soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED will only complete after activation if either
+ * they are signaled by user-space (@ref mali_soft_job_system_signal_job) or if they are timed out
+ * by the Timeline system.
+ */
+typedef enum mali_soft_job_type {
+       MALI_SOFT_JOB_TYPE_USER_SIGNALED, /**< Completed by a user-space signal or a Timeline timeout (see above). */
+} mali_soft_job_type;
+
+/**
+ * Soft job state.
+ *
+ * All soft jobs in a soft job system will initially be in state MALI_SOFT_JOB_STATE_FREE.  On @ref
+ * mali_soft_job_system_start_job a job will first be allocated.  A job in state
+ * MALI_SOFT_JOB_STATE_FREE will be picked and the state changed to MALI_SOFT_JOB_STATE_ALLOCATED.
+ * Once the job is added to the timeline system, the state changes to MALI_SOFT_JOB_STATE_STARTED.
+ *
+ * For soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED the state is changed to
+ * MALI_SOFT_JOB_STATE_SIGNALED when @ref mali_soft_job_system_signal_job is called and the soft
+ * job's state is MALI_SOFT_JOB_STATE_STARTED or MALI_SOFT_JOB_STATE_TIMED_OUT.
+ *
+ * If a soft job of type MALI_SOFT_JOB_TYPE_USER_SIGNALED is timed out before being signaled, the
+ * state is changed to MALI_SOFT_JOB_STATE_TIMED_OUT.  This can only happen to soft jobs in state
+ * MALI_SOFT_JOB_STATE_STARTED.
+ *
+ * When a soft job's reference count reaches zero, it will be freed and the state returns to
+ * MALI_SOFT_JOB_STATE_FREE.
+ */
+typedef enum mali_soft_job_state {
+       MALI_SOFT_JOB_STATE_FREE,      /**< Not in use; available for allocation. */
+       MALI_SOFT_JOB_STATE_ALLOCATED, /**< Picked from the free list, not yet added to the Timeline system. */
+       MALI_SOFT_JOB_STATE_STARTED,   /**< Added to the Timeline system. */
+       MALI_SOFT_JOB_STATE_SIGNALED,  /**< Signaled by user-space (or during session abort). */
+       MALI_SOFT_JOB_STATE_TIMED_OUT, /**< Timed out by the Timeline system before being signaled. */
+} mali_soft_job_state;
+
+#define MALI_SOFT_JOB_INVALID_ID ((u32) -1)
+
+/* Maximum number of soft jobs per soft system. */
+#define MALI_MAX_NUM_SOFT_JOBS 20
+
+/**
+ * Soft job struct.
+ *
+ * Soft job can be used to represent any kind of CPU work done in kernel-space.
+ */
+typedef struct mali_soft_job {
+       /* Jobs are embedded in mali_soft_job_system::jobs and recycled via the
+        * free/used lists rather than allocated per job. */
+       mali_soft_job_type            type;                   /**< Soft job type.  Must be one of MALI_SOFT_JOB_TYPE_*. */
+       u32                           user_job;               /**< Identifier for soft job in user space. */
+       _mali_osk_atomic_t            refcount;               /**< Soft jobs are reference counted to prevent premature deletion. */
+       struct mali_timeline_tracker  tracker;                /**< Timeline tracker for soft job (embedded, not a pointer). */
+       mali_bool                     activated;              /**< MALI_TRUE if the job has been activated, MALI_FALSE if not. */
+       _mali_osk_notification_t     *activated_notification; /**< Pre-allocated notification object for ACTIVATED_NOTIFICATION. */
+
+       /* Protected by soft job system lock. */
+       u32                           id;                     /**< Used by user-space to find corresponding soft job in kernel-space. */
+       mali_soft_job_state           state;                  /**< State of soft job, must be one of MALI_SOFT_JOB_STATE_*. */
+       struct mali_soft_job_system  *system;                 /**< The soft job system this job is in. */
+       _mali_osk_list_t              system_list;            /**< List element used by soft job system. */
+} mali_soft_job;
+
+/**
+ * Per-session soft job system.
+ *
+ * The soft job system is used to manage all soft jobs that belong to a session.
+ */
+typedef struct mali_soft_job_system {
+       struct mali_session_data *session;                    /**< The session this soft job system belongs to. */
+
+       /* Every job lives in the embedded array below; the two lists partition
+        * the array entries into free and in-use jobs. */
+       struct mali_soft_job jobs[MALI_MAX_NUM_SOFT_JOBS];    /**< Array of all soft jobs in this system. */
+       _MALI_OSK_LIST_HEAD(jobs_free);                       /**< List of all free soft jobs. */
+       _MALI_OSK_LIST_HEAD(jobs_used);                       /**< List of all allocated soft jobs. */
+
+       _mali_osk_spinlock_irq_t *lock;                       /**< Lock used to protect soft job system and its soft jobs. */
+       u32 lock_owner;                                       /**< Contains tid of thread that locked the system or 0, if not locked. */
+} mali_soft_job_system;
+
+/**
+ * Create a soft job system.
+ *
+ * @param session The session this soft job system will belong to.
+ * @return The new soft job system, or NULL if unsuccessful.
+ */
+struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session);
+
+/**
+ * Destroy a soft job system.
+ *
+ * @note The soft job must not have any started or activated jobs.  Call @ref
+ * mali_soft_job_system_abort first.
+ *
+ * @param system The soft job system we are destroying.
+ */
+void mali_soft_job_system_destroy(struct mali_soft_job_system *system);
+
+/**
+ * Create a soft job.
+ *
+ * @param system Soft job system to create soft job from.
+ * @param type Type of the soft job.
+ * @param user_job Identifier for soft job in user space.
+ * @return New soft job if successful, NULL if not.
+ */
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u32 user_job);
+
+/**
+ * Destroy soft job.
+ *
+ * @param job Soft job to destroy.
+ */
+void mali_soft_job_destroy(struct mali_soft_job *job);
+
+/**
+ * Start a soft job.
+ *
+ * The soft job will be added to the Timeline system which will then activate it after all
+ * dependencies have been resolved.
+ *
+ * Create soft jobs with @ref mali_soft_job_create before starting them.
+ *
+ * @param job Soft job to start.
+ * @param fence Fence representing dependencies for this soft job.
+ * @return Point on soft job timeline.
+ */
+mali_timeline_point mali_soft_job_start(struct mali_soft_job *job, struct mali_timeline_fence *fence);
+
+/**
+ * Used by user-space to signal that a soft job has completed.
+ *
+ * @note Only valid for soft jobs with type MALI_SOFT_JOB_TYPE_USER_SIGNALED.
+ *
+ * @note The soft job must be in state MALI_SOFT_JOB_STATE_STARTED for the signal to be successful.
+ *
+ * @note If the soft job was signaled successfully, or it received a time out, the soft job will be
+ * destroyed after this call and should no longer be used.
+ *
+ * @note This function will block until the soft job has been activated.
+ *
+ * @param system The soft job system the job was started in.
+ * @param job_id ID of soft job we are signaling.
+ *
+ * @return _MALI_OSK_ERR_ITEM_NOT_FOUND if the soft job ID was invalid, _MALI_OSK_ERR_TIMEOUT if the
+ * soft job was timed out or _MALI_OSK_ERR_OK if we successfully signaled the soft job.
+ */
+_mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system *system, u32 job_id);
+
+/**
+ * Used by the Timeline system to activate a soft job.
+ *
+ * @param job The soft job that is being activated.
+ */
+void mali_soft_job_system_activate_job(struct mali_soft_job *job);
+
+/**
+ * Used by the Timeline system to timeout a soft job.
+ *
+ * A soft job is timed out if it completes or is signaled later than MALI_TIMELINE_TIMEOUT_HZ after
+ * activation.
+ *
+ * @param job The soft job that is being timed out.
+ * @return A scheduling bitmask.
+ */
+mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job);
+
+/**
+ * Used to cleanup activated soft jobs in the soft job system on session abort.
+ *
+ * @param system The soft job system that is being aborted.
+ */
+void mali_soft_job_system_abort(struct mali_soft_job_system *system);
+
+#endif /* __MALI_SOFT_JOB_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_spinlock_reentrant.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_spinlock_reentrant.c
new file mode 100644 (file)
index 0000000..1778972
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_spinlock_reentrant.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* Allocate and initialize a reentrant spinlock with the given lock order.
+ * Returns the new spinlock, or NULL if either the allocation or the creation
+ * of the underlying IRQ spinlock fails. */
+struct mali_spinlock_reentrant *mali_spinlock_reentrant_init(_mali_osk_lock_order_t lock_order)
+{
+       struct mali_spinlock_reentrant *spinlock =
+               _mali_osk_calloc(1, sizeof(struct mali_spinlock_reentrant));
+
+       if (NULL != spinlock) {
+               spinlock->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, lock_order);
+               if (NULL != spinlock->lock) {
+                       return spinlock;
+               }
+
+               /* Underlying lock creation failed; term() copes with a NULL lock. */
+               mali_spinlock_reentrant_term(spinlock);
+       }
+
+       return NULL;
+}
+
+/* Terminate a reentrant spinlock and free its memory.
+ *
+ * The lock must not be held (owner and counter must both be zero).  The
+ * underlying lock may be NULL when this is called from
+ * mali_spinlock_reentrant_init()'s error path. */
+void mali_spinlock_reentrant_term(struct mali_spinlock_reentrant *spinlock)
+{
+       MALI_DEBUG_ASSERT_POINTER(spinlock);
+       MALI_DEBUG_ASSERT(0 == spinlock->counter && 0 == spinlock->owner);
+
+       if (NULL != spinlock->lock) {
+               _mali_osk_spinlock_irq_term(spinlock->lock);
+       }
+
+       _mali_osk_free(spinlock);
+}
+
+/* Acquire the reentrant spinlock on behalf of thread tid.
+ *
+ * If tid already owns the lock only the recursion counter is incremented;
+ * otherwise the underlying IRQ spinlock is taken first and ownership is
+ * recorded.  Must be balanced by mali_spinlock_reentrant_signal(). */
+void mali_spinlock_reentrant_wait(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+       MALI_DEBUG_ASSERT_POINTER(spinlock);
+       MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+       MALI_DEBUG_ASSERT(0 != tid);
+
+       MALI_DEBUG_PRINT(5, ("%s ^\n", __FUNCTION__));
+
+       /* Unlocked read of owner: it can only equal tid if this same thread set
+        * it while holding the lock, so the comparison is stable here. */
+       if (tid != spinlock->owner) {
+               _mali_osk_spinlock_irq_lock(spinlock->lock);
+               MALI_DEBUG_ASSERT(0 == spinlock->owner && 0 == spinlock->counter);
+               spinlock->owner = tid;
+       }
+
+       MALI_DEBUG_PRINT(5, ("%s v\n", __FUNCTION__));
+
+       ++spinlock->counter;
+}
+
+/* Release one level of the reentrant spinlock for thread tid.
+ *
+ * The underlying IRQ spinlock is only dropped when the outermost hold is
+ * released, i.e. when the recursion counter reaches zero. */
+void mali_spinlock_reentrant_signal(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+       MALI_DEBUG_ASSERT_POINTER(spinlock);
+       MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+       MALI_DEBUG_ASSERT(0 != tid && tid == spinlock->owner);
+
+       --spinlock->counter;
+       if (0 == spinlock->counter) {
+               spinlock->owner = 0;
+               MALI_DEBUG_PRINT(5, ("%s release last\n", __FUNCTION__));
+               _mali_osk_spinlock_irq_unlock(spinlock->lock);
+       }
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_spinlock_reentrant.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_spinlock_reentrant.h
new file mode 100644 (file)
index 0000000..d112f5f
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_SPINLOCK_REENTRANT_H__
+#define __MALI_SPINLOCK_REENTRANT_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * Reentrant spinlock.
+ */
+struct mali_spinlock_reentrant {
+       _mali_osk_spinlock_irq_t *lock;    /**< Underlying IRQ spinlock. */
+       u32               owner;           /**< Thread id of the current holder, or 0 when unlocked. */
+       u32               counter;         /**< Recursion depth for the owning thread. */
+};
+
+/**
+ * Create a new reentrant spinlock.
+ *
+ * @param lock_order Lock order.
+ * @return New reentrant spinlock.
+ */
+struct mali_spinlock_reentrant *mali_spinlock_reentrant_init(_mali_osk_lock_order_t lock_order);
+
+/**
+ * Terminate reentrant spinlock and free any associated resources.
+ *
+ * @param spinlock Reentrant spinlock to terminate.
+ */
+void mali_spinlock_reentrant_term(struct mali_spinlock_reentrant *spinlock);
+
+/**
+ * Wait for reentrant spinlock to be signaled.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ */
+void mali_spinlock_reentrant_wait(struct mali_spinlock_reentrant *spinlock, u32 tid);
+
+/**
+ * Signal reentrant spinlock.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ */
+void mali_spinlock_reentrant_signal(struct mali_spinlock_reentrant *spinlock, u32 tid);
+
+/**
+ * Check whether thread tid currently holds the reentrant spinlock.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ * @return MALI_TRUE if thread is holding spinlock, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_spinlock_reentrant_is_held(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+       MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+
+       return (mali_bool)((spinlock->owner == tid) && (spinlock->counter > 0));
+}
+
+#endif /* __MALI_SPINLOCK_REENTRANT_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_timeline.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_timeline.c
new file mode 100644 (file)
index 0000000..a2eade6
--- /dev/null
@@ -0,0 +1,1374 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_timeline.h"
+#include "mali_kernel_common.h"
+#include "mali_osk_mali.h"
+#include "mali_scheduler.h"
+#include "mali_soft_job.h"
+#include "mali_timeline_fence_wait.h"
+#include "mali_timeline_sync_fence.h"
+
+#define MALI_TIMELINE_SYSTEM_LOCKED(system) (mali_spinlock_reentrant_is_held((system)->spinlock, _mali_osk_get_tid()))
+
+static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system,
+        struct mali_timeline_waiter *waiter);
+
+#if defined(CONFIG_SYNC)
+/* Callback invoked by the sync framework when a sync fence that a tracker is
+ * waiting on is signaled.  Releases the tracker's sync waiter and, unless
+ * the session is aborting, schedules from the resulting mask. */
+static void mali_timeline_sync_fence_callback(struct sync_fence *sync_fence, struct sync_fence_waiter *sync_fence_waiter)
+{
+       struct mali_timeline_system  *system;
+       struct mali_timeline_waiter  *waiter;
+       struct mali_timeline_tracker *tracker;
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+       u32 tid = _mali_osk_get_tid();
+       mali_bool is_aborting = MALI_FALSE;
+       /* NOTE(review): sync_fence is dereferenced here, before the pointer
+        * assert below; harmless in release builds but it weakens the assert. */
+       int fence_status = sync_fence->status;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_fence);
+       MALI_DEBUG_ASSERT_POINTER(sync_fence_waiter);
+
+       tracker = _MALI_OSK_CONTAINER_OF(sync_fence_waiter, struct mali_timeline_tracker, sync_fence_waiter);
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+
+       system = tracker->system;
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(system->session);
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       /* A negative fence status means the fence signaled with an error. */
+       is_aborting = system->session->is_aborting;
+       if (!is_aborting && (0 > fence_status)) {
+               MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, fence_status));
+               tracker->activation_error |= MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT;
+       }
+
+       waiter = tracker->waiter_sync;
+       MALI_DEBUG_ASSERT_POINTER(waiter);
+
+       tracker->sync_fence = NULL;
+       schedule_mask |= mali_timeline_system_release_waiter(system, waiter);
+
+       /* If aborting, wake up sleepers that are waiting for sync fence callbacks to complete. */
+       if (is_aborting) {
+               _mali_osk_wait_queue_wake_up(system->wait_queue);
+       }
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+       /* Drop the fence reference held while waiting -- presumably taken where
+        * the waiter was registered; confirm against the registration site. */
+       sync_fence_put(sync_fence);
+
+       if (!is_aborting) {
+               mali_scheduler_schedule_from_mask(schedule_mask, MALI_TRUE);
+       }
+}
+#endif /* defined(CONFIG_SYNC) */
+
+/* Time out a soft job tracker by forwarding to the soft job system.  Only
+ * valid for trackers of type MALI_TIMELINE_TRACKER_SOFT (asserted). */
+static mali_scheduler_mask mali_timeline_tracker_time_out(struct mali_timeline_tracker *tracker)
+{
+       struct mali_soft_job *soft_job;
+
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_SOFT == tracker->type);
+
+       soft_job = (struct mali_soft_job *) tracker->job;
+
+       return mali_soft_job_system_timeout_job(soft_job);
+}
+
+/* Delayed-work callback for a timeline's soft job timeout timer.
+ *
+ * Times out the oldest (tail) tracker on the timeline if it has been active
+ * for at least MALI_TIMELINE_TIMEOUT_HZ ticks, then schedules from the
+ * resulting mask.  Does nothing if the system's timer has been disabled
+ * (see mali_timeline_system_stop_timer()). */
+static void mali_timeline_timer_callback(void *data)
+{
+       struct mali_timeline_system *system;
+       struct mali_timeline_tracker *tracker;
+       struct mali_timeline *timeline;
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+       u32 tid = _mali_osk_get_tid();
+
+       timeline = (struct mali_timeline *) data;
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+
+       system = timeline->system;
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       if (!system->timer_enabled) {
+               mali_spinlock_reentrant_signal(system->spinlock, tid);
+               return;
+       }
+
+       tracker = timeline->tracker_tail;
+       timeline->timer_active = MALI_FALSE;
+
+       if (NULL != tracker && MALI_TRUE == tracker->timer_active) {
+               /* This is likely delayed work that was scheduled out before it could
+                * be cancelled; if the tracker has not yet been active for the full
+                * timeout period, ignore the callback. */
+               if (MALI_TIMELINE_TIMEOUT_HZ > (_mali_osk_time_tickcount() - tracker->os_tick_activate)) {
+                       mali_spinlock_reentrant_signal(system->spinlock, tid);
+                       return;
+               }
+
+               schedule_mask = mali_timeline_tracker_time_out(tracker);
+               tracker->timer_active = MALI_FALSE;
+       } else {
+               MALI_PRINT_ERROR(("Mali Timeline: Soft job timer callback without a waiting tracker.\n"));
+       }
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+       mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+/* Disable the soft job timeout timer for the whole system and synchronously
+ * cancel any pending delayed work on every timeline. */
+void mali_timeline_system_stop_timer(struct mali_timeline_system *system)
+{
+       u32 idx;
+       u32 tid = _mali_osk_get_tid();
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       /* Prevent the timer callback from doing any further work. */
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+       system->timer_enabled = MALI_FALSE;
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+       for (idx = 0; idx < MALI_TIMELINE_MAX; ++idx) {
+               struct mali_timeline *timeline = system->timelines[idx];
+
+               MALI_DEBUG_ASSERT_POINTER(timeline);
+
+               if (NULL == timeline->delayed_work) {
+                       continue;
+               }
+
+               _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work);
+               timeline->timer_active = MALI_FALSE;
+       }
+}
+
+/* Free a timeline and its resources (sync timeline, delayed work).
+ *
+ * The timeline must already be empty: no trackers or waiters, and no
+ * outstanding points (asserted below).  Also safe on a partially
+ * constructed timeline from mali_timeline_create()'s error paths, since
+ * the optional members are NULL-checked. */
+static void mali_timeline_destroy(struct mali_timeline *timeline)
+{
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+       if (NULL != timeline) {
+               /* Assert that the timeline object has been properly cleaned up before destroying it. */
+               MALI_DEBUG_ASSERT(timeline->point_oldest == timeline->point_next);
+               MALI_DEBUG_ASSERT(NULL == timeline->tracker_head);
+               MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail);
+               MALI_DEBUG_ASSERT(NULL == timeline->waiter_head);
+               MALI_DEBUG_ASSERT(NULL == timeline->waiter_tail);
+               MALI_DEBUG_ASSERT(NULL != timeline->system);
+               MALI_DEBUG_ASSERT(MALI_TIMELINE_MAX > timeline->id);
+
+#if defined(CONFIG_SYNC)
+               if (NULL != timeline->sync_tl) {
+                       sync_timeline_destroy(timeline->sync_tl);
+               }
+#endif /* defined(CONFIG_SYNC) */
+
+               if (NULL != timeline->delayed_work) {
+                       _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work);
+                       _mali_osk_wq_delayed_delete_work_nonflush(timeline->delayed_work);
+               }
+
+               _mali_osk_free(timeline);
+       }
+}
+
+/* Allocate and initialize one timeline for the given system.
+ *
+ * Sets up the initial point span, the soft job timeout delayed work and,
+ * when CONFIG_SYNC is enabled, a sync timeline named after the process and
+ * timeline id.  Returns NULL on any failure; partially constructed state is
+ * cleaned up via mali_timeline_destroy(). */
+static struct mali_timeline *mali_timeline_create(struct mali_timeline_system *system, enum mali_timeline_id id)
+{
+       struct mali_timeline *timeline;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT(id < MALI_TIMELINE_MAX);
+
+       timeline = (struct mali_timeline *) _mali_osk_calloc(1, sizeof(struct mali_timeline));
+       if (NULL == timeline) {
+               return NULL;
+       }
+
+       /* Initially the timeline is empty. */
+#if defined(MALI_TIMELINE_DEBUG_START_POINT)
+       /* Start the timeline a bit before wrapping when debugging. */
+       timeline->point_next = UINT_MAX - MALI_TIMELINE_MAX_POINT_SPAN - 128;
+#else
+       timeline->point_next = 1;
+#endif
+       timeline->point_oldest = timeline->point_next;
+
+       /* The tracker and waiter lists will initially be empty. */
+
+       timeline->system = system;
+       timeline->id = id;
+
+       timeline->delayed_work = _mali_osk_wq_delayed_create_work(mali_timeline_timer_callback, timeline);
+       if (NULL == timeline->delayed_work) {
+               mali_timeline_destroy(timeline);
+               return NULL;
+       }
+
+       timeline->timer_active = MALI_FALSE;
+
+#if defined(CONFIG_SYNC)
+       {
+               /* Name the sync timeline "mali-<pid>-<gp|pp|soft>" for debugging. */
+               char timeline_name[32];
+
+               switch (id) {
+               case MALI_TIMELINE_GP:
+                       _mali_osk_snprintf(timeline_name, 32, "mali-%u-gp", _mali_osk_get_pid());
+                       break;
+               case MALI_TIMELINE_PP:
+                       _mali_osk_snprintf(timeline_name, 32, "mali-%u-pp", _mali_osk_get_pid());
+                       break;
+               case MALI_TIMELINE_SOFT:
+                       _mali_osk_snprintf(timeline_name, 32, "mali-%u-soft", _mali_osk_get_pid());
+                       break;
+               default:
+                       MALI_PRINT_ERROR(("Mali Timeline: Invalid timeline id %d\n", id));
+                       mali_timeline_destroy(timeline);
+                       return NULL;
+               }
+
+               timeline->sync_tl = mali_sync_timeline_create(timeline_name);
+               if (NULL == timeline->sync_tl) {
+                       mali_timeline_destroy(timeline);
+                       return NULL;
+               }
+       }
+#endif /* defined(CONFIG_SYNC) */
+
+       return timeline;
+}
+
+/* Assign the timeline's next point to the tracker and link it in as the new
+ * head of the timeline's tracker list (head = newest, tail = oldest).  If
+ * the timeline is full the tracker gets MALI_TIMELINE_NO_POINT and is not
+ * added to the list. */
+static void mali_timeline_insert_tracker(struct mali_timeline *timeline, struct mali_timeline_tracker *tracker)
+{
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+
+       if (mali_timeline_is_full(timeline)) {
+               /* Don't add tracker if timeline is full. */
+               tracker->point = MALI_TIMELINE_NO_POINT;
+               return;
+       }
+
+       tracker->timeline = timeline;
+       tracker->point    = timeline->point_next;
+
+       /* Find next available point. */
+       timeline->point_next++;
+       if (MALI_TIMELINE_NO_POINT == timeline->point_next) {
+               /* Skip the reserved MALI_TIMELINE_NO_POINT value on wrap-around. */
+               timeline->point_next++;
+       }
+
+       MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+
+       /* Add tracker as new head on timeline's tracker list. */
+       if (NULL == timeline->tracker_head) {
+               /* Tracker list is empty. */
+               MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail);
+
+               timeline->tracker_tail = tracker;
+
+               MALI_DEBUG_ASSERT(NULL == tracker->timeline_next);
+               MALI_DEBUG_ASSERT(NULL == tracker->timeline_prev);
+       } else {
+               MALI_DEBUG_ASSERT(NULL == timeline->tracker_head->timeline_next);
+
+               tracker->timeline_prev = timeline->tracker_head;
+               timeline->tracker_head->timeline_next = tracker;
+
+               MALI_DEBUG_ASSERT(NULL == tracker->timeline_next);
+       }
+       timeline->tracker_head = tracker;
+
+       MALI_DEBUG_ASSERT(NULL == timeline->tracker_head->timeline_next);
+       MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail->timeline_prev);
+}
+
+/* Insert a waiter into the given timeline's waiter list.
+ *
+ * The list is ordered by timeline point (head = newest, tail = oldest); the
+ * insertion position is found by walking backwards from the head, and the
+ * four possible list shapes (empty, head, tail, middle) are handled
+ * separately. */
+static void mali_timeline_insert_waiter(struct mali_timeline *timeline, struct mali_timeline_waiter *waiter_new)
+{
+       struct mali_timeline_waiter *waiter_prev;
+       struct mali_timeline_waiter *waiter_next;
+
+       /* Waiter time must be between timeline head and tail, and there must
+        * be less than MALI_TIMELINE_MAX_POINT_SPAN elements between */
+       MALI_DEBUG_ASSERT(( waiter_new->point - timeline->point_oldest) < MALI_TIMELINE_MAX_POINT_SPAN);
+       MALI_DEBUG_ASSERT((-waiter_new->point + timeline->point_next) < MALI_TIMELINE_MAX_POINT_SPAN);
+
+       /* Finding out where to put this waiter, in the linked waiter list of the given timeline **/
+       waiter_prev = timeline->waiter_head; /* Insert new after  waiter_prev */
+       waiter_next = NULL;                  /* Insert new before waiter_next */
+
+       /* Iterating backwards from head (newest) to tail (oldest) until we
+        * find the correct spot to insert the new waiter */
+       while (waiter_prev && mali_timeline_point_after(waiter_prev->point, waiter_new->point)) {
+               waiter_next = waiter_prev;
+               waiter_prev = waiter_prev->timeline_prev;
+       }
+
+       if (NULL == waiter_prev && NULL == waiter_next) {
+               /* list is empty */
+               timeline->waiter_head = waiter_new;
+               timeline->waiter_tail = waiter_new;
+       } else if (NULL == waiter_next) {
+               /* insert at head */
+               waiter_new->timeline_prev = timeline->waiter_head;
+               timeline->waiter_head->timeline_next = waiter_new;
+               timeline->waiter_head = waiter_new;
+       } else if (NULL == waiter_prev) {
+               /* insert at tail */
+               waiter_new->timeline_next = timeline->waiter_tail;
+               timeline->waiter_tail->timeline_prev = waiter_new;
+               timeline->waiter_tail = waiter_new;
+       } else {
+               /* insert between */
+               waiter_new->timeline_next = waiter_next;
+               waiter_new->timeline_prev = waiter_prev;
+               waiter_next->timeline_prev = waiter_new;
+               waiter_prev->timeline_next = waiter_new;
+       }
+}
+
+/* (Re)arm or cancel the delayed timeout work for the soft-job timeline.
+ *
+ * Called with the timeline system spinlock held, and only for the
+ * MALI_TIMELINE_SOFT timeline.  If the oldest tracker has been triggered
+ * (trigger_ref_count == 0) but not yet released, delayed work is scheduled
+ * with a MALI_TIMELINE_TIMEOUT_HZ timeout so the soft job is released in a
+ * limited time; otherwise any pending timeout work is cancelled. */
+static void mali_timeline_update_delayed_work(struct mali_timeline *timeline)
+{
+       struct mali_timeline_system *system;
+       struct mali_timeline_tracker *oldest_tracker;
+
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SOFT == timeline->id);
+
+       system = timeline->system;
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+       /* Timer is disabled, early out. */
+       if (!system->timer_enabled) return;
+
+       oldest_tracker = timeline->tracker_tail;
+       if (NULL != oldest_tracker && 0 == oldest_tracker->trigger_ref_count) {
+               if (MALI_FALSE == oldest_tracker->timer_active) {
+                       /* Cancel any timer armed for a previous tracker before
+                        * re-scheduling for the new oldest one. */
+                       if (MALI_TRUE == timeline->timer_active) {
+                               _mali_osk_wq_delayed_cancel_work_async(timeline->delayed_work);
+                       }
+                       _mali_osk_wq_delayed_schedule_work(timeline->delayed_work, MALI_TIMELINE_TIMEOUT_HZ);
+                       oldest_tracker->timer_active = MALI_TRUE;
+                       timeline->timer_active = MALI_TRUE;
+               }
+       } else if (MALI_TRUE == timeline->timer_active) {
+               /* No triggered tracker at the tail: stop the pending timeout. */
+               _mali_osk_wq_delayed_cancel_work_async(timeline->delayed_work);
+               timeline->timer_active = MALI_FALSE;
+       }
+}
+
+/* Recompute the timeline's oldest point after the tracker tail changed, and
+ * release every waiter whose point is no longer on the timeline.
+ *
+ * Called with the timeline system lock held.  Returns a scheduler mask
+ * describing which schedulers need to run because of released waiters. */
+static mali_scheduler_mask mali_timeline_update_oldest_point(struct mali_timeline *timeline)
+{
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+
+       MALI_DEBUG_CODE({
+               struct mali_timeline_system *system = timeline->system;
+               MALI_DEBUG_ASSERT_POINTER(system);
+
+               MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+       });
+
+       if (NULL != timeline->tracker_tail) {
+               /* Set oldest point to oldest tracker's point */
+               timeline->point_oldest = timeline->tracker_tail->point;
+       } else {
+               /* No trackers, mark point list as empty */
+               timeline->point_oldest = timeline->point_next;
+       }
+
+       /* Release all waiters no longer on the timeline's point list.
+        * Releasing a waiter can trigger this function to be called again, so
+        * we do not store any pointers on stack. */
+       while (NULL != timeline->waiter_tail) {
+               u32 waiter_time_relative;
+               u32 time_head_relative;
+               struct mali_timeline_waiter *waiter = timeline->waiter_tail;
+
+               /* Distances are computed relative to point_oldest so that
+                * wrap-around of the point counter is handled correctly. */
+               time_head_relative = timeline->point_next - timeline->point_oldest;
+               waiter_time_relative = waiter->point - timeline->point_oldest;
+
+               if (waiter_time_relative < time_head_relative) {
+                       /* This and all following waiters are on the point list, so we are done. */
+                       break;
+               }
+
+               /* Remove waiter from timeline's waiter list. */
+               if (NULL != waiter->timeline_next) {
+                       waiter->timeline_next->timeline_prev = NULL;
+               } else {
+                       /* This was the last waiter */
+                       timeline->waiter_head = NULL;
+               }
+               timeline->waiter_tail = waiter->timeline_next;
+
+               /* Release waiter.  This could activate a tracker, if this was
+                * the last waiter for the tracker. */
+               schedule_mask |= mali_timeline_system_release_waiter(timeline->system, waiter);
+       }
+
+       return schedule_mask;
+}
+
+/* Initialize a tracker before it is added to a timeline.
+ *
+ * The tracker is zeroed and given an initial trigger reference so it cannot
+ * be activated while it is still being set up.  The supplied fence, if any,
+ * is copied into the tracker. */
+void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker,
+                                mali_timeline_tracker_type type,
+                                struct mali_timeline_fence *fence,
+                                void *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAX > type);
+
+       /* Zero out all tracker members. */
+       _mali_osk_memset(tracker, 0, sizeof(*tracker));
+
+       tracker->type = type;
+       tracker->job = job;
+       tracker->trigger_ref_count = 1;  /* Prevents any callback from trigging while adding it */
+       tracker->os_tick_create = _mali_osk_time_tickcount();
+       MALI_DEBUG_CODE(tracker->magic = MALI_TIMELINE_TRACKER_MAGIC);
+
+       tracker->activation_error = MALI_TIMELINE_ACTIVATION_ERROR_NONE;
+
+       /* Copy fence. */
+       if (NULL != fence) {
+               _mali_osk_memcpy(&tracker->fence, fence, sizeof(struct mali_timeline_fence));
+       }
+}
+
+/* Release a triggered tracker: unlink it from its timeline and, if it was
+ * the oldest tracker, advance the timeline's oldest point (which may in
+ * turn release waiters).  For the soft-job timeline the delayed timeout
+ * work is also updated.
+ *
+ * Returns a scheduler mask for any work made runnable by released waiters. */
+mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker)
+{
+       struct mali_timeline *timeline;
+       struct mali_timeline_system *system;
+       struct mali_timeline_tracker *tracker_next, *tracker_prev;
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+       u32 tid = _mali_osk_get_tid();
+
+       /* Upon entry a group lock will be held, but not a scheduler lock. */
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic);
+
+       /* Tracker should have been triggered */
+       MALI_DEBUG_ASSERT(0 == tracker->trigger_ref_count);
+
+       /* All waiters should have been released at this point */
+       MALI_DEBUG_ASSERT(NULL == tracker->waiter_head);
+       MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+
+       MALI_DEBUG_PRINT(3, ("Mali Timeline: releasing tracker for job 0x%08X\n", tracker->job));
+
+       timeline = tracker->timeline;
+       if (NULL == timeline) {
+               /* Tracker was not on a timeline, there is nothing to release. */
+               return MALI_SCHEDULER_MASK_EMPTY;
+       }
+
+       system = timeline->system;
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       /* Tracker should still be on timeline */
+       MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+       MALI_DEBUG_ASSERT( mali_timeline_is_point_on(timeline, tracker->point));
+
+       /* Tracker is no longer valid. */
+       MALI_DEBUG_CODE(tracker->magic = 0);
+
+       tracker_next = tracker->timeline_next;
+       tracker_prev = tracker->timeline_prev;
+       tracker->timeline_next = NULL;
+       tracker->timeline_prev = NULL;
+
+       /* Removing tracker from timeline's tracker list */
+       if (NULL == tracker_next) {
+               /* This tracker was the head */
+               timeline->tracker_head = tracker_prev;
+       } else {
+               tracker_next->timeline_prev = tracker_prev;
+       }
+
+       if (NULL == tracker_prev) {
+               /* This tracker was the tail */
+               timeline->tracker_tail = tracker_next;
+               MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+               /* Update the timeline's oldest time and release any waiters */
+               schedule_mask |= mali_timeline_update_oldest_point(timeline);
+               MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+       } else {
+               tracker_prev->timeline_next = tracker_next;
+       }
+
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+       /* Update delayed work only when it is the soft job timeline */
+       if (MALI_TIMELINE_SOFT == tracker->timeline->id) {
+               mali_timeline_update_delayed_work(tracker->timeline);
+       }
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+       return schedule_mask;
+}
+
+/* Return a chain of waiter objects to the system's free list.
+ *
+ * tail..head must be linked through tracker_next (tail first).  The whole
+ * chain is spliced onto the front of system->waiter_empty_list so the
+ * objects can be reused later by mali_timeline_system_get_zeroed_waiter().
+ * Called with the timeline system lock held. */
+void mali_timeline_system_release_waiter_list(struct mali_timeline_system *system,
+        struct mali_timeline_waiter *tail,
+        struct mali_timeline_waiter *head)
+{
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(head);
+       MALI_DEBUG_ASSERT_POINTER(tail);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+       head->tracker_next = system->waiter_empty_list;
+       system->waiter_empty_list = tail;
+}
+
+static mali_scheduler_mask mali_timeline_tracker_activate(struct mali_timeline_tracker *tracker)
+{
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+       struct mali_timeline_system *system;
+       struct mali_timeline *timeline;
+       u32 tid = _mali_osk_get_tid();
+
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic);
+
+       system = tracker->system;
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+       tracker->os_tick_activate = _mali_osk_time_tickcount();
+
+       if (NULL != tracker->waiter_head) {
+               mali_timeline_system_release_waiter_list(system, tracker->waiter_tail, tracker->waiter_head);
+               tracker->waiter_head = NULL;
+               tracker->waiter_tail = NULL;
+       }
+
+       switch (tracker->type) {
+       case MALI_TIMELINE_TRACKER_GP:
+               schedule_mask = mali_gp_scheduler_activate_job((struct mali_gp_job *) tracker->job);
+               break;
+       case MALI_TIMELINE_TRACKER_PP:
+               schedule_mask = mali_pp_scheduler_activate_job((struct mali_pp_job *) tracker->job);
+               break;
+       case MALI_TIMELINE_TRACKER_SOFT:
+               timeline = tracker->timeline;
+               MALI_DEBUG_ASSERT_POINTER(timeline);
+
+               mali_soft_job_system_activate_job((struct mali_soft_job *) tracker->job);
+
+               /* Start a soft timer to make sure the soft job be released in a limited time */
+               mali_spinlock_reentrant_wait(system->spinlock, tid);
+               mali_timeline_update_delayed_work(timeline);
+               mali_spinlock_reentrant_signal(system->spinlock, tid);
+               break;
+       case MALI_TIMELINE_TRACKER_WAIT:
+               mali_timeline_fence_wait_activate((struct mali_timeline_fence_wait_tracker *) tracker->job);
+               break;
+       case MALI_TIMELINE_TRACKER_SYNC:
+#if defined(CONFIG_SYNC)
+               mali_timeline_sync_fence_activate((struct mali_timeline_sync_fence_tracker *) tracker->job);
+#else
+               MALI_PRINT_ERROR(("Mali Timeline: sync tracker not supported\n", tracker->type));
+#endif /* defined(CONFIG_SYNC) */
+               break;
+       default:
+               MALI_PRINT_ERROR(("Mali Timeline - Illegal tracker type: %d\n", tracker->type));
+               break;
+       }
+
+       return schedule_mask;
+}
+
+/* Take an extra trigger reference on a tracker.
+ *
+ * The extra reference keeps the tracker from being activated until a
+ * matching mali_timeline_system_tracker_put() releases it. */
+void mali_timeline_system_tracker_get(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker)
+{
+       u32 current_tid = _mali_osk_get_tid();
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+
+       mali_spinlock_reentrant_wait(system->spinlock, current_tid);
+
+       /* A reference may only be taken while at least one is already held. */
+       MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+       ++tracker->trigger_ref_count;
+
+       mali_spinlock_reentrant_signal(system->spinlock, current_tid);
+}
+
+/* Drop one trigger reference on a tracker, recording any activation error.
+ *
+ * When the last reference is dropped the tracker is activated; activation
+ * may result in the tracker being freed, so the caller must not touch it
+ * afterwards.  Returns a scheduler mask for any work made runnable. */
+mali_scheduler_mask mali_timeline_system_tracker_put(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker, mali_timeline_activation_error activation_error)
+{
+       u32 tid = _mali_osk_get_tid();
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+       tracker->trigger_ref_count--;
+
+       /* Accumulate activation errors; they are reported when the job runs. */
+       tracker->activation_error |= activation_error;
+
+       if (0 == tracker->trigger_ref_count) {
+               schedule_mask |= mali_timeline_tracker_activate(tracker);
+               tracker = NULL;
+       }
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+       return schedule_mask;
+}
+
+/* Copy a fence handed over from user space into a kernel-side fence. */
+void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence)
+{
+       u32 idx = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(fence);
+       MALI_DEBUG_ASSERT_POINTER(uk_fence);
+
+       fence->sync_fd = uk_fence->sync_fd;
+
+       /* Copy the point for every timeline. */
+       while (idx < MALI_TIMELINE_MAX) {
+               fence->points[idx] = uk_fence->points[idx];
+               ++idx;
+       }
+}
+
+/* Create and initialize a timeline system for the given session.
+ *
+ * Allocates the system object, its reentrant spinlock, one timeline per
+ * timeline ID, the always-signaled sync timeline (when CONFIG_SYNC is
+ * enabled) and the wait queue.  On any failure everything allocated so far
+ * is torn down via mali_timeline_system_destroy() and NULL is returned. */
+struct mali_timeline_system *mali_timeline_system_create(struct mali_session_data *session)
+{
+       u32 i;
+       struct mali_timeline_system *system;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_PRINT(4, ("Mali Timeline: creating timeline system\n"));
+
+       system = (struct mali_timeline_system *) _mali_osk_calloc(1, sizeof(struct mali_timeline_system));
+       if (NULL == system) {
+               return NULL;
+       }
+
+       system->spinlock = mali_spinlock_reentrant_init(_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM);
+       if (NULL == system->spinlock) {
+               mali_timeline_system_destroy(system);
+               return NULL;
+       }
+
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               system->timelines[i] = mali_timeline_create(system, (enum mali_timeline_id)i);
+               if (NULL == system->timelines[i]) {
+                       mali_timeline_system_destroy(system);
+                       return NULL;
+               }
+       }
+
+#if defined(CONFIG_SYNC)
+       system->signaled_sync_tl = mali_sync_timeline_create("mali-always-signaled");
+       if (NULL == system->signaled_sync_tl) {
+               mali_timeline_system_destroy(system);
+               return NULL;
+       }
+#endif /* defined(CONFIG_SYNC) */
+
+       system->waiter_empty_list = NULL;
+       system->session = session;
+       system->timer_enabled = MALI_TRUE;
+
+       system->wait_queue = _mali_osk_wait_queue_init();
+       if (NULL == system->wait_queue) {
+               mali_timeline_system_destroy(system);
+               return NULL;
+       }
+
+       return system;
+}
+
+#if defined(CONFIG_SYNC)
+
+/**
+ * Wait queue conditional: check whether a timeline has no trackers left.
+ *
+ * @param data Timeline (passed as a void pointer by the wait queue).
+ * @return MALI_TRUE if there are no trackers on the timeline, MALI_FALSE if not.
+ */
+static mali_bool mali_timeline_has_no_trackers(void *data)
+{
+       struct mali_timeline *timeline;
+
+       timeline = (struct mali_timeline *) data;
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+
+       return mali_timeline_is_empty(timeline);
+}
+
+/**
+ * Cancel sync fence waiters waited upon by trackers on all timelines.
+ *
+ * Will return after all timelines have no trackers left.  Must only be
+ * called while the session is aborting.
+ *
+ * @param system Timeline system.
+ */
+static void mali_timeline_cancel_sync_fence_waiters(struct mali_timeline_system *system)
+{
+       u32 i;
+       u32 tid = _mali_osk_get_tid();
+       struct mali_timeline_tracker *tracker, *tracker_next;
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(tracker_list);
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(system->session);
+       MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       /* Cancel sync fence waiters. */
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               struct mali_timeline *timeline = system->timelines[i];
+
+               MALI_DEBUG_ASSERT_POINTER(timeline);
+
+               /* Walk tail (oldest) to head (newest). */
+               tracker_next = timeline->tracker_tail;
+               while (NULL != tracker_next) {
+                       tracker = tracker_next;
+                       tracker_next = tracker->timeline_next;
+
+                       if (NULL == tracker->sync_fence) continue;
+
+                       MALI_DEBUG_PRINT(3, ("Mali Timeline: Cancelling sync fence wait for tracker 0x%08X.\n", tracker));
+
+                       /* Cancel sync fence waiter. */
+                       if (0 == sync_fence_cancel_async(tracker->sync_fence, &tracker->sync_fence_waiter)) {
+                               /* Callback was not called, move tracker to local list. */
+                               _mali_osk_list_add(&tracker->sync_fence_cancel_list, &tracker_list);
+                       }
+               }
+       }
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+       /* Manually call sync fence callback in order to release waiter and trigger activation of tracker. */
+       _MALI_OSK_LIST_FOREACHENTRY(tracker, tracker_next, &tracker_list, struct mali_timeline_tracker, sync_fence_cancel_list) {
+               mali_timeline_sync_fence_callback(tracker->sync_fence, &tracker->sync_fence_waiter);
+       }
+
+       /* Sleep until all sync fence callbacks are done and all timelines are empty. */
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               struct mali_timeline *timeline = system->timelines[i];
+
+               MALI_DEBUG_ASSERT_POINTER(timeline);
+
+               _mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_has_no_trackers, (void *) timeline);
+       }
+}
+
+#endif /* defined(CONFIG_SYNC) */
+
+/* Abort the timeline system for a session that is shutting down.
+ *
+ * Cancels outstanding sync fence waiters (when CONFIG_SYNC is enabled) and,
+ * in debug builds, verifies that no trackers or waiters remain on any
+ * timeline afterwards.  Must only be called while the session is aborting. */
+void mali_timeline_system_abort(struct mali_timeline_system *system)
+{
+       MALI_DEBUG_CODE(u32 tid = _mali_osk_get_tid(););
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(system->session);
+       MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+       MALI_DEBUG_PRINT(3, ("Mali Timeline: Aborting timeline system for session 0x%08X.\n", system->session));
+
+#if defined(CONFIG_SYNC)
+       mali_timeline_cancel_sync_fence_waiters(system);
+#endif /* defined(CONFIG_SYNC) */
+
+       /* Should not be any waiters or trackers left at this point. */
+       MALI_DEBUG_CODE( {
+               u32 i;
+               mali_spinlock_reentrant_wait(system->spinlock, tid);
+               for (i = 0; i < MALI_TIMELINE_MAX; ++i)
+               {
+                       struct mali_timeline *timeline = system->timelines[i];
+                       MALI_DEBUG_ASSERT_POINTER(timeline);
+                       MALI_DEBUG_ASSERT(timeline->point_oldest == timeline->point_next);
+                       MALI_DEBUG_ASSERT(NULL == timeline->tracker_head);
+                       MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail);
+                       MALI_DEBUG_ASSERT(NULL == timeline->waiter_head);
+                       MALI_DEBUG_ASSERT(NULL == timeline->waiter_tail);
+               }
+               mali_spinlock_reentrant_signal(system->spinlock, tid);
+       });
+}
+
+/* Destroy the timeline system and free all resources it owns: the wait
+ * queue, the pool of free waiter objects, the signaled sync timeline (when
+ * CONFIG_SYNC is enabled), each timeline, the spinlock and finally the
+ * system object itself.  Safe to call on a partially constructed system,
+ * as done from the error paths of mali_timeline_system_create(). */
+void mali_timeline_system_destroy(struct mali_timeline_system *system)
+{
+       u32 i;
+       struct mali_timeline_waiter *waiter, *next;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(system->session);
+
+       MALI_DEBUG_PRINT(4, ("Mali Timeline: destroying timeline system\n"));
+
+       if (NULL != system) {
+               /* There should be no waiters left on this queue. */
+               if (NULL != system->wait_queue) {
+                       _mali_osk_wait_queue_term(system->wait_queue);
+                       system->wait_queue = NULL;
+               }
+
+               /* Free all waiters in empty list */
+               waiter = system->waiter_empty_list;
+               while (NULL != waiter) {
+                       next = waiter->tracker_next;
+                       _mali_osk_free(waiter);
+                       waiter = next;
+               }
+
+#if defined(CONFIG_SYNC)
+               if (NULL != system->signaled_sync_tl) {
+                       sync_timeline_destroy(system->signaled_sync_tl);
+               }
+#endif /* defined(CONFIG_SYNC) */
+
+               for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+                       if (NULL != system->timelines[i]) {
+                               mali_timeline_destroy(system->timelines[i]);
+                       }
+               }
+               if (NULL != system->spinlock) {
+                       mali_spinlock_reentrant_term(system->spinlock);
+               }
+
+               _mali_osk_free(system);
+       }
+}
+
+/**
+ * Find how many waiters are needed for a given fence.
+ *
+ * @param fence The fence to check.
+ * @return Number of waiters needed for fence.
+ */
+static u32 mali_timeline_fence_num_waiters(struct mali_timeline_fence *fence)
+{
+       u32 i, num_waiters = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(fence);
+
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               if (MALI_TIMELINE_NO_POINT != fence->points[i]) {
+                       ++num_waiters;
+               }
+       }
+
+#if defined(CONFIG_SYNC)
+       if (-1 != fence->sync_fd) ++num_waiters;
+#endif /* defined(CONFIG_SYNC) */
+
+       return num_waiters;
+}
+
+/* Pop a waiter object from the system's free list and zero it.
+ *
+ * Called with the system lock held.  Returns NULL if the free list is
+ * empty, in which case the caller has to allocate a fresh waiter itself. */
+static struct mali_timeline_waiter *mali_timeline_system_get_zeroed_waiter(struct mali_timeline_system *system)
+{
+       struct mali_timeline_waiter *waiter;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+       waiter = system->waiter_empty_list;
+       if (NULL != waiter) {
+               /* Remove waiter from empty list and zero it */
+               system->waiter_empty_list = waiter->tracker_next;
+               _mali_osk_memset(waiter, 0, sizeof(*waiter));
+       }
+
+       /* Return NULL if list was empty. */
+       return waiter;
+}
+
+/* Build a singly linked chain (*tail..*head, linked via tracker_next) of up
+ * to max_num_waiters zeroed waiter objects.
+ *
+ * Waiters are first taken from the system's free list; once that is empty
+ * the system lock is dropped and the remaining waiters are heap-allocated
+ * with _mali_osk_calloc(), after which the lock is re-acquired.  On
+ * allocation failure the chain may hold fewer than max_num_waiters entries.
+ * Called with the system lock held; returns with it held. */
+static void mali_timeline_system_allocate_waiters(struct mali_timeline_system *system,
+        struct mali_timeline_waiter **tail,
+        struct mali_timeline_waiter **head,
+        int max_num_waiters)
+{
+       u32 i, tid = _mali_osk_get_tid();
+       mali_bool do_alloc;
+       struct mali_timeline_waiter *waiter;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(tail);
+       MALI_DEBUG_ASSERT_POINTER(head);
+
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+       *head = *tail = NULL;
+       do_alloc = MALI_FALSE;
+       i = 0;
+       while (i < max_num_waiters) {
+               if (MALI_FALSE == do_alloc) {
+                       waiter = mali_timeline_system_get_zeroed_waiter(system);
+                       if (NULL == waiter) {
+                               /* Free list exhausted: drop the lock and switch
+                                * to heap allocation for the remainder. */
+                               do_alloc = MALI_TRUE;
+                               mali_spinlock_reentrant_signal(system->spinlock, tid);
+                               continue;
+                       }
+               } else {
+                       waiter = _mali_osk_calloc(1, sizeof(struct mali_timeline_waiter));
+                       if (NULL == waiter) break;
+               }
+               ++i;
+               if (NULL == *tail) {
+                       *tail = waiter;
+                       *head = waiter;
+               } else {
+                       (*head)->tracker_next = waiter;
+                       *head = waiter;
+               }
+       }
+       /* Re-acquire the lock if it was dropped for allocation. */
+       if (MALI_TRUE == do_alloc) {
+               mali_spinlock_reentrant_wait(system->spinlock, tid);
+       }
+}
+
+/**
+ * Create waiters for the given tracker. The tracker is activated when all waiters are release.
+ *
+ * @note Tracker can potentially be activated before this function returns.
+ * @note Releases the timeline system lock before returning.  Unused
+ *       pre-allocated waiters are put back on the system's free list.
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker we will create waiters for.
+ * @param waiter_tail List of pre-allocated waiters.
+ * @param waiter_head List of pre-allocated waiters.
+ */
+static void mali_timeline_system_create_waiters_and_unlock(struct mali_timeline_system *system,
+        struct mali_timeline_tracker *tracker,
+        struct mali_timeline_waiter *waiter_tail,
+        struct mali_timeline_waiter *waiter_head)
+{
+       int i;
+       u32 tid = _mali_osk_get_tid();
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+#if defined(CONFIG_SYNC)
+       struct sync_fence *sync_fence = NULL;
+#endif /* defined(CONFIG_SYNC) */
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+       MALI_DEBUG_ASSERT(NULL == tracker->waiter_head);
+       MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+       MALI_DEBUG_ASSERT(NULL != tracker->job);
+
+       /* Creating waiter object for all the timelines the fence is put on. Inserting this waiter
+        * into the timelines sorted list of waiters */
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               mali_timeline_point point;
+               struct mali_timeline *timeline;
+               struct mali_timeline_waiter *waiter;
+
+               /* Get point on current timeline from tracker's fence. */
+               point = tracker->fence.points[i];
+
+               if (likely(MALI_TIMELINE_NO_POINT == point)) {
+                       /* Fence contains no point on this timeline so we don't need a waiter. */
+                       continue;
+               }
+
+               timeline = system->timelines[i];
+               MALI_DEBUG_ASSERT_POINTER(timeline);
+
+               if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
+                       MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n",
+                                         point, timeline->point_oldest, timeline->point_next));
+                       continue;
+               }
+
+               if (likely(mali_timeline_is_point_released(timeline, point))) {
+                       /* Tracker representing the point has been released so we don't need a
+                        * waiter. */
+                       continue;
+               }
+
+               /* The point is on timeline. */
+               MALI_DEBUG_ASSERT(mali_timeline_is_point_on(timeline, point));
+
+               /* Get a new zeroed waiter object. */
+               if (likely(NULL != waiter_tail)) {
+                       waiter = waiter_tail;
+                       waiter_tail = waiter_tail->tracker_next;
+               } else {
+                       MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n"));
+                       continue;
+               }
+
+               /* Yanking the trigger ref count of the tracker. */
+               tracker->trigger_ref_count++;
+
+               waiter->point   = point;
+               waiter->tracker = tracker;
+
+               /* Insert waiter on tracker's singly-linked waiter list. */
+               if (NULL == tracker->waiter_head) {
+                       /* list is empty */
+                       MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+                       tracker->waiter_tail = waiter;
+               } else {
+                       tracker->waiter_head->tracker_next = waiter;
+               }
+               tracker->waiter_head = waiter;
+
+               /* Add waiter to timeline. */
+               mali_timeline_insert_waiter(timeline, waiter);
+       }
+#if defined(CONFIG_SYNC)
+       /* If the fence carries a sync fd, also create a waiter that is
+        * released by the sync fence callback. */
+       if (-1 != tracker->fence.sync_fd) {
+               int ret;
+               struct mali_timeline_waiter *waiter;
+
+               sync_fence = sync_fence_fdget(tracker->fence.sync_fd);
+               if (unlikely(NULL == sync_fence)) {
+                       MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", tracker->fence.sync_fd));
+                       goto exit;
+               }
+
+               /* Check if we have a zeroed waiter object available. */
+               if (unlikely(NULL == waiter_tail)) {
+                       MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n"));
+                       goto exit;
+               }
+
+               /* Start asynchronous wait that will release waiter when the fence is signaled. */
+               sync_fence_waiter_init(&tracker->sync_fence_waiter, mali_timeline_sync_fence_callback);
+               ret = sync_fence_wait_async(sync_fence, &tracker->sync_fence_waiter);
+               if (1 == ret) {
+                       /* Fence already signaled, no waiter needed. */
+                       goto exit;
+               } else if (0 != ret) {
+                       MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, ret));
+                       tracker->activation_error |= MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT;
+                       goto exit;
+               }
+
+               /* Grab new zeroed waiter object. */
+               waiter = waiter_tail;
+               waiter_tail = waiter_tail->tracker_next;
+
+               /* Increase the trigger ref count of the tracker. */
+               tracker->trigger_ref_count++;
+
+               waiter->point   = MALI_TIMELINE_NO_POINT;
+               waiter->tracker = tracker;
+
+               /* Insert waiter on tracker's singly-linked waiter list. */
+               if (NULL == tracker->waiter_head) {
+                       /* list is empty */
+                       MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+                       tracker->waiter_tail = waiter;
+               } else {
+                       tracker->waiter_head->tracker_next = waiter;
+               }
+               tracker->waiter_head = waiter;
+
+               /* Also store waiter in separate field for easy access by sync callback. */
+               tracker->waiter_sync = waiter;
+
+               /* Store the sync fence in tracker so we can retrieve in abort session, if needed. */
+               tracker->sync_fence = sync_fence;
+
+               sync_fence = NULL;
+       }
+exit:
+#endif /* defined(CONFIG_SYNC) */
+
+       /* Return any unused pre-allocated waiters to the free list. */
+       if (NULL != waiter_tail) {
+               mali_timeline_system_release_waiter_list(system, waiter_tail, waiter_head);
+       }
+
+       /* Release the initial trigger ref count. */
+       tracker->trigger_ref_count--;
+
+       /* If there were no waiters added to this tracker we activate immediately. */
+       if (0 == tracker->trigger_ref_count) {
+               schedule_mask |= mali_timeline_tracker_activate(tracker);
+       }
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+#if defined(CONFIG_SYNC)
+       /* Drop the fence reference taken by sync_fence_fdget() if it was not
+        * handed over to the tracker. */
+       if (NULL != sync_fence) {
+               sync_fence_put(sync_fence);
+       }
+#endif /* defined(CONFIG_SYNC) */
+
+       mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+/* Add a tracker to the given timeline and create waiters for the points in
+ * its fence.
+ *
+ * Returns the point assigned to the tracker on the timeline, or
+ * MALI_TIMELINE_NO_POINT if timeline_id is MALI_TIMELINE_NONE or the
+ * timeline is full.  The tracker may be activated -- and potentially freed
+ * -- before this function returns, so the caller must not touch it after
+ * the call. */
+mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system,
+        struct mali_timeline_tracker *tracker,
+        enum mali_timeline_id timeline_id)
+{
+       int num_waiters = 0;
+       struct mali_timeline_waiter *waiter_tail, *waiter_head;
+       u32 tid = _mali_osk_get_tid();
+       mali_timeline_point point = MALI_TIMELINE_NO_POINT;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(system->session);
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+
+       MALI_DEBUG_ASSERT(MALI_FALSE == system->session->is_aborting);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAX > tracker->type);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic);
+
+       MALI_DEBUG_PRINT(4, ("Mali Timeline: adding tracker for job %p, timeline: %d\n", tracker->job, timeline_id));
+
+       MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+       tracker->system = system;
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       num_waiters = mali_timeline_fence_num_waiters(&tracker->fence);
+
+       /* Allocate waiters. */
+       mali_timeline_system_allocate_waiters(system, &waiter_tail, &waiter_head, num_waiters);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+       /* Add tracker to timeline.  This will allocate a point for the tracker on the timeline. If
+        * timeline ID is MALI_TIMELINE_NONE the tracker will NOT be added to a timeline and the
+        * point will be MALI_TIMELINE_NO_POINT.
+        *
+        * NOTE: the tracker can fail to be added if the timeline is full.  If this happens, the
+        * point will be MALI_TIMELINE_NO_POINT. */
+       MALI_DEBUG_ASSERT(timeline_id < MALI_TIMELINE_MAX || timeline_id == MALI_TIMELINE_NONE);
+       if (likely(timeline_id < MALI_TIMELINE_MAX)) {
+               struct mali_timeline *timeline = system->timelines[timeline_id];
+               mali_timeline_insert_tracker(timeline, tracker);
+               MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+       }
+
+       point = tracker->point;
+
+       /* Create waiters for tracker based on supplied fence.  Each waiter will increase the
+        * trigger ref count. */
+       mali_timeline_system_create_waiters_and_unlock(system, tracker, waiter_tail, waiter_head);
+       tracker = NULL;
+
+       /* At this point the tracker object might have been freed so we should no longer
+        * access it. */
+
+
+       /* The tracker will always be activated after calling add_tracker, even if NO_POINT is
+        * returned. */
+       return point;
+}
+
+/* Release one waiter on behalf of its tracker.  Must be called with the
+ * system lock held (asserted).  Drops one trigger reference from the tracker
+ * and activates the tracker when the reference count reaches zero.  Returns
+ * a scheduling bitmask produced by the activation (empty if none). */
+static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system,
+        struct mali_timeline_waiter *waiter)
+{
+       struct mali_timeline_tracker *tracker;
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(waiter);
+
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+       tracker = waiter->tracker;
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+
+       /* At this point the waiter has been removed from the timeline's waiter list, but it is
+        * still on the tracker's waiter list.  All of the tracker's waiters will be released when
+        * the tracker is activated. */
+
+       /* Disconnect the waiter; it no longer refers to a point or tracker. */
+       waiter->point   = MALI_TIMELINE_NO_POINT;
+       waiter->tracker = NULL;
+
+       tracker->trigger_ref_count--;
+       if (0 == tracker->trigger_ref_count) {
+               /* This was the last waiter; activate tracker */
+               schedule_mask |= mali_timeline_tracker_activate(tracker);
+               tracker = NULL;
+       }
+
+       return schedule_mask;
+}
+
+/* Return the newest point allocated on timeline @timeline_id, or
+ * MALI_TIMELINE_NO_POINT if the timeline is empty or the id is invalid. */
+mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_system *system,
+        enum mali_timeline_id timeline_id)
+{
+       mali_timeline_point point;
+       struct mali_timeline *timeline;
+       u32 tid = _mali_osk_get_tid();
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       if (MALI_TIMELINE_MAX <= timeline_id) {
+               return MALI_TIMELINE_NO_POINT;
+       }
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       timeline = system->timelines[timeline_id];
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+
+       point = MALI_TIMELINE_NO_POINT;
+       if (timeline->point_oldest != timeline->point_next) {
+               /* point_next is the next free point, so the newest allocated
+                * point is point_next - 1. */
+               point = timeline->point_next - 1;
+               /* Skip the reserved value 0 (MALI_TIMELINE_NO_POINT) when the
+                * point counter has wrapped. */
+               if (MALI_TIMELINE_NO_POINT == point) point--;
+       }
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+       return point;
+}
+
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+
+/* Debug-dump helper: MALI_TRUE when @tracker's fence references a point that
+ * is still pending on timeline @id. */
+static mali_bool is_waiting_on_timeline(struct mali_timeline_tracker *tracker, enum mali_timeline_id id)
+{
+       struct mali_timeline *timeline;
+       struct mali_timeline_system *system;
+
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+       MALI_DEBUG_ASSERT_POINTER(tracker->timeline);
+
+       timeline = tracker->timeline;
+       system = timeline->system;
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       if (MALI_TIMELINE_MAX <= id) {
+               /* Only the "no timeline" id is expected out of range. */
+               MALI_DEBUG_ASSERT(MALI_TIMELINE_NONE == id);
+               return MALI_FALSE;
+       }
+
+       return mali_timeline_is_point_on(system->timelines[id], tracker->fence.points[id]);
+}
+
+/* Map a timeline id to a fixed-width name for the debug dump. */
+static const char *timeline_id_to_string(enum mali_timeline_id id)
+{
+       if (MALI_TIMELINE_GP == id) {
+               return "  GP";
+       } else if (MALI_TIMELINE_PP == id) {
+               return "  PP";
+       } else if (MALI_TIMELINE_SOFT == id) {
+               return "SOFT";
+       }
+
+       return "NONE";
+}
+
+/* Map a tracker type to a fixed-width name for the debug dump. */
+static const char *timeline_tracker_type_to_string(enum mali_timeline_tracker_type type)
+{
+       if (MALI_TIMELINE_TRACKER_GP == type) {
+               return "  GP";
+       } else if (MALI_TIMELINE_TRACKER_PP == type) {
+               return "  PP";
+       } else if (MALI_TIMELINE_TRACKER_SOFT == type) {
+               return "SOFT";
+       } else if (MALI_TIMELINE_TRACKER_WAIT == type) {
+               return "WAIT";
+       } else if (MALI_TIMELINE_TRACKER_SYNC == type) {
+               return "SYNC";
+       }
+
+       return "INVALID";
+}
+
+/* Infer the lifecycle state of @tracker for debug printing.  The state is
+ * derived from the tracker's fields rather than stored explicitly. */
+mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_timeline_tracker *tracker)
+{
+       struct mali_timeline *timeline = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+       timeline = tracker->timeline;
+
+       /* Still holding trigger references: waiting on dependencies. */
+       if (0 != tracker->trigger_ref_count) {
+               return MALI_TIMELINE_TS_WAITING;
+       }
+
+       /* Linked into a timeline's tracker list (it is the tail or has a
+        * predecessor): activated but not yet released. */
+       if (timeline && (timeline->tracker_tail == tracker || NULL != tracker->timeline_prev)) {
+               return MALI_TIMELINE_TS_ACTIVE;
+       }
+
+       /* Associated with a timeline but no point allocated yet. */
+       if (timeline && (MALI_TIMELINE_NO_POINT == tracker->point)) {
+               return MALI_TIMELINE_TS_INIT;
+       }
+
+       return MALI_TIMELINE_TS_FINISH;
+}
+
+/* Print a one-line debug description of @tracker.  The state letter is
+ * picked from "IWAF" (Init/Waiting/Active/Finish), indexed by
+ * mali_timeline_debug_get_tracker_state(). */
+void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker)
+{
+       const char *tracker_state = "IWAF";
+
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+
+       if (0 != tracker->trigger_ref_count) {
+               /* Tracker still waiting: also dump which timeline points the
+                * fence is blocked on ("W" marks a pending point).
+                * NOTE(review): "0x%08X" is used with the pointer tracker->job;
+                * %p would be the portable specifier -- confirm this code only
+                * targets 32-bit builds. */
+               MALI_PRINTF(("TL:  %s %u %c - ref_wait:%u [%s%u,%s%u,%s%u,%d]  (0x%08X)\n",
+                            timeline_tracker_type_to_string(tracker->type), tracker->point,
+                            *(tracker_state + mali_timeline_debug_get_tracker_state(tracker)),
+                            tracker->trigger_ref_count,
+                            is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "W" : " ", tracker->fence.points[0],
+                            is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "W" : " ", tracker->fence.points[1],
+                            is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "W" : " ", tracker->fence.points[2],
+                            tracker->fence.sync_fd, tracker->job));
+       } else {
+               MALI_PRINTF(("TL:  %s %u %c  (0x%08X)\n",
+                            timeline_tracker_type_to_string(tracker->type), tracker->point,
+                            *(tracker_state + mali_timeline_debug_get_tracker_state(tracker)),
+                            tracker->job));
+       }
+}
+
+/* Print the trackers on @timeline, oldest first.  The dump is capped at 30
+ * entries; a note is printed if the list is longer than that. */
+void mali_timeline_debug_print_timeline(struct mali_timeline *timeline)
+{
+       struct mali_timeline_tracker *tracker = NULL;
+       const int max_printed = 30;
+       int num_printed = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+
+       /* Walk from the tail (oldest tracker) towards the head. */
+       tracker = timeline->tracker_tail;
+       while (NULL != tracker && num_printed < max_printed) {
+               mali_timeline_debug_print_tracker(tracker);
+               tracker = tracker->timeline_next;
+               num_printed++;
+       }
+
+       /* Fix: the previous form (0 < --i_max) printed at most 29 of the
+        * intended 30 trackers; testing the cursor directly is exact and
+        * reports truncation iff trackers actually remain unprinted. */
+       if (NULL != tracker) {
+               MALI_PRINTF(("TL: Too many trackers in list to print\n"));
+       }
+}
+
+/* Dump every non-empty timeline in @system; print a placeholder line when
+ * all timelines are empty. */
+void mali_timeline_debug_print_system(struct mali_timeline_system *system)
+{
+       int i;
+       int num_printed = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       /* Print all timelines */
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               struct mali_timeline *timeline = system->timelines[i];
+
+               MALI_DEBUG_ASSERT_POINTER(timeline);
+
+               if (NULL != timeline->tracker_head) {
+                       MALI_PRINTF(("TL: Timeline %s:\n",
+                                    timeline_id_to_string((enum mali_timeline_id)i)));
+                       mali_timeline_debug_print_timeline(timeline);
+                       num_printed++;
+               }
+       }
+
+       if (0 == num_printed) {
+               MALI_PRINTF(("TL: All timelines empty\n"));
+       }
+}
+
+#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_timeline.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_timeline.h
new file mode 100644 (file)
index 0000000..42c5047
--- /dev/null
@@ -0,0 +1,494 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMELINE_H__
+#define __MALI_TIMELINE_H__
+
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "mali_spinlock_reentrant.h"
+#include "mali_sync.h"
+#include "mali_scheduler_types.h"
+
+/**
+ * Soft job timeout.
+ *
+ * Soft jobs have to be signaled as complete after activation.  Normally this is done by user space,
+ * but in order to guarantee that every soft job is completed, we also have a timer.
+ */
+#define MALI_TIMELINE_TIMEOUT_HZ ((u32) (HZ * 3 / 2)) /* 1500 ms. */
+
+/**
+ * Timeline type.
+ */
+typedef enum mali_timeline_id {
+       MALI_TIMELINE_GP   = MALI_UK_TIMELINE_GP,   /**< GP job timeline. */
+       MALI_TIMELINE_PP   = MALI_UK_TIMELINE_PP,   /**< PP job timeline. */
+       MALI_TIMELINE_SOFT = MALI_UK_TIMELINE_SOFT, /**< Soft job timeline. */
+       MALI_TIMELINE_MAX  = MALI_UK_TIMELINE_MAX
+} mali_timeline_id;
+
+/**
+ * Used by trackers that should not be added to a timeline (@ref mali_timeline_system_add_tracker).
+ */
+#define MALI_TIMELINE_NONE MALI_TIMELINE_MAX
+
+/**
+ * Tracker type.
+ */
+typedef enum mali_timeline_tracker_type {
+       MALI_TIMELINE_TRACKER_GP   = 0, /**< Tracker used by GP jobs. */
+       MALI_TIMELINE_TRACKER_PP   = 1, /**< Tracker used by PP jobs. */
+       MALI_TIMELINE_TRACKER_SOFT = 2, /**< Tracker used by soft jobs. */
+       MALI_TIMELINE_TRACKER_WAIT = 3, /**< Tracker used for fence wait. */
+       MALI_TIMELINE_TRACKER_SYNC = 4, /**< Tracker used for sync fence. */
+       MALI_TIMELINE_TRACKER_MAX  = 5,
+} mali_timeline_tracker_type;
+
+/**
+ * Tracker activation error.
+ */
+typedef u32 mali_timeline_activation_error;
+#define MALI_TIMELINE_ACTIVATION_ERROR_NONE      0
+#define MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT  (1<<1)
+#define MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT (1<<0)
+
+/**
+ * Type used to represent a point on a timeline.
+ */
+typedef u32 mali_timeline_point;
+
+/**
+ * Used to represent the absence of a point on a timeline.
+ */
+#define MALI_TIMELINE_NO_POINT ((mali_timeline_point) 0)
+
+/**
+ * The maximum span of points on a timeline.  A timeline will be considered full if the difference
+ * between the oldest and newest points is equal or larger to this value.
+ */
+#define MALI_TIMELINE_MAX_POINT_SPAN 65536
+
+/**
+ * Magic value used to assert on validity of trackers.
+ */
+#define MALI_TIMELINE_TRACKER_MAGIC 0xabcdabcd
+
+struct mali_timeline;
+struct mali_timeline_waiter;
+struct mali_timeline_tracker;
+
+/**
+ * Timeline fence.
+ */
+struct mali_timeline_fence {
+       mali_timeline_point points[MALI_TIMELINE_MAX]; /**< For each timeline, a point or MALI_TIMELINE_NO_POINT. */
+       s32                 sync_fd;                   /**< A file descriptor representing a sync fence, or -1. */
+};
+
+/**
+ * Timeline system.
+ *
+ * The Timeline system has a set of timelines associated with a session.
+ */
+struct mali_timeline_system {
+       struct mali_spinlock_reentrant *spinlock;   /**< Spin lock protecting the timeline system */
+       struct mali_timeline           *timelines[MALI_TIMELINE_MAX]; /**< The timelines in this system */
+
+       /* Single-linked list of unused waiter objects.  Uses the tracker_next field in tracker. */
+       struct mali_timeline_waiter    *waiter_empty_list;
+
+       struct mali_session_data       *session;    /**< Session that owns this system. */
+
+       mali_bool                       timer_enabled; /**< Set to MALI_TRUE if soft job timer should be enabled, MALI_FALSE if not. */
+
+       _mali_osk_wait_queue_t         *wait_queue; /**< Wait queue. */
+
+#if defined(CONFIG_SYNC)
+       struct sync_timeline           *signaled_sync_tl; /**< Special sync timeline used to create pre-signaled sync fences */
+#endif /* defined(CONFIG_SYNC) */
+};
+
+/**
+ * Timeline.  Each Timeline system will have MALI_TIMELINE_MAX timelines.
+ */
+struct mali_timeline {
+       mali_timeline_point           point_next;   /**< The next available point. */
+       mali_timeline_point           point_oldest; /**< The oldest point not released. */
+
+       /* Double-linked list of trackers.  Sorted in ascending order by
+        * tracker->point, with tail pointing to the tracker with the oldest
+        * point.  (NOTE(review): the original comment referred to a
+        * "time_number" field, which does not exist; tracker->point appears
+        * to be meant -- confirm.) */
+       struct mali_timeline_tracker *tracker_head;
+       struct mali_timeline_tracker *tracker_tail;
+
+       /* Double-linked list of waiters.  Sorted in ascending order by
+        * waiter->point, with tail pointing to the waiter with the oldest
+        * wait point.  (NOTE(review): "time_number_wait" in the original
+        * comment does not exist as a field; waiter->point appears to be
+        * meant -- confirm.) */
+       struct mali_timeline_waiter  *waiter_head;
+       struct mali_timeline_waiter  *waiter_tail;
+
+       struct mali_timeline_system  *system;       /**< Timeline system this timeline belongs to. */
+       enum mali_timeline_id         id;           /**< Timeline type. */
+
+#if defined(CONFIG_SYNC)
+       struct sync_timeline         *sync_tl;      /**< Sync timeline that corresponds to this timeline. */
+#endif /* defined(CONFIG_SYNC) */
+
+       /* The following fields are used to time out soft job trackers. */
+       _mali_osk_wq_delayed_work_t  *delayed_work;
+       mali_bool                     timer_active;
+};
+
+/**
+ * Timeline waiter.
+ */
+struct mali_timeline_waiter {
+       mali_timeline_point           point;         /**< Point on timeline we are waiting for to be released. */
+       struct mali_timeline_tracker *tracker;       /**< Tracker that is waiting. */
+
+       struct mali_timeline_waiter  *timeline_next; /**< Next waiter on timeline's waiter list. */
+       struct mali_timeline_waiter  *timeline_prev; /**< Previous waiter on timeline's waiter list. */
+
+       struct mali_timeline_waiter  *tracker_next;  /**< Next waiter on tracker's waiter list. */
+};
+
+/**
+ * Timeline tracker.
+ */
+struct mali_timeline_tracker {
+       MALI_DEBUG_CODE(u32            magic); /**< Should always be MALI_TIMELINE_TRACKER_MAGIC for a valid tracker. */
+
+       mali_timeline_point            point; /**< Point on timeline for this tracker */
+
+       struct mali_timeline_tracker  *timeline_next; /**< Next tracker on timeline's tracker list */
+       struct mali_timeline_tracker  *timeline_prev; /**< Previous tracker on timeline's tracker list */
+
+       u32                            trigger_ref_count; /**< When zero tracker will be activated */
+       mali_timeline_activation_error activation_error;  /**< Activation error. */
+       struct mali_timeline_fence     fence;             /**< Fence used to create this tracker */
+
+       /* Single-linked list of waiters.  Sorted in order of insertions with
+        * tail pointing to first waiter. */
+       struct mali_timeline_waiter   *waiter_head;
+       struct mali_timeline_waiter   *waiter_tail;
+
+#if defined(CONFIG_SYNC)
+       /* These are only used if the tracker is waiting on a sync fence. */
+       struct mali_timeline_waiter   *waiter_sync; /**< A direct pointer to timeline waiter representing sync fence. */
+       struct sync_fence_waiter       sync_fence_waiter; /**< Used to connect sync fence and tracker in sync fence wait callback. */
+       struct sync_fence             *sync_fence;   /**< The sync fence this tracker is waiting on. */
+       _mali_osk_list_t               sync_fence_cancel_list; /**< List node used to cancel sync fence waiters. */
+#endif /* defined(CONFIG_SYNC) */
+
+       struct mali_timeline_system   *system;       /**< Timeline system. */
+       struct mali_timeline          *timeline;     /**< Timeline, or NULL if not on a timeline. */
+       enum mali_timeline_tracker_type type;        /**< Type of tracker. */
+       void                          *job;          /**< Owner of tracker. */
+
+       /* The following fields are used to time out soft job trackers. */
+       u32                           os_tick_create;
+       u32                           os_tick_activate;
+       mali_bool                     timer_active;
+};
+
+/**
+ * What follows is a set of functions to check the state of a timeline and to determine where on a
+ * timeline a given point is.  Most of these checks will translate the timeline so the oldest point
+ * on the timeline is aligned with zero.  Remember that all of these calculation are done on
+ * unsigned integers.
+ *
+ * The following example illustrates the three different states a point can be in.  The timeline has
+ * been translated to put the oldest point at zero:
+ *
+ *
+ *
+ *                               [ point is in forbidden zone ]
+ *                                          64k wide
+ *                                MALI_TIMELINE_MAX_POINT_SPAN
+ *
+ *    [ point is on timeline     )                            ( point is released ]
+ *
+ *    0--------------------------##############################--------------------2^32 - 1
+ *    ^                          ^
+ *    \                          |
+ *     oldest point on timeline  |
+ *                               \
+ *                                next point on timeline
+ */
+
+/**
+ * Compare two timeline points
+ *
+ * Returns true if a is after b, false if a is before or equal to b.
+ *
+ * This function ignores MALI_TIMELINE_MAX_POINT_SPAN.  Wrapping is supported
+ * and the result will be correct if the points are less than UINT_MAX/2
+ * apart.
+ *
+ * @param a Point on timeline
+ * @param b Point on timeline
+ * @return MALI_TRUE if a is after b
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_point_after(mali_timeline_point a, mali_timeline_point b)
+{
+       /* Take the wrapped difference in unsigned arithmetic (well defined in
+        * C) and only then inspect the sign.  The previous expression,
+        * ((s32)b) - ((s32)a), could overflow a signed int, which is
+        * undefined behaviour. */
+       return 0 > (s32)(b - a);
+}
+
+/**
+ * Check whether a point currently lies on a timeline.
+ *
+ * A point is on the timeline if, after translating so that the oldest point
+ * is zero, it falls strictly before the next point to be allocated.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if point is on timeline, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_point_on(struct mali_timeline *timeline, mali_timeline_point point)
+{
+       mali_timeline_point offset;
+       mali_timeline_point span;
+
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
+       offset = point - timeline->point_oldest;
+       span = timeline->point_next - timeline->point_oldest;
+
+       return offset < span;
+}
+
+/**
+ * Check if a point has been released.  A point is released if it is older than the oldest point on
+ * the timeline, newer than the next point, and also not in the forbidden zone.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if point has been released, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_point_released(struct mali_timeline *timeline, mali_timeline_point point)
+{
+       mali_timeline_point point_normalized;
+       mali_timeline_point next_normalized;
+
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
+       /* Translate so the oldest point sits at zero; unsigned wrap-around
+        * makes released (older) points appear as very large offsets, beyond
+        * the forbidden zone of MALI_TIMELINE_MAX_POINT_SPAN points. */
+       point_normalized = point - timeline->point_oldest;
+       next_normalized = timeline->point_next - timeline->point_oldest;
+
+       return point_normalized > (next_normalized + MALI_TIMELINE_MAX_POINT_SPAN);
+}
+
+/**
+ * Check if a point is valid.  A point is valid if it is either still on the
+ * timeline or has already been released.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if point is valid, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_point_valid(struct mali_timeline *timeline, mali_timeline_point point)
+{
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+
+       if (mali_timeline_is_point_on(timeline, point)) {
+               return MALI_TRUE;
+       }
+
+       return mali_timeline_is_point_released(timeline, point);
+}
+
+/**
+ * Check if a timeline is empty (holds no points).
+ *
+ * @param timeline Timeline.
+ * @return MALI_TRUE if timeline is empty, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_empty(struct mali_timeline *timeline)
+{
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+
+       /* No points outstanding when the next free point equals the oldest
+        * unreleased point. */
+       return timeline->point_oldest == timeline->point_next;
+}
+
+/**
+ * Check if a timeline is full.  A valid timeline cannot span more than 64k
+ * points (@ref MALI_TIMELINE_MAX_POINT_SPAN).
+ *
+ * @param timeline Timeline.
+ * @return MALI_TRUE if timeline is full, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_full(struct mali_timeline *timeline)
+{
+       mali_timeline_point span;
+
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+
+       span = timeline->point_next - timeline->point_oldest;
+
+       return span >= MALI_TIMELINE_MAX_POINT_SPAN;
+}
+
+/**
+ * Create a new timeline system.
+ *
+ * @param session The session this timeline system will belong to.
+ * @return New timeline system.
+ */
+struct mali_timeline_system *mali_timeline_system_create(struct mali_session_data *session);
+
+/**
+ * Abort timeline system.
+ *
+ * This will release all pending waiters in the timeline system causing all trackers to be
+ * activated.
+ *
+ * @param system Timeline system to abort all jobs from.
+ */
+void mali_timeline_system_abort(struct mali_timeline_system *system);
+
+/**
+ * Destroy an empty timeline system.
+ *
+ * @note @ref mali_timeline_system_abort() should be called prior to this function.
+ *
+ * @param system Timeline system to destroy.
+ */
+void mali_timeline_system_destroy(struct mali_timeline_system *system);
+
+/**
+ * Stop the soft job timer.
+ *
+ * @param system Timeline system
+ */
+void mali_timeline_system_stop_timer(struct mali_timeline_system *system);
+
+/**
+ * Add a tracker to a timeline system and optionally also on a timeline.
+ *
+ * Once added to the timeline system, the tracker is guaranteed to be activated.  The tracker can be
+ * activated before this function returns.  Thus, it is also possible that the tracker is released
+ * before this function returns, depending on the tracker type.
+ *
+ * @note Tracker must be initialized (@ref mali_timeline_tracker_init) before being added to the
+ * timeline system.
+ *
+ * @param system Timeline system the tracker will be added to.
+ * @param tracker The tracker to be added.
+ * @param timeline_id Id of the timeline the tracker will be added to, or
+ *                    MALI_TIMELINE_NONE if it should not be added on a timeline.
+ * @return Point on timeline identifying this tracker, or MALI_TIMELINE_NO_POINT if not on timeline.
+ */
+mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system,
+        struct mali_timeline_tracker *tracker,
+        enum mali_timeline_id timeline_id);
+
+/**
+ * Get latest point on timeline.
+ *
+ * @param system Timeline system.
+ * @param timeline_id Id of timeline to get latest point from.
+ * @return Latest point on timeline, or MALI_TIMELINE_NO_POINT if the timeline is empty.
+ */
+mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_system *system,
+        enum mali_timeline_id timeline_id);
+
+/**
+ * Initialize tracker.
+ *
+ * Must be called before tracker is added to timeline system (@ref mali_timeline_system_add_tracker).
+ *
+ * @param tracker Tracker to initialize.
+ * @param type Type of tracker.
+ * @param fence Fence used to set up dependencies for tracker.
+ * @param job Pointer to job struct this tracker is associated with.
+ */
+void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker,
+                                mali_timeline_tracker_type type,
+                                struct mali_timeline_fence *fence,
+                                void *job);
+
+/**
+ * Grab trigger ref count on tracker.
+ *
+ * This will prevent tracker from being activated until the trigger ref count reaches zero.
+ *
+ * @note Tracker must have been initialized (@ref mali_timeline_tracker_init).
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker.
+ */
+void mali_timeline_system_tracker_get(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker);
+
+/**
+ * Release trigger ref count on tracker.
+ *
+ * If the trigger ref count reaches zero, the tracker will be activated.
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker.
+ * @param activation_error Error bitmask if activated with error, or MALI_TIMELINE_ACTIVATION_ERROR_NONE if no error.
+ * @return Scheduling bitmask.
+ */
+mali_scheduler_mask mali_timeline_system_tracker_put(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker, mali_timeline_activation_error activation_error);
+
+/**
+ * Release a tracker from the timeline system.
+ *
+ * This is used to signal that the job being tracker is finished, either due to normal circumstances
+ * (job complete/abort) or due to a timeout.
+ *
+ * We may need to schedule some subsystems after a tracker has been released and the returned
+ * bitmask will tell us if it is necessary.  If the return value is non-zero, this value needs to be
+ * sent as an input parameter to @ref mali_scheduler_schedule_from_mask() to do the scheduling.
+ *
+ * @note Tracker must have been activated before being released.
+ * @warning Not calling @ref mali_scheduler_schedule_from_mask() after releasing a tracker can lead
+ * to a deadlock.
+ *
+ * @param tracker Tracker being released.
+ * @return Scheduling bitmask.
+ */
+mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker);
+
+/**
+ * Copy data from a UK fence to a Timeline fence.
+ *
+ * @param fence Timeline fence.
+ * @param uk_fence UK fence.
+ */
+void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence);
+
+#define MALI_TIMELINE_DEBUG_FUNCTIONS
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+
+/**
+ * Tracker state.  Used for debug printing.
+ */
+typedef enum mali_timeline_tracker_state {
+       MALI_TIMELINE_TS_INIT    = 0,
+       MALI_TIMELINE_TS_WAITING = 1,
+       MALI_TIMELINE_TS_ACTIVE  = 2,
+       MALI_TIMELINE_TS_FINISH  = 3,
+} mali_timeline_tracker_state;
+
+/**
+ * Get tracker state.
+ *
+ * @param tracker Tracker to check.
+ * @return State of tracker.
+ */
+mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_timeline_tracker *tracker);
+
+/**
+ * Print debug information about tracker.
+ *
+ * @param tracker Tracker to print.
+ */
+void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker);
+
+/**
+ * Print debug information about timeline.
+ *
+ * @param timeline Timeline to print.
+ */
+void mali_timeline_debug_print_timeline(struct mali_timeline *timeline);
+
+/**
+ * Print debug information about timeline system.
+ *
+ * @param system Timeline system to print.
+ */
+void mali_timeline_debug_print_system(struct mali_timeline_system *system);
+
+#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
+
+#endif /* __MALI_TIMELINE_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_timeline_fence_wait.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_timeline_fence_wait.c
new file mode 100644 (file)
index 0000000..4863d69
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_timeline_fence_wait.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_spinlock_reentrant.h"
+
+/**
+ * Allocate a zero-initialized fence waiter tracker.
+ *
+ * @return New fence waiter if successful, NULL if not.
+ */
+static struct mali_timeline_fence_wait_tracker *mali_timeline_fence_wait_tracker_alloc(void)
+{
+       struct mali_timeline_fence_wait_tracker *wait;
+
+       wait = _mali_osk_calloc(1, sizeof(*wait));
+
+       return wait;
+}
+
+/**
+ * Free fence waiter tracker.
+ *
+ * Tears down the embedded atomic reference counter before freeing.
+ *
+ * @param wait Fence wait tracker to free.
+ */
+static void mali_timeline_fence_wait_tracker_free(struct mali_timeline_fence_wait_tracker *wait)
+{
+       MALI_DEBUG_ASSERT_POINTER(wait);
+       _mali_osk_atomic_term(&wait->refcount);
+       _mali_osk_free(wait);
+}
+
+/**
+ * Wait queue condition: has the fence wait tracker been activated?
+ *
+ * @param data Fence waiter (opaque pointer supplied by the wait queue).
+ * @return MALI_TRUE if tracker has been activated, MALI_FALSE if not.
+ */
+static mali_bool mali_timeline_fence_wait_tracker_is_activated(void *data)
+{
+       struct mali_timeline_fence_wait_tracker *wait = data;
+
+       MALI_DEBUG_ASSERT_POINTER(wait);
+
+       return wait->activated;
+}
+
+/**
+ * Check if fence has been signaled.
+ *
+ * A fence counts as signaled when every timeline point it references has
+ * been released AND its sync fence file descriptor (if any) has signaled.
+ *
+ * @param system Timeline system.
+ * @param fence Timeline fence.
+ * @return MALI_TRUE if fence is signaled, MALI_FALSE if not.
+ */
+static mali_bool mali_timeline_fence_wait_check_status(struct mali_timeline_system *system, struct mali_timeline_fence *fence)
+{
+       int i;
+       u32 tid = _mali_osk_get_tid();
+       mali_bool ret = MALI_TRUE;
+#if defined(CONFIG_SYNC)
+       struct sync_fence *sync_fence = NULL;
+#endif
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(fence);
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               struct mali_timeline *timeline;
+               mali_timeline_point   point;
+
+               point = fence->points[i];
+
+               if (likely(MALI_TIMELINE_NO_POINT == point)) {
+                       /* Fence contains no point on this timeline. */
+                       continue;
+               }
+
+               timeline = system->timelines[i];
+               MALI_DEBUG_ASSERT_POINTER(timeline);
+
+               /* A point outside both the live and released ranges indicates
+                * a bad fence; report it but keep checking. */
+               if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
+                       MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n", point, timeline->point_oldest, timeline->point_next));
+               }
+
+               if (!mali_timeline_is_point_released(timeline, point)) {
+                       /* At least one point is still pending. */
+                       ret = MALI_FALSE;
+                       goto exit;
+               }
+       }
+
+#if defined(CONFIG_SYNC)
+       if (-1 != fence->sync_fd) {
+               sync_fence = sync_fence_fdget(fence->sync_fd);
+               if (likely(NULL != sync_fence)) {
+                       /* sync_fence->status == 0 means still active (not
+                        * signaled).  NOTE(review): a negative (error) status
+                        * is treated the same as signaled here -- confirm
+                        * that is intended. */
+                       if (0 == sync_fence->status) {
+                               ret = MALI_FALSE;
+                       }
+               } else {
+                       MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", fence->sync_fd));
+               }
+       }
+#endif /* defined(CONFIG_SYNC) */
+
+exit:
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+#if defined(CONFIG_SYNC)
+       /* Drop the reference taken by sync_fence_fdget(). */
+       if (NULL != sync_fence) {
+               sync_fence_put(sync_fence);
+       }
+#endif /* defined(CONFIG_SYNC) */
+
+       return ret;
+}
+
+/* Block until @fence is signaled or @timeout (in ticks) expires.  See
+ * mali_timeline_fence_wait.h for the special timeout values.  Returns
+ * MALI_TRUE if the fence was signaled within the timeout. */
+mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout)
+{
+       struct mali_timeline_fence_wait_tracker *wait;
+       mali_timeline_point point;
+       mali_bool ret;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(fence);
+
+       MALI_DEBUG_PRINT(4, ("Mali Timeline: wait on fence\n"));
+
+       /* Non-blocking poll: just report the fence's current status. */
+       if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY == timeout) {
+               return mali_timeline_fence_wait_check_status(system, fence);
+       }
+
+       wait = mali_timeline_fence_wait_tracker_alloc();
+       if (unlikely(NULL == wait)) {
+               MALI_PRINT_ERROR(("Mali Timeline: failed to allocate data for fence wait\n"));
+               return MALI_FALSE;
+       }
+
+       wait->activated = MALI_FALSE;
+       wait->system = system;
+
+       /* Initialize refcount to two references.  The first reference will be released by this
+        * function after the wait is over.  The second reference will be released when the tracker
+        * is activated. */
+       _mali_osk_atomic_init(&wait->refcount, 2);
+
+       /* Add tracker to timeline system, but not to a timeline. */
+       mali_timeline_tracker_init(&wait->tracker, MALI_TIMELINE_TRACKER_WAIT, fence, wait);
+       point = mali_timeline_system_add_tracker(system, &wait->tracker, MALI_TIMELINE_NONE);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point);
+       MALI_IGNORE(point);
+
+       /* Wait for the tracker to be activated or time out. */
+       if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER == timeout) {
+               _mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait);
+       } else {
+               _mali_osk_wait_queue_wait_event_timeout(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait, timeout);
+       }
+
+       ret = wait->activated;
+
+       /* Drop this function's reference; free only if the activation path
+        * has already dropped the other one. */
+       if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
+               mali_timeline_fence_wait_tracker_free(wait);
+       }
+
+       return ret;
+}
+
+/* Called when the wait tracker's dependencies are met: mark the waiter as
+ * activated, wake the blocked thread, release the tracker, and drop the
+ * activation reference. */
+void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *wait)
+{
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_DEBUG_ASSERT_POINTER(wait);
+       MALI_DEBUG_ASSERT_POINTER(wait->system);
+
+       MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for fence wait tracker\n"));
+
+       MALI_DEBUG_ASSERT(MALI_FALSE == wait->activated);
+       wait->activated = MALI_TRUE;
+
+       _mali_osk_wait_queue_wake_up(wait->system->wait_queue);
+
+       /* Nothing can wait on this tracker, so nothing to schedule after release. */
+       schedule_mask = mali_timeline_tracker_release(&wait->tracker);
+       MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);
+       MALI_IGNORE(schedule_mask);
+
+       /* Drop the activation reference; free only if the waiting thread has
+        * already dropped its reference. */
+       if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
+               mali_timeline_fence_wait_tracker_free(wait);
+       }
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_timeline_fence_wait.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_timeline_fence_wait.h
new file mode 100644 (file)
index 0000000..fbe5a50
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_timeline_fence_wait.h
+ *
+ * This file contains functions used to wait until a Timeline fence is signaled.
+ */
+
+#ifndef __MALI_TIMELINE_FENCE_WAIT_H__
+#define __MALI_TIMELINE_FENCE_WAIT_H__
+
+#include "mali_osk.h"
+#include "mali_timeline.h"
+
+/**
+ * If used as the timeout argument in @ref mali_timeline_fence_wait, a timer is not used and the
+ * function only returns when the fence is signaled.
+ */
+#define MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER ((u32) -1)
+
+/**
+ * If used as the timeout argument in @ref mali_timeline_fence_wait, the function will return
+ * immediately with the current state of the fence.
+ */
+#define MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY 0
+
+/**
+ * Fence wait tracker.
+ *
+ * The fence wait tracker is added to the Timeline system with the fence we are waiting on as a
+ * dependency.  We will then perform a blocking wait, possibly with a timeout, until the tracker is
+ * activated, which happens when the fence is signaled.
+ *
+ * The tracker is reference counted: the waiting thread and the Timeline system's activation path
+ * each hold a reference, and whichever side drops the count to zero frees the tracker.
+ */
+struct mali_timeline_fence_wait_tracker {
+       mali_bool activated;                  /**< MALI_TRUE if the tracker has been activated, MALI_FALSE if not. */
+       _mali_osk_atomic_t refcount;          /**< Reference count shared by the waiter and the activation path. */
+       struct mali_timeline_system *system;  /**< Timeline system. */
+       struct mali_timeline_tracker tracker; /**< Timeline tracker. */
+};
+
+/**
+ * Wait for a fence to be signaled, or timeout is reached.
+ *
+ * @param system Timeline system.
+ * @param fence Fence to wait on.
+ * @param timeout Timeout in ms, or MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER or
+ * MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY.
+ * @return MALI_TRUE if signaled, MALI_FALSE if timed out.
+ */
+mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout);
+
+/**
+ * Used by the Timeline system to activate a fence wait tracker.
+ *
+ * @param fence_wait_tracker Fence waiter tracker.
+ */
+void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *fence_wait_tracker);
+
+#endif /* __MALI_TIMELINE_FENCE_WAIT_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_timeline_sync_fence.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_timeline_sync_fence.c
new file mode 100644 (file)
index 0000000..f8a99a7
--- /dev/null
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_timeline_sync_fence.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_sync.h"
+
+#if defined(CONFIG_SYNC)
+
+/**
+ * Creates a sync fence tracker and a sync fence.  Adds sync fence tracker to Timeline system and
+ * returns sync fence.  The sync fence will be signaled when the sync fence tracker is activated.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return Sync fence that will be signaled when tracker is activated.
+ */
+/**
+ * Creates a sync fence tracker and a sync fence.  Adds sync fence tracker to Timeline system and
+ * returns sync fence.  The sync fence will be signaled when the sync fence tracker is activated.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return Sync fence that will be signaled when tracker is activated, or NULL on allocation failure.
+ */
+static struct sync_fence *mali_timeline_sync_fence_create_and_add_tracker(struct mali_timeline *timeline, mali_timeline_point point)
+{
+       struct mali_timeline_sync_fence_tracker *sync_fence_tracker;
+       struct sync_fence                       *sync_fence;
+       struct mali_timeline_fence               fence;
+
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
+       /* Allocate sync fence tracker. */
+       sync_fence_tracker = _mali_osk_calloc(1, sizeof(struct mali_timeline_sync_fence_tracker));
+       if (NULL == sync_fence_tracker) {
+               MALI_PRINT_ERROR(("Mali Timeline: sync_fence_tracker allocation failed\n"));
+               return NULL;
+       }
+
+       /* Create sync flag. */
+       MALI_DEBUG_ASSERT_POINTER(timeline->sync_tl);
+       sync_fence_tracker->flag = mali_sync_flag_create(timeline->sync_tl, point);
+       if (NULL == sync_fence_tracker->flag) {
+               MALI_PRINT_ERROR(("Mali Timeline: sync_flag creation failed\n"));
+               _mali_osk_free(sync_fence_tracker);
+               return NULL;
+       }
+
+       /* Create sync fence from sync flag. */
+       sync_fence = mali_sync_flag_create_fence(sync_fence_tracker->flag);
+       if (NULL == sync_fence) {
+               MALI_PRINT_ERROR(("Mali Timeline: sync_fence creation failed\n"));
+               mali_sync_flag_put(sync_fence_tracker->flag);
+               _mali_osk_free(sync_fence_tracker);
+               return NULL;
+       }
+
+       /* Setup fence for tracker: depend only on the given point on this timeline. */
+       _mali_osk_memset(&fence, 0, sizeof(struct mali_timeline_fence));
+       fence.sync_fd = -1;
+       fence.points[timeline->id] = point;
+
+       /* Finally, add the tracker to Timeline system.  A sync tracker is not placed on any
+        * timeline, so no new point is returned. */
+       mali_timeline_tracker_init(&sync_fence_tracker->tracker, MALI_TIMELINE_TRACKER_SYNC, &fence, sync_fence_tracker);
+       point = mali_timeline_system_add_tracker(timeline->system, &sync_fence_tracker->tracker, MALI_TIMELINE_NONE);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point);
+       MALI_IGNORE(point);
+
+       return sync_fence;
+}
+
+/* Build a single sync fence file descriptor representing the given Timeline fence.
+ * Per-timeline sync fences are created and merged together, along with any external
+ * sync fence referenced by fence->sync_fd.  If the Timeline fence has no dependencies,
+ * an already-signaled fence is returned.  Returns an fd, or -1 on error. */
+s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct mali_timeline_fence *fence)
+{
+       u32 i;
+       struct sync_fence *sync_fence_acc = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(fence);
+
+       /* Create one sync fence per timeline point in the Timeline fence and
+        * accumulate them into a single merged fence. */
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               struct mali_timeline *timeline;
+               struct sync_fence *sync_fence;
+
+               if (MALI_TIMELINE_NO_POINT == fence->points[i]) continue;
+
+               timeline = system->timelines[i];
+               MALI_DEBUG_ASSERT_POINTER(timeline);
+
+               sync_fence = mali_timeline_sync_fence_create_and_add_tracker(timeline, fence->points[i]);
+               if (NULL == sync_fence) goto error;
+
+               if (NULL != sync_fence_acc) {
+                       /* Merge sync fences.  NOTE(review): assumes mali_sync_fence_merge()
+                        * consumes both input fences, including on failure — confirm against
+                        * its implementation. */
+                       sync_fence_acc = mali_sync_fence_merge(sync_fence_acc, sync_fence);
+                       if (NULL == sync_fence_acc) goto error;
+               } else {
+                       /* This was the first sync fence created. */
+                       sync_fence_acc = sync_fence;
+               }
+       }
+
+       /* Also fold in the external sync fence, if one was supplied. */
+       if (-1 != fence->sync_fd) {
+               struct sync_fence *sync_fence;
+
+               sync_fence = sync_fence_fdget(fence->sync_fd);
+               if (NULL == sync_fence) goto error;
+
+               if (NULL != sync_fence_acc) {
+                       sync_fence_acc = mali_sync_fence_merge(sync_fence_acc, sync_fence);
+                       if (NULL == sync_fence_acc) goto error;
+               } else {
+                       sync_fence_acc = sync_fence;
+               }
+       }
+
+       if (NULL == sync_fence_acc) {
+               MALI_DEBUG_ASSERT_POINTER(system->signaled_sync_tl);
+
+               /* There was nothing to wait on, so return an already signaled fence. */
+
+               sync_fence_acc = mali_sync_timeline_create_signaled_fence(system->signaled_sync_tl);
+               if (NULL == sync_fence_acc) goto error;
+       }
+
+       /* Return file descriptor for the accumulated sync fence. */
+       return mali_sync_fence_fd_alloc(sync_fence_acc);
+
+error:
+       if (NULL != sync_fence_acc) {
+               sync_fence_put(sync_fence_acc);
+       }
+
+       return -1;
+}
+
+/* Called by the Timeline system when the tracker's dependencies are met: signals the
+ * sync flag (which signals the sync fence created from it), releases the tracker and
+ * frees it.  The tracker was allocated in mali_timeline_sync_fence_create_and_add_tracker(). */
+void mali_timeline_sync_fence_activate(struct mali_timeline_sync_fence_tracker *sync_fence_tracker)
+{
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_fence_tracker);
+       MALI_DEBUG_ASSERT_POINTER(sync_fence_tracker->flag);
+
+       MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for sync fence tracker\n"));
+
+       /* Signal flag and release reference. */
+       mali_sync_flag_signal(sync_fence_tracker->flag, 0);
+       mali_sync_flag_put(sync_fence_tracker->flag);
+
+       /* Nothing can wait on this tracker, so nothing to schedule after release. */
+       schedule_mask = mali_timeline_tracker_release(&sync_fence_tracker->tracker);
+       MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);
+       MALI_IGNORE(schedule_mask);
+
+       _mali_osk_free(sync_fence_tracker);
+}
+
+#endif /* defined(CONFIG_SYNC) */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_timeline_sync_fence.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_timeline_sync_fence.h
new file mode 100644 (file)
index 0000000..3c0e314
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_timeline_sync_fence.h
+ *
+ * This file contains code related to creating sync fences from timeline fences.
+ * Only compiled in when the kernel sync framework (CONFIG_SYNC) is enabled.
+ */
+
+#ifndef __MALI_TIMELINE_SYNC_FENCE_H__
+#define __MALI_TIMELINE_SYNC_FENCE_H__
+
+#include "mali_timeline.h"
+
+#if defined(CONFIG_SYNC)
+
+/**
+ * Sync fence tracker.
+ *
+ * Allocated when a sync fence is created from a Timeline fence, and freed by
+ * mali_timeline_sync_fence_activate() once the tracker is activated.
+ */
+struct mali_timeline_sync_fence_tracker {
+       struct mali_sync_flag        *flag;    /**< Sync flag used to connect tracker and sync fence. */
+       struct mali_timeline_tracker  tracker; /**< Timeline tracker. */
+};
+
+/**
+ * Create a sync fence that will be signaled when @ref fence is signaled.
+ *
+ * @param system Timeline system.
+ * @param fence Fence to create sync fence from.
+ * @return File descriptor for new sync fence, or -1 on error.
+ */
+s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct mali_timeline_fence *fence);
+
+/**
+ * Used by the Timeline system to activate a sync fence tracker.
+ *
+ * @param sync_fence_tracker Sync fence tracker.
+ *
+ */
+void mali_timeline_sync_fence_activate(struct mali_timeline_sync_fence_tracker *sync_fence_tracker);
+
+#endif /* defined(CONFIG_SYNC) */
+
+#endif /* __MALI_TIMELINE_SYNC_FENCE_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_ukk.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_ukk.h
new file mode 100644 (file)
index 0000000..4abbb2e
--- /dev/null
@@ -0,0 +1,614 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __MALI_UKK_H__
+#define __MALI_UKK_H__
+
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * - The _mali_uk functions are an abstraction of the interface to the device
+ * driver. On certain OSs, this would be implemented via the IOCTL interface.
+ * On other OSs, it could be via extension of some Device Driver Class, or
+ * direct function call for Bare metal/RTOSs.
+ * - It is important to note that:
+ *   -  The Device Driver has implemented the _mali_ukk set of functions
+ *   -  The Base Driver calls the corresponding set of _mali_uku functions.
+ * - What requires porting is solely the calling mechanism from User-side to
+ * Kernel-side, and propagating back the results.
+ * - Each U/K function is associated with a (group, number) pair from
+ * \ref _mali_uk_functions to make it possible for a common function in the
+ * Base Driver and Device Driver to route User/Kernel calls from/to the
+ * correct _mali_uk function. For example, in an IOCTL system, the IOCTL number
+ * would be formed based on the group and number assigned to the _mali_uk
+ * function, as listed in \ref _mali_uk_functions. On the user-side, each
+ * _mali_uku function would just make an IOCTL with the IOCTL-code being an
+ * encoded form of the (group, number) pair. On the kernel-side, the Device
+ * Driver's IOCTL handler decodes the IOCTL-code back into a (group, number)
+ * pair, and uses this to determine which corresponding _mali_ukk should be
+ * called.
+ *   - Refer to \ref _mali_uk_functions for more information about this
+ * (group, number) pairing.
+ * - In a system where there is no distinction between user and kernel-side,
+ * the U/K interface may be implemented as:@code
+ * MALI_STATIC_INLINE _mali_osk_errcode_t _mali_uku_examplefunction( _mali_uk_examplefunction_s *args )
+ * {
+ *     return mali_ukk_examplefunction( args );
+ * }
+ * @endcode
+ * - Therefore, all U/K calls behave \em as \em though they were direct
+ * function calls (but the \b implementation \em need \em not be a direct
+ * function calls)
+ *
+ * @note Naming the _mali_uk functions the same on both User and Kernel sides
+ * on non-RTOS systems causes debugging issues when setting breakpoints. In
+ * this case, it is not clear which function the breakpoint is put on.
+ * Therefore the _mali_uk functions in user space are prefixed with \c _mali_uku
+ * and in kernel space with \c _mali_ukk. The naming for the argument
+ * structures is unaffected.
+ *
+ * - The _mali_uk functions are synchronous.
+ * - Arguments to the _mali_uk functions are passed in a structure. The only
+ * parameter passed to the _mali_uk functions is a pointer to this structure.
+ * This first member of this structure, ctx, is a pointer to a context returned
+ * by _mali_uku_open(). For example:@code
+ * typedef struct
+ * {
+ *     void *ctx;
+ *     u32 number_of_cores;
+ * } _mali_uk_get_gp_number_of_cores_s;
+ * @endcode
+ *
+ * - Each _mali_uk function has its own argument structure named after the
+ *  function. The argument is distinguished by the _s suffix.
+ * - The argument types are defined by the base driver and user-kernel
+ *  interface.
+ * - All _mali_uk functions return a standard \ref _mali_osk_errcode_t.
+ * - Only arguments of type input or input/output need be initialized before
+ * calling a _mali_uk function.
+ * - Arguments of type output and input/output are only valid when the
+ * _mali_uk function returns \ref _MALI_OSK_ERR_OK.
+ * - The \c ctx member is always invalid after it has been used by a
+ * _mali_uk function, except for the context management functions
+ *
+ *
+ * \b Interface \b restrictions
+ *
+ * The requirements of the interface mean that an implementation of the
+ * User-kernel interface may do no 'real' work. For example, the following are
+ * illegal in the User-kernel implementation:
+ * - Calling functions necessary for operation on all systems,  which would
+ * not otherwise get called on RTOS systems.
+ *     - For example, a  U/K interface that calls multiple _mali_ukk functions
+ * during one particular U/K call. This could not be achieved by the same code
+ * which uses direct function calls for the U/K interface.
+ * -  Writing in values to the args members, when otherwise these members would
+ * not hold a useful value for a direct function call U/K interface.
+ *     - For example, U/K interface implementation that take NULL members in
+ * their arguments structure from the user side, but those members are
+ * replaced with non-NULL values in the kernel-side of the U/K interface
+ * implementation. A scratch area for writing data is one such example. In this
+ * case, a direct function call U/K interface would segfault, because no code
+ * would be present to replace the NULL pointer with a meaningful pointer.
+ *     - Note that we discourage the case where the U/K implementation changes
+ * a NULL argument member to non-NULL, and then the Device Driver code (outside
+ * of the U/K layer) re-checks this member for NULL, and corrects it when
+ * necessary. Whilst such code works even on direct function call U/K
+ * interfaces, it reduces the testing coverage of the Device Driver code. This
+ * is because we have no way of testing the NULL == value path on an OS
+ * implementation.
+ *
+ * A number of allowable examples exist where U/K interfaces do 'real' work:
+ * - The 'pointer switching' technique for \ref _mali_ukk_get_system_info
+ *     - In this case, without the pointer switching on direct function call
+ * U/K interface, the Device Driver code still sees the same thing: a pointer
+ * to which it can write memory. This is because such a system has no
+ * distinction between a user and kernel pointer.
+ * - Writing an OS-specific value into the ukk_private member for
+ * _mali_ukk_mem_mmap().
+ *     - In this case, this value is passed around by Device Driver code, but
+ * its actual value is never checked. Device Driver code simply passes it from
+ * the U/K layer to the OSK layer, where it can be acted upon. In this case,
+ * \em some OS implementations of the U/K (_mali_ukk_mem_mmap()) and OSK
+ * (_mali_osk_mem_mapregion_init()) functions will collaborate on the
+ *  meaning of ukk_private member. On other OSs, it may be unused by both
+ * U/K and OSK layers
+ *     - Therefore, on error inside the U/K interface implementation itself,
+ * it will be as though the _mali_ukk function itself had failed, and cleaned
+ * up after itself.
+ *     - Compare this to a direct function call U/K implementation, where all
+ * error cleanup is handled by the _mali_ukk function itself. The direct
+ * function call U/K interface implementation is automatically atomic.
+ *
+ * The last example highlights a consequence of all U/K interface
+ * implementations: they must be atomic with respect to the Device Driver code.
+ * And therefore, should Device Driver code succeed but the U/K implementation
+ * fail afterwards (but before return to user-space), then the U/K
+ * implementation must cause appropriate cleanup actions to preserve the
+ * atomicity of the interface.
+ *
+ * @{
+ */
+
+
+/** @defgroup _mali_uk_context U/K Context management
+ *
+ * These functions allow for initialisation of the user-kernel interface once per process.
+ *
+ * Generally the context will store the OS specific object to communicate with the kernel device driver and further
+ * state information required by the specific implementation. The context is shareable among all threads in the caller process.
+ *
+ * On IOCTL systems, this is likely to be a file descriptor as a result of opening the kernel device driver.
+ *
+ * On a bare-metal/RTOS system with no distinction between kernel and
+ * user-space, the U/K interface simply calls the _mali_ukk variant of the
+ * function by direct function call. In this case, the context returned is the
+ * mali_session_data from _mali_ukk_open().
+ *
+ * The kernel side implementations of the U/K interface expect the first member of the argument structure to
+ * be the context created by _mali_uku_open(). On some OS implementations, the meaning of this context
+ * will be different between user-side and kernel-side. In which case, the kernel-side will need to replace this context
+ * with the kernel-side equivalent, because user-side will not have access to kernel-side data. The context parameter
+ * in the argument structure therefore has to be of type input/output.
+ *
+ * It should be noted that the caller cannot reuse the \c ctx member of U/K
+ * argument structure after a U/K call, because it may be overwritten. Instead,
+ * the context handle must always be stored  elsewhere, and copied into
+ * the appropriate U/K argument structure for each user-side call to
+ * the U/K interface. This is not usually a problem, since U/K argument
+ * structures are usually placed on the stack.
+ *
+ * @{ */
+
+/** @brief Begin a new Mali Device Driver session
+ *
+ * This is used to obtain a per-process context handle for all future U/K calls.
+ *
+ * @param context pointer to storage to return a (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_open( void **context );
+
+/** @brief End a Mali Device Driver session
+ *
+ * This should be called when the process no longer requires use of the Mali Device Driver.
+ *
+ * The context handle must not be used after it has been closed.
+ *
+ * @param context pointer to a stored (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_close( void **context );
+
+/** @} */ /* end group _mali_uk_context */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ *
+ * The core functions provide the following functionality:
+ * - verify that the user and kernel API are compatible
+ * - retrieve information about the cores and memory banks in the system
+ * - wait for the result of jobs started on a core
+ *
+ * @{ */
+
+/** @brief Waits for a job notification.
+ *
+ * Sleeps until notified or a timeout occurs. Returns information about the notification.
+ *
+ * @param args see _mali_uk_wait_for_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args );
+
+/** @brief Post a notification to the notification queue of this application.
+ *
+ * @param args see _mali_uk_post_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args );
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * @param args see _mali_uk_get_api_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args );
+
+/** @brief Get the user space settings applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_settings_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args);
+
+/** @brief Get a user space setting applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_setting_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args);
+
+/** @brief Grant or deny high priority scheduling for this session.
+ *
+ * @param args see _mali_uk_request_high_priority_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args);
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ *
+ * The memory functions provide functionality with and without a Mali-MMU present.
+ *
+ * For Mali-MMU based systems, the following functionality is provided:
+ * - Initialize and terminate MALI virtual address space
+ * - Allocate/deallocate physical memory to a MALI virtual address range and map into/unmap from the
+ * current process address space
+ * - Map/unmap external physical memory into the MALI virtual address range
+ *
+ * For Mali-nonMMU based systems:
+ * - Allocate/deallocate MALI memory
+ *
+ * @{ */
+
+/** @brief Map Mali Memory into the current user process
+ *
+ * Maps Mali memory into the current user process in a generic way.
+ *
+ * This function is to be used for Mali-MMU mode. The function is available in both Mali-MMU and Mali-nonMMU modes,
+ * but should not be called by a user process in Mali-nonMMU mode.
+ *
+ * The implementation and operation of _mali_ukk_mem_mmap() is dependent on whether the driver is built for Mali-MMU
+ * or Mali-nonMMU:
+ * - In the nonMMU case, _mali_ukk_mem_mmap() requires a physical address to be specified. For this reason, an OS U/K
+ * implementation should not allow this to be called from user-space. In any case, nonMMU implementations are
+ * inherently insecure, and so the overall impact is minimal. Mali-MMU mode should be used if security is desired.
+ * - In the MMU case, _mali_ukk_mem_mmap() the _mali_uk_mem_mmap_s::phys_addr
+ * member is used for the \em Mali-virtual address desired for the mapping. The
+ * implementation of _mali_ukk_mem_mmap() will allocate both the CPU-virtual
+ * and CPU-physical addresses, and can cope with mapping a contiguous virtual
+ * address range to a sequence of non-contiguous physical pages. In this case,
+ * the CPU-physical addresses are not communicated back to the user-side, as
+ * they are unnecessary; the \em Mali-virtual address range must be used for
+ * programming Mali structures.
+ *
+ * In the second (MMU) case, _mali_ukk_mem_mmap() handles management of
+ * CPU-virtual and CPU-physical ranges, but the \em caller must manage the
+ * \em Mali-virtual address range from the user-side.
+ *
+ * @note Mali-virtual address ranges are entirely separate between processes.
+ * It is not possible for a process to accidentally corrupt another process'
+ * \em Mali-virtual address space.
+ *
+ * @param args see _mali_uk_mem_mmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args );
+
+/** @brief Unmap Mali Memory from the current user process
+ *
+ * Unmaps Mali memory from the current user process in a generic way. This only operates on Mali memory supplied
+ * from _mali_ukk_mem_mmap().
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args );
+
+/** @brief Determine the buffer size necessary for an MMU page table dump.
+ * @param args see _mali_uk_query_mmu_page_table_dump_size_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args );
+/** @brief Dump MMU Page tables.
+ * @param args see _mali_uk_dump_mmu_page_table_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args );
+
+/** @brief Write user data to specified Mali memory without causing segfaults.
+ * @param args see _mali_uk_mem_write_safe_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_write_safe( _mali_uk_mem_write_safe_s *args );
+
+/** @brief Map a physically contiguous range of memory into Mali
+ * @param args see _mali_uk_map_external_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args );
+
+/** @brief Unmap a physically contiguous range of memory from Mali
+ * @param args see _mali_uk_unmap_external_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args );
+
+#if defined(CONFIG_MALI400_UMP)
+/** @brief Map UMP memory into Mali
+ * @param args see _mali_uk_attach_ump_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args );
+/** @brief Unmap UMP memory from Mali
+ * @param args see _mali_uk_release_ump_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args );
+#endif /* CONFIG_MALI400_UMP */
+
+/** @brief Determine virtual-to-physical mapping of a contiguous memory range
+ * (optional)
+ *
+ * This allows the user-side to do a virtual-to-physical address translation.
+ * In conjunction with _mali_uku_map_external_mem, this can be used to do
+ * direct rendering.
+ *
+ * This function will only succeed on a virtual range that is mapped into the
+ * current process, and that is contiguous.
+ *
+ * If va is not page-aligned, then it is rounded down to the next page
+ * boundary. The remainder is added to size, such that ((u32)va)+size before
+ * rounding is equal to ((u32)va)+size after rounding. The rounded modified
+ * va and size will be written out into args on success.
+ *
+ * If the supplied size is zero, or not a multiple of the system's PAGE_SIZE,
+ * then size will be rounded up to the next multiple of PAGE_SIZE before
+ * translation occurs. The rounded up size will be written out into args on
+ * success.
+ *
+ * On most OSs, virtual-to-physical address translation is a privileged
+ * function. Therefore, the implementer must validate the range supplied, to
+ * ensure they are not providing arbitrary virtual-to-physical address
+ * translations. While it is unlikely such a mechanism could be used to
+ * compromise the security of a system on its own, it is possible it could be
+ * combined with another small security risk to cause a much larger security
+ * risk.
+ *
+ * @note This is an optional part of the interface, and is only used by certain
+ * implementations of libEGL. If the platform layer in your libEGL
+ * implementation does not require Virtual-to-Physical address translation,
+ * then this function need not be implemented. A stub implementation should not
+ * be required either, as it would only be removed by the compiler's dead code
+ * elimination.
+ *
+ * @note if implemented, this function is entirely platform-dependent, and does
+ * not exist in common code.
+ *
+ * @param args see _mali_uk_va_to_mali_pa_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_va_to_mali_pa( _mali_uk_va_to_mali_pa_s * args );
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ *
+ * The Fragment Processor (aka PP (Pixel Processor)) functions provide the following functionality:
+ * - retrieving version of the fragment processors
+ * - determine number of fragment processors
+ * - starting a job on a fragment processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Fragment Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started instead and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_pp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_start_job( void *ctx, _mali_uk_pp_start_job_s *uargs );
+
+/**
+ * @brief Issue a request to start new jobs on both Vertex Processor and Fragment Processor.
+ *
+ * @note Will call into @ref _mali_ukk_pp_start_job and @ref _mali_ukk_gp_start_job.
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_pp_and_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job( void *ctx, _mali_uk_pp_and_gp_start_job_s *uargs );
+
+/** @brief Returns the number of Fragment Processors in the system
+ *
+ * @param args see _mali_uk_get_pp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores( _mali_uk_get_pp_number_of_cores_s *args );
+
+/** @brief Returns the version that all Fragment Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_pp_number_of_cores() indicated at least one Fragment
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version( _mali_uk_get_pp_core_version_s *args );
+
+/** @brief Disable Write-back unit(s) on specified job
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h"
+ */
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args);
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ *
+ * The Vertex Processor (aka GP (Geometry Processor)) functions provide the following functionality:
+ * - retrieving version of the Vertex Processors
+ * - determine number of Vertex Processors available
+ * - starting a job on a Vertex Processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Vertex Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_start_job( void *ctx, _mali_uk_gp_start_job_s *uargs );
+
+/** @brief Returns the number of Vertex Processors in the system.
+ *
+ * @param args see _mali_uk_get_gp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores( _mali_uk_get_gp_number_of_cores_s *args );
+
+/** @brief Returns the version that all Vertex Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_uk_get_gp_number_of_cores() indicated at least one Vertex
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_gp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version( _mali_uk_get_gp_core_version_s *args );
+
+/** @brief Resume or abort suspended Vertex Processor jobs.
+ *
+ * After receiving notification that a Vertex Processor job was suspended from
+ * _mali_ukk_wait_for_notification() you can use this function to resume or abort the job.
+ *
+ * @param args see _mali_uk_gp_suspend_response_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response( _mali_uk_gp_suspend_response_s *args );
+
+/** @} */ /* end group _mali_uk_gp */
+
+#if defined(CONFIG_MALI400_PROFILING)
+/** @addtogroup _mali_uk_profiling U/K Timeline profiling module
+ * @{ */
+
+/** @brief Start recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_start_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args);
+
+/** @brief Add event to profiling buffer.
+ *
+ * @param args see _mali_uk_profiling_add_event_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args);
+
+/** @brief Stop recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_stop_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args);
+
+/** @brief Retrieve a recorded profiling event.
+ *
+ * @param args see _mali_uk_profiling_get_event_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args);
+
+/** @brief Clear recorded profiling events.
+ *
+ * @param args see _mali_uk_profiling_clear_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args);
+
+/** @} */ /* end group _mali_uk_profiling */
+#endif
+
+/** @addtogroup _mali_uk_vsync U/K VSYNC reporting module
+ * @{ */
+
+/** @brief Report events related to vsync.
+ *
+ * @note Events should be reported when starting to wait for vsync and when the
+ * waiting is finished. This information can then be used in kernel space to
+ * complement the GPU utilization metric.
+ *
+ * @param args see _mali_uk_vsync_event_report_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args);
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @addtogroup _mali_sw_counters_report U/K Software counter reporting
+ * @{ */
+
+/** @brief Report software counters.
+ *
+ * @param args see _mali_uk_sw_counters_report_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args);
+
+/** @} */ /* end group _mali_sw_counters_report */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+u32 _mali_ukk_report_memory_usage(void);
+
+u32 _mali_ukk_utilization_gp_pp(void);
+
+u32 _mali_ukk_utilization_gp(void);
+
+u32 _mali_ukk_utilization_pp(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_user_settings_db.c b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_user_settings_db.c
new file mode 100644 (file)
index 0000000..16f23ea
--- /dev/null
@@ -0,0 +1,146 @@
+/**
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_user_settings_db.h"
+#include "mali_session.h"
+
+static u32 mali_user_settings[_MALI_UK_USER_SETTING_MAX];
+const char *_mali_uk_user_setting_descriptions[] = _MALI_UK_USER_SETTING_DESCRIPTIONS;
+
+static void mali_user_settings_notify(_mali_uk_user_setting_t setting, u32 value)
+{
+       mali_bool done = MALI_FALSE;
+
+       /*
+        * This function gets a bit complicated because we can't hold the session lock while
+        * allocating notification objects.
+        */
+
+       while (!done) {
+               u32 i;
+               u32 num_sessions_alloc;
+               u32 num_sessions_with_lock;
+               u32 used_notification_objects = 0;
+               _mali_osk_notification_t **notobjs;
+
+               /* Pre allocate the number of notifications objects we need right now (might change after lock has been taken) */
+               num_sessions_alloc = mali_session_get_count();
+               if (0 == num_sessions_alloc) {
+                       /* No sessions to report to */
+                       return;
+               }
+
+               notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
+               if (NULL == notobjs) {
+                       MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n"));
+                       return;
+               }
+
+               for (i = 0; i < num_sessions_alloc; i++) {
+                       notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_SETTINGS_CHANGED,
+                                    sizeof(_mali_uk_settings_changed_s));
+                       if (NULL != notobjs[i]) {
+                               _mali_uk_settings_changed_s *data;
+                               data = notobjs[i]->result_buffer;
+
+                               data->setting = setting;
+                               data->value = value;
+                       } else {
+                               MALI_PRINT_ERROR(("Failed to notify user space session about setting change (alloc failure %u)\n", i));
+                       }
+               }
+
+               mali_session_lock();
+
+               /* number of sessions will not change while we hold the lock */
+               num_sessions_with_lock = mali_session_get_count();
+
+               if (num_sessions_alloc >= num_sessions_with_lock) {
+                       /* We have allocated enough notification objects for all the sessions atm */
+                       struct mali_session_data *session, *tmp;
+                       MALI_SESSION_FOREACH(session, tmp, link) {
+                               MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
+                               if (NULL != notobjs[used_notification_objects]) {
+                                       mali_session_send_notification(session, notobjs[used_notification_objects]);
+                                       notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
+                               }
+                               used_notification_objects++;
+                       }
+                       done = MALI_TRUE;
+               }
+
+               mali_session_unlock();
+
+               /* Delete any remaining/unused notification objects */
+               for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
+                       if (NULL != notobjs[used_notification_objects]) {
+                               _mali_osk_notification_delete(notobjs[used_notification_objects]);
+                       }
+               }
+
+               _mali_osk_free(notobjs);
+       }
+}
+
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value)
+{
+       mali_bool notify = MALI_FALSE;
+
+       if (setting >= _MALI_UK_USER_SETTING_MAX) {
+               MALI_DEBUG_PRINT_ERROR(("Invalid user setting %ud\n"));
+               return;
+       }
+
+       if (mali_user_settings[setting] != value) {
+               notify = MALI_TRUE;
+       }
+
+       mali_user_settings[setting] = value;
+
+       if (notify) {
+               mali_user_settings_notify(setting, value);
+       }
+}
+
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting)
+{
+       if (setting >= _MALI_UK_USER_SETTING_MAX) {
+               return 0;
+       }
+
+       return mali_user_settings[setting];
+}
+
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args)
+{
+       _mali_uk_user_setting_t setting;
+       MALI_DEBUG_ASSERT_POINTER(args);
+
+       setting = args->setting;
+
+       if (_MALI_UK_USER_SETTING_MAX > setting) {
+               args->value = mali_user_settings[setting];
+               return _MALI_OSK_ERR_OK;
+       } else {
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       }
+}
+
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+
+       _mali_osk_memcpy(args->settings, mali_user_settings, sizeof(mali_user_settings));
+
+       return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_user_settings_db.h b/drivers/gpu/arm/mali400/r4p0_rel0/common/mali_user_settings_db.h
new file mode 100644 (file)
index 0000000..1ce9533
--- /dev/null
@@ -0,0 +1,39 @@
+/**
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_USER_SETTINGS_DB_H__
+#define __MALI_USER_SETTINGS_DB_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_uk_types.h"
+
+/** @brief Set Mali user setting in DB
+ *
+ * Update the DB with a new value for \a setting. If the value is different from the previous set value, running sessions will be notified of the change.
+ *
+ * @param setting the setting to be changed
+ * @param value the new value to set
+ */
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value);
+
+/** @brief Get current Mali user setting value from DB
+ *
+ * @param setting the setting to extract
+ * @return the value of the selected setting
+ */
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  /* __MALI_KERNEL_USER_SETTING__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/include/linux/mali/mali_utgard.h b/drivers/gpu/arm/mali400/r4p0_rel0/include/linux/mali/mali_utgard.h
new file mode 100644 (file)
index 0000000..b300beb
--- /dev/null
@@ -0,0 +1,418 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_utgard.h
+ * Defines types and interface exposed by the Mali Utgard device driver
+ */
+
+#ifndef __MALI_UTGARD_H__
+#define __MALI_UTGARD_H__
+
+#include "mali_osk_types.h"
+
+#define MALI_GPU_NAME_UTGARD "mali-utgard"
+
+/* Mali-200 */
+
+#define MALI_GPU_RESOURCES_MALI200(base_addr, gp_irq, pp_irq, mmu_irq) \
+       MALI_GPU_RESOURCE_PP(base_addr + 0x0000, pp_irq) \
+       MALI_GPU_RESOURCE_GP(base_addr + 0x2000, gp_irq) \
+       MALI_GPU_RESOURCE_MMU(base_addr + 0x3000, mmu_irq)
+
+/* Mali-300 */
+
+#define MALI_GPU_RESOURCES_MALI300(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq) \
+       MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI300_PMU(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq) \
+       MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq)
+
+/* Mali-400 */
+
+#define MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+       MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+
+#define MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+       MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+
+#define MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0xC000, pp2_irq, base_addr + 0x6000, pp2_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+       MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+
+#define MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0xC000, pp2_irq, base_addr + 0x6000, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0xE000, pp3_irq, base_addr + 0x7000, pp3_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+       MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+
+/* Mali-450 */
+#define MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
+       MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+#define MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+#define MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x0E000, pp3_irq, base_addr + 0x07000, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
+       MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+#define MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x11000) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x28000, pp3_irq, base_addr + 0x1C000, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + 0x2A000, pp4_irq, base_addr + 0x1D000, pp4_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + 0x2C000, pp5_irq, base_addr + 0x1E000, pp5_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
+       MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP6_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+#define MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x0E000, pp3_irq, base_addr + 0x07000, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + 0x11000) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + 0x28000, pp4_irq, base_addr + 0x1C000, pp4_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + 0x2A000, pp5_irq, base_addr + 0x1D000, pp5_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(6, base_addr + 0x2C000, pp6_irq, base_addr + 0x1E000, pp6_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(7, base_addr + 0x2E000, pp7_irq, base_addr + 0x1F000, pp7_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
+       MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP8_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+#define MALI_GPU_RESOURCE_L2(addr) \
+       { \
+               .name = "Mali_L2", \
+               .flags = IORESOURCE_MEM, \
+               .start = addr, \
+               .end   = addr + 0x200, \
+       },
+
+#define MALI_GPU_RESOURCE_GP(gp_addr, gp_irq) \
+       { \
+               .name = "Mali_GP", \
+               .flags = IORESOURCE_MEM, \
+               .start = gp_addr, \
+               .end =   gp_addr + 0x100, \
+       }, \
+       { \
+               .name = "Mali_GP_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = gp_irq, \
+               .end   = gp_irq, \
+       }, \
+#define MALI_GPU_RESOURCE_GP_WITH_MMU(gp_addr, gp_irq, gp_mmu_addr, gp_mmu_irq) \
+       { \
+               .name = "Mali_GP", \
+               .flags = IORESOURCE_MEM, \
+               .start = gp_addr, \
+               .end =   gp_addr + 0x100, \
+       }, \
+       { \
+               .name = "Mali_GP_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = gp_irq, \
+               .end   = gp_irq, \
+       }, \
+       { \
+               .name = "Mali_GP_MMU", \
+               .flags = IORESOURCE_MEM, \
+               .start = gp_mmu_addr, \
+               .end =   gp_mmu_addr + 0x100, \
+       }, \
+       { \
+               .name = "Mali_GP_MMU_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = gp_mmu_irq, \
+               .end =   gp_mmu_irq, \
+       },
+
+#define MALI_GPU_RESOURCE_PP(pp_addr, pp_irq) \
+       { \
+               .name = "Mali_PP", \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_addr, \
+               .end =   pp_addr + 0x1100, \
+       }, \
+       { \
+               .name = "Mali_PP_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = pp_irq, \
+               .end =   pp_irq, \
+       }, \
+#define MALI_GPU_RESOURCE_PP_WITH_MMU(id, pp_addr, pp_irq, pp_mmu_addr, pp_mmu_irq) \
+       { \
+               .name = "Mali_PP" #id, \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_addr, \
+               .end =   pp_addr + 0x1100, \
+       }, \
+       { \
+               .name = "Mali_PP" #id "_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = pp_irq, \
+               .end =   pp_irq, \
+       }, \
+       { \
+               .name = "Mali_PP" #id "_MMU", \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_mmu_addr, \
+               .end =   pp_mmu_addr + 0x100, \
+       }, \
+       { \
+               .name = "Mali_PP" #id "_MMU_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = pp_mmu_irq, \
+               .end =   pp_mmu_irq, \
+       },
+
+/* Resource entries for a stand-alone MMU instance: MEM window (0x100 bytes)
+ * plus its IRQ line. */
+#define MALI_GPU_RESOURCE_MMU(mmu_addr, mmu_irq) \
+       { \
+               .name = "Mali_MMU", \
+               .flags = IORESOURCE_MEM, \
+               .start = mmu_addr, \
+               .end =   mmu_addr + 0x100, \
+       }, \
+       { \
+               .name = "Mali_MMU_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = mmu_irq, \
+               .end =   mmu_irq, \
+       },
+
+/* MEM resource entry for the Mali in-built PMU register window (no IRQ). */
+#define MALI_GPU_RESOURCE_PMU(pmu_addr) \
+       { \
+               .name = "Mali_PMU", \
+               .flags = IORESOURCE_MEM, \
+               .start = pmu_addr, \
+               .end =   pmu_addr + 0x100, \
+       },
+
+/* MEM resource entry for the Mali DMA unit register window. */
+#define MALI_GPU_RESOURCE_DMA(dma_addr) \
+       { \
+               .name = "Mali_DMA", \
+               .flags = IORESOURCE_MEM, \
+               .start = dma_addr, \
+               .end = dma_addr + 0x100, \
+       },
+
+/* MEM resource entry for the DLBU (dynamic load-balancing unit) registers. */
+#define MALI_GPU_RESOURCE_DLBU(dlbu_addr) \
+       { \
+               .name = "Mali_DLBU", \
+               .flags = IORESOURCE_MEM, \
+               .start = dlbu_addr, \
+               .end = dlbu_addr + 0x100, \
+       },
+
+/* MEM resource entry for the broadcast unit register window. */
+#define MALI_GPU_RESOURCE_BCAST(bcast_addr) \
+       { \
+               .name = "Mali_Broadcast", \
+               .flags = IORESOURCE_MEM, \
+               .start = bcast_addr, \
+               .end = bcast_addr + 0x100, \
+       },
+
+#define MALI_GPU_RESOURCE_PP_BCAST(pp_addr, pp_irq) \
+       { \
+               .name = "Mali_PP_Broadcast", \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_addr, \
+               .end =   pp_addr + 0x1100, \
+       }, \
+       { \
+               .name = "Mali_PP_Broadcast_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = pp_irq, \
+               .end =   pp_irq, \
+       }, \
+/* MEM resource entry for the PP MMU broadcast register window (no IRQ). */
+#define MALI_GPU_RESOURCE_PP_MMU_BCAST(pp_mmu_bcast_addr) \
+       { \
+               .name = "Mali_PP_MMU_Broadcast", \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_mmu_bcast_addr, \
+               .end = pp_mmu_bcast_addr + 0x100, \
+       },
+
+/* Periodic GPU utilization sample passed to the platform's
+ * utilization_callback (see struct mali_gpu_device_data).  All values are
+ * on a 0..256 scale where 256 means fully utilized. */
+struct mali_gpu_utilization_data {
+       unsigned int utilization_gpu; /* Utilization for GP and all PP cores combined, 0 = no utilization, 256 = full utilization */
+       unsigned int utilization_gp;  /* Utilization for GP core only, 0 = no utilization, 256 = full utilization */
+       unsigned int utilization_pp;  /* Utilization for all PP cores combined, 0 = no utilization, 256 = full utilization */
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+       /* Window-job statistics, only compiled in with the power/performance
+        * policy; consumed by the platform's frequency-tuning logic. */
+       unsigned int number_of_window_jobs;
+       unsigned int number_of_window_jobs_under_pressure;
+#endif
+};
+
+/* Platform configuration handed to the Mali driver through the platform
+ * device's platform_data: memory layout, job limits, utilization reporting
+ * and PMU wiring details. */
+struct mali_gpu_device_data {
+       /* Dedicated GPU memory range (physical). */
+       unsigned long dedicated_mem_start;
+       unsigned long dedicated_mem_size;
+
+       /* Shared GPU memory */
+       unsigned long shared_mem_size;
+
+       /* Frame buffer memory to be accessible by Mali GPU (physical) */
+       unsigned long fb_start;
+       unsigned long fb_size;
+
+       /* Max runtime [ms] for jobs */
+       int max_job_runtime;
+
+       /* Report GPU utilization in this interval (specified in ms) */
+       unsigned long utilization_interval;
+
+       /* Function that will receive periodic GPU utilization numbers */
+       void (*utilization_callback)(struct mali_gpu_utilization_data *data);
+
+       /*
+        * Mali PMU switch delay.
+        * Only needed if the power gates are connected to the PMU in a high fanout
+        * network. This value is the number of Mali clock cycles it takes to
+        * enable the power gates and turn on the power mesh.
+        * This value will have no effect if a daisy chain implementation is used.
+        */
+       u32 pmu_switch_delay;
+
+
+       /* Mali Dynamic power domain configuration in sequence from 0-11
+        *  GP  PP0 PP1  PP2  PP3  PP4  PP5  PP6  PP7, L2$0 L2$1 L2$2
+        */
+       u16 pmu_domain_config[12];
+
+       /* Platform callback for frequency tuning; needed when
+        * CONFIG_MALI400_POWER_PERFORMANCE_POLICY is enabled. */
+       int (*set_freq_callback)(unsigned int mhz);
+};
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ *
+ * Called to power down all cores.
+ *
+ * @return 0 on success, otherwise error.
+ */
+int mali_pmu_powerdown(void);
+
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ *
+ * Called to power up all cores.
+ *
+ * @return 0 on success, otherwise error.
+ */
+int mali_pmu_powerup(void);
+
+/**
+ * Pause the scheduling and power state changes of Mali device driver.
+ * mali_dev_resume() must always be called as soon as possible after this function
+ * in order to resume normal operation of the Mali driver.
+ */
+void mali_dev_pause(void);
+
+/**
+ * Resume scheduling and allow power changes in Mali device driver.
+ * This must always be called after mali_dev_pause().
+ */
+void mali_dev_resume(void);
+
+/** @brief Set the desired number of PP cores to use.
+ *
+ * The internal Mali PMU will be used, if present, to physically power off the PP cores.
+ *
+ * @param num_cores The number of desired cores
+ * @return 0 on success, otherwise error. -EINVAL means an invalid number of cores was specified.
+ */
+int mali_perf_set_num_pp_cores(unsigned int num_cores);
+
+#endif
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/include/linux/mali/mali_utgard_counters.h b/drivers/gpu/arm/mali400/r4p0_rel0/include/linux/mali/mali_utgard_counters.h
new file mode 100644 (file)
index 0000000..5513a6c
--- /dev/null
@@ -0,0 +1,261 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/* NOTE(review): guard names starting with '_' + uppercase are reserved for
+ * the implementation (C standard 7.1.3); kept as-is to match the upstream
+ * ARM release. */
+#ifndef _MALI_UTGARD_COUNTERS_H_
+#define _MALI_UTGARD_COUNTERS_H_
+
+/* Placeholder type kept only for API compatibility; carries no data. */
+typedef struct {
+       void *unused;
+} mali_cinstr_counter_info;
+
+/* Base offsets partitioning the counter ID space per source; each source
+ * owns 1000 IDs (see the FIRST/LAST range macros below). */
+typedef enum {
+       MALI_CINSTR_COUNTER_SOURCE_EGL      =     0,
+       MALI_CINSTR_COUNTER_SOURCE_OPENGLES =  1000,
+       MALI_CINSTR_COUNTER_SOURCE_OPENVG   =  2000,
+       MALI_CINSTR_COUNTER_SOURCE_GP       =  3000,
+       MALI_CINSTR_COUNTER_SOURCE_PP       =  4000,
+} cinstr_counter_source;
+
+/* Inclusive [FIRST, LAST] counter ID range for each source. */
+#define MALI_CINSTR_EGL_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_EGL
+#define MALI_CINSTR_EGL_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_EGL + 999)
+
+#define MALI_CINSTR_GLES_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_OPENGLES
+#define MALI_CINSTR_GLES_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 999)
+
+#define MALI_CINSTR_VG_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_OPENVG
+#define MALI_CINSTR_VG_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_OPENVG + 999)
+
+#define MALI_CINSTR_GP_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_GP
+#define MALI_CINSTR_GP_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_GP + 999)
+
+#define MALI_CINSTR_PP_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_PP
+#define MALI_CINSTR_PP_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_PP + 999)
+
+
+/* Complete counter ID table for all sources.  The numeric values are ABI
+ * shared with user-space instrumentation tools -- do not renumber.  Note
+ * that PP IDs +56/+57 are deliberately assigned twice: the palette-cache
+ * names apply to Mali-200, the compressed-texture-cache names to the
+ * Mali-400 class (same hardware counter slot, different meaning). */
+typedef enum {
+       /* EGL counters */
+
+       MALI_CINSTR_EGL_BLIT_TIME                                            = MALI_CINSTR_COUNTER_SOURCE_EGL + 0,
+
+       /* Last counter in the EGL set */
+       MALI_CINSTR_EGL_MAX_COUNTER                                           = MALI_CINSTR_COUNTER_SOURCE_EGL + 1,
+
+       /* GLES counters */
+
+       MALI_CINSTR_GLES_DRAW_ELEMENTS_CALLS                                 = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 0,
+       MALI_CINSTR_GLES_DRAW_ELEMENTS_NUM_INDICES                           = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 1,
+       MALI_CINSTR_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED                       = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 2,
+       MALI_CINSTR_GLES_DRAW_ARRAYS_CALLS                                   = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 3,
+       MALI_CINSTR_GLES_DRAW_ARRAYS_NUM_TRANSFORMED                         = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 4,
+       MALI_CINSTR_GLES_DRAW_POINTS                                         = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 5,
+       MALI_CINSTR_GLES_DRAW_LINES                                          = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 6,
+       MALI_CINSTR_GLES_DRAW_LINE_LOOP                                      = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 7,
+       MALI_CINSTR_GLES_DRAW_LINE_STRIP                                     = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 8,
+       MALI_CINSTR_GLES_DRAW_TRIANGLES                                      = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 9,
+       MALI_CINSTR_GLES_DRAW_TRIANGLE_STRIP                                 = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 10,
+       MALI_CINSTR_GLES_DRAW_TRIANGLE_FAN                                   = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 11,
+       MALI_CINSTR_GLES_NON_VBO_DATA_COPY_TIME                              = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 12,
+       MALI_CINSTR_GLES_UNIFORM_BYTES_COPIED_TO_MALI                        = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 13,
+       MALI_CINSTR_GLES_UPLOAD_TEXTURE_TIME                                 = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 14,
+       MALI_CINSTR_GLES_UPLOAD_VBO_TIME                                     = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 15,
+       MALI_CINSTR_GLES_NUM_FLUSHES                                         = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 16,
+       MALI_CINSTR_GLES_NUM_VSHADERS_GENERATED                              = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 17,
+       MALI_CINSTR_GLES_NUM_FSHADERS_GENERATED                              = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 18,
+       MALI_CINSTR_GLES_VSHADER_GEN_TIME                                    = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 19,
+       MALI_CINSTR_GLES_FSHADER_GEN_TIME                                    = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 20,
+       MALI_CINSTR_GLES_INPUT_TRIANGLES                                     = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 21,
+       MALI_CINSTR_GLES_VXCACHE_HIT                                         = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 22,
+       MALI_CINSTR_GLES_VXCACHE_MISS                                        = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 23,
+       MALI_CINSTR_GLES_VXCACHE_COLLISION                                   = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 24,
+       MALI_CINSTR_GLES_CULLED_TRIANGLES                                    = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 25,
+       MALI_CINSTR_GLES_CULLED_LINES                                        = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 26,
+       MALI_CINSTR_GLES_BACKFACE_TRIANGLES                                  = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 27,
+       MALI_CINSTR_GLES_GBCLIP_TRIANGLES                                    = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 28,
+       MALI_CINSTR_GLES_GBCLIP_LINES                                        = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 29,
+       MALI_CINSTR_GLES_TRIANGLES_DRAWN                                     = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 30,
+       MALI_CINSTR_GLES_DRAWCALL_TIME                                       = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 31,
+       MALI_CINSTR_GLES_TRIANGLES_COUNT                                     = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 32,
+       MALI_CINSTR_GLES_INDEPENDENT_TRIANGLES_COUNT                         = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 33,
+       MALI_CINSTR_GLES_STRIP_TRIANGLES_COUNT                               = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 34,
+       MALI_CINSTR_GLES_FAN_TRIANGLES_COUNT                                 = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 35,
+       MALI_CINSTR_GLES_LINES_COUNT                                         = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 36,
+       MALI_CINSTR_GLES_INDEPENDENT_LINES_COUNT                             = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 37,
+       MALI_CINSTR_GLES_STRIP_LINES_COUNT                                   = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 38,
+       MALI_CINSTR_GLES_LOOP_LINES_COUNT                                    = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 39,
+       MALI_CINSTR_GLES_POINTS_COUNT                                        = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 40,
+
+       /* Last counter in the GLES set */
+       MALI_CINSTR_GLES_MAX_COUNTER                                         = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 41,
+
+       /* OpenVG counters */
+
+       MALI_CINSTR_VG_MASK_COUNTER                                          = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 0,
+       MALI_CINSTR_VG_CLEAR_COUNTER                                         = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 1,
+       MALI_CINSTR_VG_APPEND_PATH_COUNTER                                   = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 2,
+       MALI_CINSTR_VG_APPEND_PATH_DATA_COUNTER                              = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 3,
+       MALI_CINSTR_VG_MODIFY_PATH_COORDS_COUNTER                            = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 4,
+       MALI_CINSTR_VG_TRANSFORM_PATH_COUNTER                                = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 5,
+       MALI_CINSTR_VG_INTERPOLATE_PATH_COUNTER                              = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 6,
+       MALI_CINSTR_VG_PATH_LENGTH_COUNTER                                   = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 7,
+       MALI_CINSTR_VG_POINT_ALONG_PATH_COUNTER                              = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 8,
+       MALI_CINSTR_VG_PATH_BOUNDS_COUNTER                                   = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 9,
+       MALI_CINSTR_VG_PATH_TRANSFORMED_BOUNDS_COUNTER                       = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 10,
+       MALI_CINSTR_VG_DRAW_PATH_COUNTER                                     = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 11,
+       MALI_CINSTR_VG_CLEAR_IMAGE_COUNTER                                   = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 12,
+       MALI_CINSTR_VG_IMAGE_SUB_DATA_COUNTER                                = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 13,
+       MALI_CINSTR_VG_GET_IMAGE_SUB_DATA_COUNTER                            = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 14,
+       MALI_CINSTR_VG_COPY_IMAGE_COUNTER                                    = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 15,
+       MALI_CINSTR_VG_DRAW_IMAGE_COUNTER                                    = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 16,
+       MALI_CINSTR_VG_SET_PIXELS_COUNTER                                    = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 17,
+       MALI_CINSTR_VG_WRITE_PIXELS_COUNTER                                  = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 18,
+       MALI_CINSTR_VG_GET_PIXELS_COUNTER                                    = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 19,
+       MALI_CINSTR_VG_READ_PIXELS_COUNTER                                   = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 20,
+       MALI_CINSTR_VG_COPY_PIXELS_COUNTER                                   = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 21,
+       MALI_CINSTR_VG_COLOR_MATRIX_COUNTER                                  = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 22,
+       MALI_CINSTR_VG_CONVOLVE_COUNTER                                      = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 23,
+       MALI_CINSTR_VG_SEPARABLE_CONVOLVE_COUNTER                            = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 24,
+       MALI_CINSTR_VG_GAUSSIAN_BLUR_COUNTER                                 = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 25,
+       MALI_CINSTR_VG_LOOKUP_COUNTER                                        = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 26,
+       MALI_CINSTR_VG_LOOKUP_SINGLE_COUNTER                                 = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 27,
+       MALI_CINSTR_VG_CONTEXT_CREATE_COUNTER                                = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 28,
+       MALI_CINSTR_VG_STROKED_CUBICS_COUNTER                                = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 29,
+       MALI_CINSTR_VG_STROKED_QUADS_COUNTER                                 = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 30,
+       MALI_CINSTR_VG_STROKED_ARCS_COUNTER                                  = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 31,
+       MALI_CINSTR_VG_STROKED_LINES_COUNTER                                 = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 32,
+       MALI_CINSTR_VG_FILLED_CUBICS_COUNTER                                 = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 33,
+       MALI_CINSTR_VG_FILLED_QUADS_COUNTER                                  = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 34,
+       MALI_CINSTR_VG_FILLED_ARCS_COUNTER                                   = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 35,
+       MALI_CINSTR_VG_FILLED_LINES_COUNTER                                  = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 36,
+       MALI_CINSTR_VG_DRAW_PATH_CALLS_COUNTER                               = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 37,
+       MALI_CINSTR_VG_TRIANGLES_COUNTER                                     = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 38,
+       MALI_CINSTR_VG_VERTICES_COUNTER                                      = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 39,
+       MALI_CINSTR_VG_INDICES_COUNTER                                       = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 40,
+       MALI_CINSTR_VG_FILLED_PATHS_COUNTER                                  = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 41,
+       MALI_CINSTR_VG_STROKED_PATHS_COUNTER                                 = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 42,
+       MALI_CINSTR_VG_FILL_EXTRACT_COUNTER                                  = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 43,
+       MALI_CINSTR_VG_DRAW_FILLED_PATH_COUNTER                              = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 44,
+       MALI_CINSTR_VG_STROKE_EXTRACT_COUNTER                                = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 45,
+       MALI_CINSTR_VG_DRAW_STROKED_PATH_COUNTER                             = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 46,
+       MALI_CINSTR_VG_DRAW_PAINT_COUNTER                                    = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 47,
+       MALI_CINSTR_VG_DATA_STRUCTURES_COUNTER                               = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 48,
+       MALI_CINSTR_VG_MEM_PATH_COUNTER                                      = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 49,
+       MALI_CINSTR_VG_RSW_COUNTER                                           = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 50,
+
+       /* Last counter in the VG set */
+       MALI_CINSTR_VG_MAX_COUNTER                                           = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 51,
+
+       /* Mali GP counters */
+
+       MALI_CINSTR_GP_DEPRECATED_0                                          = MALI_CINSTR_COUNTER_SOURCE_GP + 0,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_GP                                      = MALI_CINSTR_COUNTER_SOURCE_GP + 1,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_SHADER                           = MALI_CINSTR_COUNTER_SOURCE_GP + 2,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_STORER                           = MALI_CINSTR_COUNTER_SOURCE_GP + 3,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_LOADER                           = MALI_CINSTR_COUNTER_SOURCE_GP + 4,
+       MALI_CINSTR_GP_CYCLES_VERTEX_LOADER_WAITING_FOR_VERTEX_SHADER        = MALI_CINSTR_COUNTER_SOURCE_GP + 5,
+       MALI_CINSTR_GP_NUMBER_OF_WORDS_READ                                  = MALI_CINSTR_COUNTER_SOURCE_GP + 6,
+       MALI_CINSTR_GP_NUMBER_OF_WORDS_WRITTEN                               = MALI_CINSTR_COUNTER_SOURCE_GP + 7,
+       MALI_CINSTR_GP_NUMBER_OF_READ_BURSTS                                 = MALI_CINSTR_COUNTER_SOURCE_GP + 8,
+       MALI_CINSTR_GP_NUMBER_OF_WRITE_BURSTS                                = MALI_CINSTR_COUNTER_SOURCE_GP + 9,
+       MALI_CINSTR_GP_NUMBER_OF_VERTICES_PROCESSED                          = MALI_CINSTR_COUNTER_SOURCE_GP + 10,
+       MALI_CINSTR_GP_NUMBER_OF_VERTICES_FETCHED                            = MALI_CINSTR_COUNTER_SOURCE_GP + 11,
+       MALI_CINSTR_GP_NUMBER_OF_PRIMITIVES_FETCHED                          = MALI_CINSTR_COUNTER_SOURCE_GP + 12,
+       MALI_CINSTR_GP_RESERVED_13                                           = MALI_CINSTR_COUNTER_SOURCE_GP + 13,
+       MALI_CINSTR_GP_NUMBER_OF_BACKFACE_CULLINGS_DONE                      = MALI_CINSTR_COUNTER_SOURCE_GP + 14,
+       MALI_CINSTR_GP_NUMBER_OF_COMMANDS_WRITTEN_TO_TILES                   = MALI_CINSTR_COUNTER_SOURCE_GP + 15,
+       MALI_CINSTR_GP_NUMBER_OF_MEMORY_BLOCKS_ALLOCATED                     = MALI_CINSTR_COUNTER_SOURCE_GP + 16,
+       MALI_CINSTR_GP_RESERVED_17                                           = MALI_CINSTR_COUNTER_SOURCE_GP + 17,
+       MALI_CINSTR_GP_RESERVED_18                                           = MALI_CINSTR_COUNTER_SOURCE_GP + 18,
+       MALI_CINSTR_GP_NUMBER_OF_VERTEX_LOADER_CACHE_MISSES                  = MALI_CINSTR_COUNTER_SOURCE_GP + 19,
+       MALI_CINSTR_GP_RESERVED_20                                           = MALI_CINSTR_COUNTER_SOURCE_GP + 20,
+       MALI_CINSTR_GP_RESERVED_21                                           = MALI_CINSTR_COUNTER_SOURCE_GP + 21,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_SHADER_COMMAND_PROCESSOR         = MALI_CINSTR_COUNTER_SOURCE_GP + 22,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_COMMAND_PROCESSOR                  = MALI_CINSTR_COUNTER_SOURCE_GP + 23,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_LIST_WRITER                        = MALI_CINSTR_COUNTER_SOURCE_GP + 24,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_THROUGH_THE_PREPARE_LIST_COMMANDS       = MALI_CINSTR_COUNTER_SOURCE_GP + 25,
+       MALI_CINSTR_GP_RESERVED_26                                           = MALI_CINSTR_COUNTER_SOURCE_GP + 26,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_PRIMITIVE_ASSEMBLY                      = MALI_CINSTR_COUNTER_SOURCE_GP + 27,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_VERTEX_FETCHER                     = MALI_CINSTR_COUNTER_SOURCE_GP + 28,
+       MALI_CINSTR_GP_RESERVED_29                                           = MALI_CINSTR_COUNTER_SOURCE_GP + 29,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_BOUNDINGBOX_AND_COMMAND_GENERATOR       = MALI_CINSTR_COUNTER_SOURCE_GP + 30,
+       MALI_CINSTR_GP_RESERVED_31                                           = MALI_CINSTR_COUNTER_SOURCE_GP + 31,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_SCISSOR_TILE_ITERATOR                   = MALI_CINSTR_COUNTER_SOURCE_GP + 32,
+       MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_TILE_ITERATOR                      = MALI_CINSTR_COUNTER_SOURCE_GP + 33,
+       MALI_CINSTR_GP_JOB_COUNT                                             = MALI_CINSTR_COUNTER_SOURCE_GP + 900,
+
+       /* Mali PP counters */
+
+       MALI_CINSTR_PP_ACTIVE_CLOCK_CYCLES_COUNT                             = MALI_CINSTR_COUNTER_SOURCE_PP + 0,
+       MALI_CINSTR_PP_TOTAL_CLOCK_CYCLES_COUNT_REMOVED                      = MALI_CINSTR_COUNTER_SOURCE_PP + 1,
+       MALI_CINSTR_PP_TOTAL_BUS_READS                                       = MALI_CINSTR_COUNTER_SOURCE_PP + 2,
+       MALI_CINSTR_PP_TOTAL_BUS_WRITES                                      = MALI_CINSTR_COUNTER_SOURCE_PP + 3,
+       MALI_CINSTR_PP_BUS_READ_REQUEST_CYCLES_COUNT                         = MALI_CINSTR_COUNTER_SOURCE_PP + 4,
+       MALI_CINSTR_PP_BUS_WRITE_REQUEST_CYCLES_COUNT                        = MALI_CINSTR_COUNTER_SOURCE_PP + 5,
+       MALI_CINSTR_PP_BUS_READ_TRANSACTIONS_COUNT                           = MALI_CINSTR_COUNTER_SOURCE_PP + 6,
+       MALI_CINSTR_PP_BUS_WRITE_TRANSACTIONS_COUNT                          = MALI_CINSTR_COUNTER_SOURCE_PP + 7,
+       MALI_CINSTR_PP_RESERVED_08                                           = MALI_CINSTR_COUNTER_SOURCE_PP + 8,
+       MALI_CINSTR_PP_TILE_WRITEBACK_WRITES                                 = MALI_CINSTR_COUNTER_SOURCE_PP + 9,
+       MALI_CINSTR_PP_STORE_UNIT_WRITES                                     = MALI_CINSTR_COUNTER_SOURCE_PP + 10,
+       MALI_CINSTR_PP_RESERVED_11                                           = MALI_CINSTR_COUNTER_SOURCE_PP + 11,
+       MALI_CINSTR_PP_PALETTE_CACHE_READS                                   = MALI_CINSTR_COUNTER_SOURCE_PP + 12,
+       MALI_CINSTR_PP_TEXTURE_CACHE_UNCOMPRESSED_READS                      = MALI_CINSTR_COUNTER_SOURCE_PP + 13,
+       MALI_CINSTR_PP_POLYGON_LIST_READS                                    = MALI_CINSTR_COUNTER_SOURCE_PP + 14,
+       MALI_CINSTR_PP_RSW_READS                                             = MALI_CINSTR_COUNTER_SOURCE_PP + 15,
+       MALI_CINSTR_PP_VERTEX_CACHE_READS                                    = MALI_CINSTR_COUNTER_SOURCE_PP + 16,
+       MALI_CINSTR_PP_UNIFORM_REMAPPING_READS                               = MALI_CINSTR_COUNTER_SOURCE_PP + 17,
+       MALI_CINSTR_PP_PROGRAM_CACHE_READS                                   = MALI_CINSTR_COUNTER_SOURCE_PP + 18,
+       MALI_CINSTR_PP_VARYING_READS                                         = MALI_CINSTR_COUNTER_SOURCE_PP + 19,
+       MALI_CINSTR_PP_TEXTURE_DESCRIPTORS_READS                             = MALI_CINSTR_COUNTER_SOURCE_PP + 20,
+       MALI_CINSTR_PP_TEXTURE_DESCRIPTORS_REMAPPING_READS                   = MALI_CINSTR_COUNTER_SOURCE_PP + 21,
+       MALI_CINSTR_PP_TEXTURE_CACHE_COMPRESSED_READS                        = MALI_CINSTR_COUNTER_SOURCE_PP + 22,
+       MALI_CINSTR_PP_LOAD_UNIT_READS                                       = MALI_CINSTR_COUNTER_SOURCE_PP + 23,
+       MALI_CINSTR_PP_POLYGON_COUNT                                         = MALI_CINSTR_COUNTER_SOURCE_PP + 24,
+       MALI_CINSTR_PP_PIXEL_RECTANGLE_COUNT                                 = MALI_CINSTR_COUNTER_SOURCE_PP + 25,
+       MALI_CINSTR_PP_LINES_COUNT                                           = MALI_CINSTR_COUNTER_SOURCE_PP + 26,
+       MALI_CINSTR_PP_POINTS_COUNT                                          = MALI_CINSTR_COUNTER_SOURCE_PP + 27,
+       MALI_CINSTR_PP_STALL_CYCLES_POLYGON_LIST_READER                      = MALI_CINSTR_COUNTER_SOURCE_PP + 28,
+       MALI_CINSTR_PP_STALL_CYCLES_TRIANGLE_SETUP                           = MALI_CINSTR_COUNTER_SOURCE_PP + 29,
+       MALI_CINSTR_PP_QUAD_RASTERIZED_COUNT                                 = MALI_CINSTR_COUNTER_SOURCE_PP + 30,
+       MALI_CINSTR_PP_FRAGMENT_RASTERIZED_COUNT                             = MALI_CINSTR_COUNTER_SOURCE_PP + 31,
+       MALI_CINSTR_PP_FRAGMENT_REJECTED_FRAGMENT_KILL_COUNT                 = MALI_CINSTR_COUNTER_SOURCE_PP + 32,
+       MALI_CINSTR_PP_FRAGMENT_REJECTED_FWD_FRAGMENT_KILL_COUNT             = MALI_CINSTR_COUNTER_SOURCE_PP + 33,
+       MALI_CINSTR_PP_FRAGMENT_PASSED_ZSTENCIL_COUNT                        = MALI_CINSTR_COUNTER_SOURCE_PP + 34,
+       MALI_CINSTR_PP_PATCHES_REJECTED_EARLY_Z_STENCIL_COUNT                = MALI_CINSTR_COUNTER_SOURCE_PP + 35,
+       MALI_CINSTR_PP_PATCHES_EVALUATED                                     = MALI_CINSTR_COUNTER_SOURCE_PP + 36,
+       MALI_CINSTR_PP_INSTRUCTION_COMPLETED_COUNT                           = MALI_CINSTR_COUNTER_SOURCE_PP + 37,
+       MALI_CINSTR_PP_INSTRUCTION_FAILED_RENDEZVOUS_COUNT                   = MALI_CINSTR_COUNTER_SOURCE_PP + 38,
+       MALI_CINSTR_PP_INSTRUCTION_FAILED_VARYING_MISS_COUNT                 = MALI_CINSTR_COUNTER_SOURCE_PP + 39,
+       MALI_CINSTR_PP_INSTRUCTION_FAILED_TEXTURE_MISS_COUNT                 = MALI_CINSTR_COUNTER_SOURCE_PP + 40,
+       MALI_CINSTR_PP_INSTRUCTION_FAILED_LOAD_MISS_COUNT                    = MALI_CINSTR_COUNTER_SOURCE_PP + 41,
+       MALI_CINSTR_PP_INSTRUCTION_FAILED_TILE_READ_MISS_COUNT               = MALI_CINSTR_COUNTER_SOURCE_PP + 42,
+       MALI_CINSTR_PP_INSTRUCTION_FAILED_STORE_MISS_COUNT                   = MALI_CINSTR_COUNTER_SOURCE_PP + 43,
+       MALI_CINSTR_PP_RENDEZVOUS_BREAKAGE_COUNT                             = MALI_CINSTR_COUNTER_SOURCE_PP + 44,
+       MALI_CINSTR_PP_PIPELINE_BUBBLES_CYCLE_COUNT                          = MALI_CINSTR_COUNTER_SOURCE_PP + 45,
+       MALI_CINSTR_PP_TEXTURE_MAPPER_MULTIPASS_COUNT                        = MALI_CINSTR_COUNTER_SOURCE_PP + 46,
+       MALI_CINSTR_PP_TEXTURE_MAPPER_CYCLE_COUNT                            = MALI_CINSTR_COUNTER_SOURCE_PP + 47,
+       MALI_CINSTR_PP_VERTEX_CACHE_HIT_COUNT                                = MALI_CINSTR_COUNTER_SOURCE_PP + 48,
+       MALI_CINSTR_PP_VERTEX_CACHE_MISS_COUNT                               = MALI_CINSTR_COUNTER_SOURCE_PP + 49,
+       MALI_CINSTR_PP_VARYING_CACHE_HIT_COUNT                               = MALI_CINSTR_COUNTER_SOURCE_PP + 50,
+       MALI_CINSTR_PP_VARYING_CACHE_MISS_COUNT                              = MALI_CINSTR_COUNTER_SOURCE_PP + 51,
+       MALI_CINSTR_PP_VARYING_CACHE_CONFLICT_MISS_COUNT                     = MALI_CINSTR_COUNTER_SOURCE_PP + 52,
+       MALI_CINSTR_PP_TEXTURE_CACHE_HIT_COUNT                               = MALI_CINSTR_COUNTER_SOURCE_PP + 53,
+       MALI_CINSTR_PP_TEXTURE_CACHE_MISS_COUNT                              = MALI_CINSTR_COUNTER_SOURCE_PP + 54,
+       MALI_CINSTR_PP_TEXTURE_CACHE_CONFLICT_MISS_COUNT                     = MALI_CINSTR_COUNTER_SOURCE_PP + 55,
+       MALI_CINSTR_PP_PALETTE_CACHE_HIT_COUNT                               = MALI_CINSTR_COUNTER_SOURCE_PP + 56, /* Mali 200 only */
+       MALI_CINSTR_PP_PALETTE_CACHE_MISS_COUNT                              = MALI_CINSTR_COUNTER_SOURCE_PP + 57, /* Mali 200 only */
+       MALI_CINSTR_PP_COMPRESSED_TEXTURE_CACHE_HIT_COUNT                    = MALI_CINSTR_COUNTER_SOURCE_PP + 56, /* Mali 400 class only */
+       MALI_CINSTR_PP_COMPRESSED_TEXTURE_CACHE_MISS_COUNT                   = MALI_CINSTR_COUNTER_SOURCE_PP + 57, /* Mali 400 class only */
+       MALI_CINSTR_PP_LOAD_STORE_CACHE_HIT_COUNT                            = MALI_CINSTR_COUNTER_SOURCE_PP + 58,
+       MALI_CINSTR_PP_LOAD_STORE_CACHE_MISS_COUNT                           = MALI_CINSTR_COUNTER_SOURCE_PP + 59,
+       MALI_CINSTR_PP_PROGRAM_CACHE_HIT_COUNT                               = MALI_CINSTR_COUNTER_SOURCE_PP + 60,
+       MALI_CINSTR_PP_PROGRAM_CACHE_MISS_COUNT                              = MALI_CINSTR_COUNTER_SOURCE_PP + 61,
+       MALI_CINSTR_PP_JOB_COUNT                                             = MALI_CINSTR_COUNTER_SOURCE_PP + 900,
+} cinstr_counters_m200_t;
+
+#endif /*_MALI_UTGARD_COUNTERS_H_*/
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/include/linux/mali/mali_utgard_ioctl.h b/drivers/gpu/arm/mali400/r4p0_rel0/include/linux/mali/mali_utgard_ioctl.h
new file mode 100644 (file)
index 0000000..2d202e2
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_UTGARD_IOCTL_H__
+#define __MALI_UTGARD_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>       /* file system operations */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file mali_kernel_ioctl.h
+ * Interface to the Linux device driver.
+ * This file describes the interface needed to use the Linux device driver.
+ * Its interface is designed to be used by the HAL implementation through a thin arch layer.
+ */
+
+/**
+ * ioctl commands
+ */
+
+#define MALI_IOC_BASE           0x82
+#define MALI_IOC_CORE_BASE      (_MALI_UK_CORE_SUBSYSTEM      + MALI_IOC_BASE)
+#define MALI_IOC_MEMORY_BASE    (_MALI_UK_MEMORY_SUBSYSTEM    + MALI_IOC_BASE)
+#define MALI_IOC_PP_BASE        (_MALI_UK_PP_SUBSYSTEM        + MALI_IOC_BASE)
+#define MALI_IOC_GP_BASE        (_MALI_UK_GP_SUBSYSTEM        + MALI_IOC_BASE)
+#define MALI_IOC_PROFILING_BASE (_MALI_UK_PROFILING_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_VSYNC_BASE     (_MALI_UK_VSYNC_SUBSYSTEM + MALI_IOC_BASE)
+
+#define MALI_IOC_WAIT_FOR_NOTIFICATION      _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s *)
+#define MALI_IOC_GET_API_VERSION            _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_s *)
+#define MALI_IOC_POST_NOTIFICATION          _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s *)
+#define MALI_IOC_GET_USER_SETTING           _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTING, _mali_uk_get_user_setting_s *)
+#define MALI_IOC_GET_USER_SETTINGS          _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTINGS, _mali_uk_get_user_settings_s *)
+#define MALI_IOC_REQUEST_HIGH_PRIORITY      _IOW (MALI_IOC_CORE_BASE, _MALI_UK_REQUEST_HIGH_PRIORITY, _mali_uk_request_high_priority_s *)
+#define MALI_IOC_TIMELINE_GET_LATEST_POINT  _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_GET_LATEST_POINT, _mali_uk_timeline_get_latest_point_s *)
+#define MALI_IOC_TIMELINE_WAIT              _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_WAIT, _mali_uk_timeline_wait_s *)
+#define MALI_IOC_TIMELINE_CREATE_SYNC_FENCE _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_CREATE_SYNC_FENCE, _mali_uk_timeline_create_sync_fence_s *)
+#define MALI_IOC_SOFT_JOB_START             _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_START, _mali_uk_soft_job_start_s *)
+#define MALI_IOC_SOFT_JOB_SIGNAL            _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_SIGNAL, _mali_uk_soft_job_signal_s *)
+
+#define MALI_IOC_MEM_MAP_EXT                _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MAP_EXT_MEM, _mali_uk_map_external_mem_s *)
+#define MALI_IOC_MEM_UNMAP_EXT              _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_UNMAP_EXT_MEM, _mali_uk_unmap_external_mem_s *)
+#define MALI_IOC_MEM_ATTACH_DMA_BUF         _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_DMA_BUF, _mali_uk_attach_dma_buf_s *)
+#define MALI_IOC_MEM_RELEASE_DMA_BUF        _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_DMA_BUF, _mali_uk_release_dma_buf_s *)
+#define MALI_IOC_MEM_DMA_BUF_GET_SIZE       _IOR(MALI_IOC_MEMORY_BASE, _MALI_UK_DMA_BUF_GET_SIZE, _mali_uk_dma_buf_get_size_s *)
+#define MALI_IOC_MEM_ATTACH_UMP             _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_UMP_MEM, _mali_uk_attach_ump_mem_s *)
+#define MALI_IOC_MEM_RELEASE_UMP            _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_UMP_MEM, _mali_uk_release_ump_mem_s *)
+#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s *)
+#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE    _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s *)
+#define MALI_IOC_MEM_WRITE_SAFE             _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MEM_WRITE_SAFE, _mali_uk_mem_write_safe_s *)
+
+#define MALI_IOC_PP_START_JOB               _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s *)
+#define MALI_IOC_PP_AND_GP_START_JOB        _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_AND_GP_START_JOB, _mali_uk_pp_and_gp_start_job_s *)
+#define MALI_IOC_PP_NUMBER_OF_CORES_GET            _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s *)
+#define MALI_IOC_PP_CORE_VERSION_GET       _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s * )
+#define MALI_IOC_PP_DISABLE_WB              _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_DISABLE_WB, _mali_uk_pp_disable_wb_s * )
+
+#define MALI_IOC_GP2_START_JOB              _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s *)
+#define MALI_IOC_GP2_NUMBER_OF_CORES_GET    _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s *)
+#define MALI_IOC_GP2_CORE_VERSION_GET      _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s *)
+#define MALI_IOC_GP2_SUSPEND_RESPONSE      _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE,_mali_uk_gp_suspend_response_s *)
+
+#define MALI_IOC_PROFILING_START            _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_START, _mali_uk_profiling_start_s *)
+#define MALI_IOC_PROFILING_ADD_EVENT        _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s*)
+#define MALI_IOC_PROFILING_STOP             _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_STOP, _mali_uk_profiling_stop_s *)
+#define MALI_IOC_PROFILING_GET_EVENT        _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_EVENT, _mali_uk_profiling_get_event_s *)
+#define MALI_IOC_PROFILING_CLEAR            _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_CLEAR, _mali_uk_profiling_clear_s *)
+#define MALI_IOC_PROFILING_GET_CONFIG       _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_CONFIG, _mali_uk_get_user_settings_s *)
+#define MALI_IOC_PROFILING_REPORT_SW_COUNTERS  _IOW (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_REPORT_SW_COUNTERS, _mali_uk_sw_counters_report_s *)
+
+#define MALI_IOC_VSYNC_EVENT_REPORT         _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s *)
+
+/* Deprecated ioctls */
+#define MALI_IOC_MEM_GET_BIG_BLOCK          _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_GET_BIG_BLOCK, void *)
+#define MALI_IOC_MEM_FREE_BIG_BLOCK         _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_FREE_BIG_BLOCK, void *)
+#define MALI_IOC_MEM_INIT                   _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_INIT_MEM, void *)
+#define MALI_IOC_MEM_TERM                   _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_TERM_MEM, void *)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_IOCTL_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/include/linux/mali/mali_utgard_profiling_events.h b/drivers/gpu/arm/mali400/r4p0_rel0/include/linux/mali/mali_utgard_profiling_events.h
new file mode 100644 (file)
index 0000000..5c12b64
--- /dev/null
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef _MALI_UTGARD_PROFILING_EVENTS_H_
+#define _MALI_UTGARD_PROFILING_EVENTS_H_
+
+/*
+ * The event ID is a 32 bit value consisting of different fields
+ * reserved, 4 bits, for future use
+ * event type, 4 bits, cinstr_profiling_event_type_t
+ * event channel, 8 bits, the source of the event.
+ * event data, 16 bit field, data depending on event type
+ */
+
+/**
+ * Specifies what kind of event this is
+ */
+typedef enum {
+       MALI_PROFILING_EVENT_TYPE_SINGLE  = 0 << 24,
+       MALI_PROFILING_EVENT_TYPE_START   = 1 << 24,
+       MALI_PROFILING_EVENT_TYPE_STOP    = 2 << 24,
+       MALI_PROFILING_EVENT_TYPE_SUSPEND = 3 << 24,
+       MALI_PROFILING_EVENT_TYPE_RESUME  = 4 << 24,
+} cinstr_profiling_event_type_t;
+
+
+/**
+ * Specifies the channel/source of the event
+ */
+typedef enum {
+       MALI_PROFILING_EVENT_CHANNEL_SOFTWARE =  0 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_GP0      =  1 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP0      =  5 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP1      =  6 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP2      =  7 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP3      =  8 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP4      =  9 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP5      = 10 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP6      = 11 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP7      = 12 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_GPU      = 21 << 16,
+} cinstr_profiling_event_channel_t;
+
+
+#define MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(num) (((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) + (num)) << 16)
+#define MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(num) (((MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) + (num)) << 16)
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from software channel
+ */
+typedef enum {
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_NONE                  = 0,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_NEW_FRAME         = 1,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_FLUSH                 = 2,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_SWAP_BUFFERS      = 3,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_FB_EVENT              = 4,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE            = 5,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE            = 6,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_READBACK              = 7,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_WRITEBACK             = 8,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_ENTER_API_FUNC        = 10,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC        = 11,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_DISCARD_ATTACHMENTS   = 13,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_TRY_LOCK          = 53,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_LOCK              = 54,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_UNLOCK            = 55,
+       MALI_PROFILING_EVENT_REASON_SINGLE_LOCK_CONTENDED           = 56,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_MALI_FENCE_DUP    = 57,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_SET_PP_JOB_FENCE  = 58,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_WAIT_SYNC         = 59,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_CREATE_FENCE_SYNC = 60,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_CREATE_NATIVE_FENCE_SYNC = 61,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_FENCE_FLUSH       = 62,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_FLUSH_SERVER_WAITS= 63,
+} cinstr_profiling_event_reason_single_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_START/STOP is used from software channel
+ * to inform whether the core is physical or virtual
+ */
+typedef enum {
+       MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL  = 0,
+       MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL   = 1,
+} cinstr_profiling_event_reason_start_stop_hw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_START/STOP is used from software channel
+ */
+typedef enum {
+       /*MALI_PROFILING_EVENT_REASON_START_STOP_SW_NONE            = 0,*/
+       MALI_PROFILING_EVENT_REASON_START_STOP_SW_MALI            = 1,
+       MALI_PROFILING_EVENT_REASON_START_STOP_SW_CALLBACK_THREAD = 2,
+       MALI_PROFILING_EVENT_REASON_START_STOP_SW_WORKER_THREAD   = 3,
+       MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF     = 4,
+       MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF      = 5,
+} cinstr_profiling_event_reason_start_stop_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SUSPEND/RESUME is used from software channel
+ */
+typedef enum {
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_NONE                     =  0, /* used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_PIPELINE_FULL            =  1, /* NOT used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC                    = 26, /* used in some build configurations */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_IFRAME_WAIT           = 27, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_IFRAME_SYNC           = 28, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VG_WAIT_FILTER_CLEANUP   = 29, /* used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VG_WAIT_TEXTURE          = 30, /* used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GLES_WAIT_MIPLEVEL       = 31, /* used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GLES_WAIT_READPIXELS     = 32, /* used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_WAIT_SWAP_IMMEDIATE  = 33, /* NOT used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_ICS_QUEUE_BUFFER         = 34, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_ICS_DEQUEUE_BUFFER       = 35, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_UMP_LOCK                 = 36, /* Not currently used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_X11_GLOBAL_LOCK          = 37, /* Not currently used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_X11_SWAP                 = 38, /* Not currently used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_MALI_EGL_IMAGE_SYNC_WAIT = 39, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GP_JOB_HANDLING          = 40, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_PP_JOB_HANDLING          = 41, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_MALI_FENCE_MERGE     = 42, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_MALI_FENCE_DUP       = 43,
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_FLUSH_SERVER_WAITS   = 44,
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_WAIT_SYNC            = 45, /* USED */
+} cinstr_profiling_event_reason_suspend_resume_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from a HW channel (GPx+PPx)
+ */
+typedef enum {
+       MALI_PROFILING_EVENT_REASON_SINGLE_HW_NONE          = 0,
+       MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT     = 1,
+       MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH         = 2,
+} cinstr_profiling_event_reason_single_hw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from the GPU channel
+ */
+typedef enum {
+       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_NONE              = 0,
+       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE  = 1,
+       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS      = 2,
+       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS      = 3,
+       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS      = 4,
+} cinstr_profiling_event_reason_single_gpu_t;
+
+/**
+ * These values are applicable for the 3rd data parameter when
+ * the type MALI_PROFILING_EVENT_TYPE_START is used from the software channel
+ * with the MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF reason.
+ */
+typedef enum {
+       MALI_PROFILING_EVENT_DATA_CORE_GP0             =  1,
+       MALI_PROFILING_EVENT_DATA_CORE_PP0             =  5,
+       MALI_PROFILING_EVENT_DATA_CORE_PP1             =  6,
+       MALI_PROFILING_EVENT_DATA_CORE_PP2             =  7,
+       MALI_PROFILING_EVENT_DATA_CORE_PP3             =  8,
+       MALI_PROFILING_EVENT_DATA_CORE_PP4             =  9,
+       MALI_PROFILING_EVENT_DATA_CORE_PP5             = 10,
+       MALI_PROFILING_EVENT_DATA_CORE_PP6             = 11,
+       MALI_PROFILING_EVENT_DATA_CORE_PP7             = 12,
+} cinstr_profiling_event_data_core_t;
+
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(num) (MALI_PROFILING_EVENT_DATA_CORE_GP0 + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(num) (MALI_PROFILING_EVENT_DATA_CORE_PP0 + (num))
+
+
+#endif /*_MALI_UTGARD_PROFILING_EVENTS_H_*/
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/include/linux/mali/mali_utgard_profiling_gator_api.h b/drivers/gpu/arm/mali400/r4p0_rel0/include/linux/mali/mali_utgard_profiling_gator_api.h
new file mode 100644 (file)
index 0000000..ebca529
--- /dev/null
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_UTGARD_PROFILING_GATOR_API_H__
+#define __MALI_UTGARD_PROFILING_GATOR_API_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MALI_PROFILING_API_VERSION 4
+
+#define MAX_NUM_L2_CACHE_CORES 3
+#define MAX_NUM_FP_CORES 8
+#define MAX_NUM_VP_CORES 1
+
+/** The list of events supported by the Mali DDK. */
+typedef enum {
+       /* Vertex processor activity */
+       ACTIVITY_VP_0 = 0,
+
+       /* Fragment processor activity */
+       ACTIVITY_FP_0,
+       ACTIVITY_FP_1,
+       ACTIVITY_FP_2,
+       ACTIVITY_FP_3,
+       ACTIVITY_FP_4,
+       ACTIVITY_FP_5,
+       ACTIVITY_FP_6,
+       ACTIVITY_FP_7,
+
+       /* L2 cache counters */
+       COUNTER_L2_0_C0,
+       COUNTER_L2_0_C1,
+       COUNTER_L2_1_C0,
+       COUNTER_L2_1_C1,
+       COUNTER_L2_2_C0,
+       COUNTER_L2_2_C1,
+
+       /* Vertex processor counters */
+       COUNTER_VP_0_C0,
+       COUNTER_VP_0_C1,
+
+       /* Fragment processor counters */
+       COUNTER_FP_0_C0,
+       COUNTER_FP_0_C1,
+       COUNTER_FP_1_C0,
+       COUNTER_FP_1_C1,
+       COUNTER_FP_2_C0,
+       COUNTER_FP_2_C1,
+       COUNTER_FP_3_C0,
+       COUNTER_FP_3_C1,
+       COUNTER_FP_4_C0,
+       COUNTER_FP_4_C1,
+       COUNTER_FP_5_C0,
+       COUNTER_FP_5_C1,
+       COUNTER_FP_6_C0,
+       COUNTER_FP_6_C1,
+       COUNTER_FP_7_C0,
+       COUNTER_FP_7_C1,
+
+       /*
+        * If more hardware counters are added, the _mali_osk_hw_counter_table
+        * below should also be updated.
+        */
+
+       /* EGL software counters */
+       COUNTER_EGL_BLIT_TIME,
+
+       /* GLES software counters */
+       COUNTER_GLES_DRAW_ELEMENTS_CALLS,
+       COUNTER_GLES_DRAW_ELEMENTS_NUM_INDICES,
+       COUNTER_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED,
+       COUNTER_GLES_DRAW_ARRAYS_CALLS,
+       COUNTER_GLES_DRAW_ARRAYS_NUM_TRANSFORMED,
+       COUNTER_GLES_DRAW_POINTS,
+       COUNTER_GLES_DRAW_LINES,
+       COUNTER_GLES_DRAW_LINE_LOOP,
+       COUNTER_GLES_DRAW_LINE_STRIP,
+       COUNTER_GLES_DRAW_TRIANGLES,
+       COUNTER_GLES_DRAW_TRIANGLE_STRIP,
+       COUNTER_GLES_DRAW_TRIANGLE_FAN,
+       COUNTER_GLES_NON_VBO_DATA_COPY_TIME,
+       COUNTER_GLES_UNIFORM_BYTES_COPIED_TO_MALI,
+       COUNTER_GLES_UPLOAD_TEXTURE_TIME,
+       COUNTER_GLES_UPLOAD_VBO_TIME,
+       COUNTER_GLES_NUM_FLUSHES,
+       COUNTER_GLES_NUM_VSHADERS_GENERATED,
+       COUNTER_GLES_NUM_FSHADERS_GENERATED,
+       COUNTER_GLES_VSHADER_GEN_TIME,
+       COUNTER_GLES_FSHADER_GEN_TIME,
+       COUNTER_GLES_INPUT_TRIANGLES,
+       COUNTER_GLES_VXCACHE_HIT,
+       COUNTER_GLES_VXCACHE_MISS,
+       COUNTER_GLES_VXCACHE_COLLISION,
+       COUNTER_GLES_CULLED_TRIANGLES,
+       COUNTER_GLES_CULLED_LINES,
+       COUNTER_GLES_BACKFACE_TRIANGLES,
+       COUNTER_GLES_GBCLIP_TRIANGLES,
+       COUNTER_GLES_GBCLIP_LINES,
+       COUNTER_GLES_TRIANGLES_DRAWN,
+       COUNTER_GLES_DRAWCALL_TIME,
+       COUNTER_GLES_TRIANGLES_COUNT,
+       COUNTER_GLES_INDEPENDENT_TRIANGLES_COUNT,
+       COUNTER_GLES_STRIP_TRIANGLES_COUNT,
+       COUNTER_GLES_FAN_TRIANGLES_COUNT,
+       COUNTER_GLES_LINES_COUNT,
+       COUNTER_GLES_INDEPENDENT_LINES_COUNT,
+       COUNTER_GLES_STRIP_LINES_COUNT,
+       COUNTER_GLES_LOOP_LINES_COUNT,
+
+       /* Framebuffer capture pseudo-counter */
+       COUNTER_FILMSTRIP,
+
+       NUMBER_OF_EVENTS
+} _mali_osk_counter_id;
+
+#define FIRST_ACTIVITY_EVENT    ACTIVITY_VP_0
+#define LAST_ACTIVITY_EVENT     ACTIVITY_FP_7
+
+#define FIRST_HW_COUNTER        COUNTER_L2_0_C0
+#define LAST_HW_COUNTER         COUNTER_FP_7_C1
+
+#define FIRST_SW_COUNTER        COUNTER_EGL_BLIT_TIME
+#define LAST_SW_COUNTER         COUNTER_GLES_LOOP_LINES_COUNT
+
+#define FIRST_SPECIAL_COUNTER   COUNTER_FILMSTRIP
+#define LAST_SPECIAL_COUNTER    COUNTER_FILMSTRIP
+
+/**
+ * Structure to pass performance counter data of a Mali core
+ */
+typedef struct _mali_profiling_core_counters {
+       u32 source0;
+       u32 value0;
+       u32 source1;
+       u32 value1;
+} _mali_profiling_core_counters;
+
+/**
+ * Structure to pass performance counter data of Mali L2 cache cores
+ */
+typedef struct _mali_profiling_l2_counter_values {
+       struct _mali_profiling_core_counters cores[MAX_NUM_L2_CACHE_CORES];
+} _mali_profiling_l2_counter_values;
+
+/**
+ * Structure to pass data defining Mali instance in use:
+ *
+ * mali_product_id - Mali product id
+ * mali_version_major - Mali version major number
+ * mali_version_minor - Mali version minor number
+ * num_of_l2_cores - number of L2 cache cores
+ * num_of_fp_cores - number of fragment processor cores
+ * num_of_vp_cores - number of vertex processor cores
+ */
+typedef struct _mali_profiling_mali_version {
+       u32 mali_product_id;
+       u32 mali_version_major;
+       u32 mali_version_minor;
+       u32 num_of_l2_cores;
+       u32 num_of_fp_cores;
+       u32 num_of_vp_cores;
+} _mali_profiling_mali_version;
+
+/*
+ * List of possible actions to be controlled by Streamline.
+ * The following numbers are used by gator to control the frame buffer dumping and s/w counter reporting.
+ * We cannot use the enums in mali_uk_types.h because they are unknown inside gator.
+ */
+#define FBDUMP_CONTROL_ENABLE (1)
+#define FBDUMP_CONTROL_RATE (2)
+#define SW_COUNTER_ENABLE (3)
+#define FBDUMP_CONTROL_RESIZE_FACTOR (4)
+
+void _mali_profiling_control(u32 action, u32 value);
+
+u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values);
+
+int _mali_profiling_set_event(u32 counter_id, s32 event_id);
+
+u32 _mali_profiling_get_api_version(void);
+
+void _mali_profiling_get_mali_version(struct _mali_profiling_mali_version *values);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_PROFILING_GATOR_API_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/include/linux/mali/mali_utgard_uk_types.h b/drivers/gpu/arm/mali400/r4p0_rel0/include/linux/mali/mali_utgard_uk_types.h
new file mode 100644 (file)
index 0000000..7ccd144
--- /dev/null
@@ -0,0 +1,1132 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __MALI_UTGARD_UK_TYPES_H__
+#define __MALI_UTGARD_UK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Iteration functions depend on these values being consecutive. */
+#define MALI_UK_TIMELINE_GP   0
+#define MALI_UK_TIMELINE_PP   1
+#define MALI_UK_TIMELINE_SOFT 2
+#define MALI_UK_TIMELINE_MAX  3
+
+typedef struct {
+       u32 points[MALI_UK_TIMELINE_MAX];
+       s32 sync_fd;
+} _mali_uk_fence_t;
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_uk_core U/K Core
+ * @{ */
+
+/** Definition of subsystem numbers, to assist in creating a unique identifier
+ * for each U/K call.
+ *
+ * @see _mali_uk_functions */
+typedef enum {
+       _MALI_UK_CORE_SUBSYSTEM,      /**< Core Group of U/K calls */
+       _MALI_UK_MEMORY_SUBSYSTEM,    /**< Memory Group of U/K calls */
+       _MALI_UK_PP_SUBSYSTEM,        /**< Fragment Processor Group of U/K calls */
+       _MALI_UK_GP_SUBSYSTEM,        /**< Vertex Processor Group of U/K calls */
+       _MALI_UK_PROFILING_SUBSYSTEM, /**< Profiling Group of U/K calls */
+       _MALI_UK_PMM_SUBSYSTEM,       /**< Power Management Module Group of U/K calls */
+       _MALI_UK_VSYNC_SUBSYSTEM,     /**< VSYNC Group of U/K calls */
+} _mali_uk_subsystem_t;
+
+/** Within a function group each function has its unique sequence number
+ * to assist in creating a unique identifier for each U/K call.
+ *
+ * An ordered pair of numbers selected from
+ * ( \ref _mali_uk_subsystem_t,\ref  _mali_uk_functions) will uniquely identify the
+ * U/K call across all groups of functions, and all functions. */
+typedef enum {
+       /** Core functions */
+
+       _MALI_UK_OPEN                    = 0, /**< _mali_ukk_open() */
+       _MALI_UK_CLOSE,                       /**< _mali_ukk_close() */
+       _MALI_UK_WAIT_FOR_NOTIFICATION,       /**< _mali_ukk_wait_for_notification() */
+       _MALI_UK_GET_API_VERSION,             /**< _mali_ukk_get_api_version() */
+       _MALI_UK_POST_NOTIFICATION,           /**< _mali_ukk_post_notification() */
+       _MALI_UK_GET_USER_SETTING,            /**< _mali_ukk_get_user_setting() *//**< [out] */
+       _MALI_UK_GET_USER_SETTINGS,           /**< _mali_ukk_get_user_settings() *//**< [out] */
+       _MALI_UK_REQUEST_HIGH_PRIORITY,       /**< _mali_ukk_request_high_priority() */
+       _MALI_UK_TIMELINE_GET_LATEST_POINT,   /**< _mali_ukk_timeline_get_latest_point() */
+       _MALI_UK_TIMELINE_WAIT,               /**< _mali_ukk_timeline_wait() */
+       _MALI_UK_TIMELINE_CREATE_SYNC_FENCE,  /**< _mali_ukk_timeline_create_sync_fence() */
+       _MALI_UK_SOFT_JOB_START,              /**< _mali_ukk_soft_job_start() */
+       _MALI_UK_SOFT_JOB_SIGNAL,             /**< _mali_ukk_soft_job_signal() */
+
+       /** Memory functions */
+
+       _MALI_UK_INIT_MEM                = 0,    /**< _mali_ukk_init_mem() */
+       _MALI_UK_TERM_MEM,                       /**< _mali_ukk_term_mem() */
+       _MALI_UK_GET_BIG_BLOCK,                  /**< _mali_ukk_get_big_block() */
+       _MALI_UK_FREE_BIG_BLOCK,                 /**< _mali_ukk_free_big_block() */
+       _MALI_UK_MAP_MEM,                        /**< _mali_ukk_mem_mmap() */
+       _MALI_UK_UNMAP_MEM,                      /**< _mali_ukk_mem_munmap() */
+       _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, /**< _mali_ukk_mem_get_mmu_page_table_dump_size() */
+       _MALI_UK_DUMP_MMU_PAGE_TABLE,            /**< _mali_ukk_mem_dump_mmu_page_table() */
+       _MALI_UK_ATTACH_DMA_BUF,                 /**< _mali_ukk_attach_dma_buf() */
+       _MALI_UK_RELEASE_DMA_BUF,                /**< _mali_ukk_release_dma_buf() */
+       _MALI_UK_DMA_BUF_GET_SIZE,               /**< _mali_ukk_dma_buf_get_size() */
+       _MALI_UK_ATTACH_UMP_MEM,                 /**< _mali_ukk_attach_ump_mem() */
+       _MALI_UK_RELEASE_UMP_MEM,                /**< _mali_ukk_release_ump_mem() */
+       _MALI_UK_MAP_EXT_MEM,                    /**< _mali_uku_map_external_mem() */
+       _MALI_UK_UNMAP_EXT_MEM,                  /**< _mali_uku_unmap_external_mem() */
+       _MALI_UK_VA_TO_MALI_PA,                  /**< _mali_uku_va_to_mali_pa() */
+       _MALI_UK_MEM_WRITE_SAFE,                 /**< _mali_uku_mem_write_safe() */
+
+       /** Common functions for each core */
+
+       _MALI_UK_START_JOB           = 0,     /**< Start a Fragment/Vertex Processor Job on a core */
+       _MALI_UK_GET_NUMBER_OF_CORES,         /**< Get the number of Fragment/Vertex Processor cores */
+       _MALI_UK_GET_CORE_VERSION,            /**< Get the Fragment/Vertex Processor version compatible with all cores */
+
+       /** Fragment Processor Functions  */
+
+       _MALI_UK_PP_START_JOB            = _MALI_UK_START_JOB,            /**< _mali_ukk_pp_start_job() */
+       _MALI_UK_GET_PP_NUMBER_OF_CORES  = _MALI_UK_GET_NUMBER_OF_CORES,  /**< _mali_ukk_get_pp_number_of_cores() */
+       _MALI_UK_GET_PP_CORE_VERSION     = _MALI_UK_GET_CORE_VERSION,     /**< _mali_ukk_get_pp_core_version() */
+       _MALI_UK_PP_DISABLE_WB,                                           /**< _mali_ukk_pp_job_disable_wb() */
+       _MALI_UK_PP_AND_GP_START_JOB,                                     /**< _mali_ukk_pp_and_gp_start_job() */
+
+       /** Vertex Processor Functions  */
+
+       _MALI_UK_GP_START_JOB            = _MALI_UK_START_JOB,            /**< _mali_ukk_gp_start_job() */
+       _MALI_UK_GET_GP_NUMBER_OF_CORES  = _MALI_UK_GET_NUMBER_OF_CORES,  /**< _mali_ukk_get_gp_number_of_cores() */
+       _MALI_UK_GET_GP_CORE_VERSION     = _MALI_UK_GET_CORE_VERSION,     /**< _mali_ukk_get_gp_core_version() */
+       _MALI_UK_GP_SUSPEND_RESPONSE,                                     /**< _mali_ukk_gp_suspend_response() */
+
+       /** Profiling functions */
+
+       _MALI_UK_PROFILING_START         = 0, /**< __mali_uku_profiling_start() */
+       _MALI_UK_PROFILING_ADD_EVENT,         /**< __mali_uku_profiling_add_event() */
+       _MALI_UK_PROFILING_STOP,              /**< __mali_uku_profiling_stop() */
+       _MALI_UK_PROFILING_GET_EVENT,         /**< __mali_uku_profiling_get_event() */
+       _MALI_UK_PROFILING_CLEAR,             /**< __mali_uku_profiling_clear() */
+       _MALI_UK_PROFILING_GET_CONFIG,        /**< __mali_uku_profiling_get_config() */
+       _MALI_UK_PROFILING_REPORT_SW_COUNTERS,/**< __mali_uku_profiling_report_sw_counters() */
+
+       /** VSYNC reporting functions */
+       _MALI_UK_VSYNC_EVENT_REPORT      = 0, /**< _mali_ukk_vsync_event_report() */
+
+} _mali_uk_functions;
+
+/** @brief Get the size necessary for system info
+ *
+ * @see _mali_ukk_get_system_info_size()
+ */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 size;                       /**< [out] size of buffer necessary to hold system information data, in bytes */
+} _mali_uk_get_system_info_size_s;
+
+
+/** @defgroup _mali_uk_getsysteminfo U/K Get System Info
+ * @{ */
+
+/**
+ * Type definition for the core version number.
+ * Used when returning the version number read from a core
+ *
+ * Its format is that of the 32-bit Version register for a particular core.
+ * Refer to the "Mali200 and MaliGP2 3D Graphics Processor Technical Reference
+ * Manual", ARM DDI 0415C, for more information.
+ */
+typedef u32 _mali_core_version;
+
/**
 * Enum values for the different modes the driver can be put in.
 * Normal is the default mode. The driver then uses a job queue and takes job objects from the clients.
 * Job completion is reported using the _mali_ukk_wait_for_notification call.
 * The driver blocks this io command until a job has completed or failed or a timeout occurs.
 *
 * The 'raw' mode is reserved for future expansion.
 * Note: values deliberately start at 1 so that 0 never names a valid mode.
 */
typedef enum _mali_driver_mode {
        _MALI_DRIVER_MODE_RAW = 1,    /**< Reserved for future expansion */
        _MALI_DRIVER_MODE_NORMAL = 2  /**< Normal mode of operation */
} _mali_driver_mode;
+
/** @brief List of possible cores
 *
 * These values are part of the user/kernel ABI: add new entries to the end of
 * this enum and never renumber existing ones. */
typedef enum _mali_core_type {
        _MALI_GP2 = 2,                /**< MaliGP2 Programmable Vertex Processor */
        _MALI_200 = 5,                /**< Mali200 Programmable Fragment Processor */
        _MALI_400_GP = 6,             /**< Mali400 Programmable Vertex Processor */
        _MALI_400_PP = 7,             /**< Mali400 Programmable Fragment Processor */
        /* insert new core here, do NOT alter the existing values */
} _mali_core_type;
+
+
/** @brief Capabilities of Memory Banks
 *
 * These may be used to restrict memory banks for certain uses. They may be
 * used when access is not possible (e.g. Bus does not support access to it)
 * or when access is possible but not desired (e.g. Access is slow).
 *
 * In the case of 'possible but not desired', there is no way of specifying
 * the flags as an optimization hint, so that the memory could be used as a
 * last resort.
 *
 * Values are single bits so they can be OR-ed together into a flag mask.
 *
 * @see _mali_mem_info
 */
typedef enum _mali_bus_usage {

        _MALI_PP_READABLE   = (1<<0),  /**< Readable by the Fragment Processor */
        _MALI_PP_WRITEABLE  = (1<<1),  /**< Writeable by the Fragment Processor */
        _MALI_GP_READABLE   = (1<<2),  /**< Readable by the Vertex Processor */
        _MALI_GP_WRITEABLE  = (1<<3),  /**< Writeable by the Vertex Processor */
        _MALI_CPU_READABLE  = (1<<4),  /**< Readable by the CPU */
        _MALI_CPU_WRITEABLE = (1<<5),  /**< Writeable by the CPU */
        _MALI_GP_L2_ALLOC   = (1<<6),  /**< GP allocate mali L2 cache lines */
        _MALI_MMU_READABLE  = _MALI_PP_READABLE | _MALI_GP_READABLE,   /**< Readable by the MMU (including all cores behind it) */
        _MALI_MMU_WRITEABLE = _MALI_PP_WRITEABLE | _MALI_GP_WRITEABLE, /**< Writeable by the MMU (including all cores behind it) */
} _mali_bus_usage;

/** @brief Cache-allocation policy selectors for Mali memory.
 * NOTE(review): only referenced from the commented-out member of
 * _mali_mem_info in this view — confirm remaining users before removal. */
typedef enum mali_memory_cache_settings {
        MALI_CACHE_STANDARD                     = 0, /**< Default cache behaviour */
        MALI_CACHE_GP_READ_ALLOCATE     = 1,         /**< GP read-allocate into L2 */
} mali_memory_cache_settings ;
+
+
/** @brief Information about the Mali Memory system
 *
 * Information is stored in a linked list, which is stored entirely in the
 * buffer pointed to by the system_info member of the
 * _mali_uk_get_system_info_s arguments provided to _mali_ukk_get_system_info()
 *
 * Each element of the linked list describes a single Mali Memory bank.
 * Each allocation can only come from one bank, and will not cross multiple
 * banks.
 *
 * On Mali-MMU systems, there is only one bank, which describes the maximum
 * possible address range that could be allocated (which may be much less than
 * the available physical memory)
 *
 * The flags member describes the capabilities of the memory. It is an error
 * to attempt to build a job for a particular core (PP or GP) when the memory
 * regions used do not have the capabilities for supporting that core. This
 * would result in a job abort from the Device Driver.
 *
 * For example, it is correct to build a PP job where read-only data structures
 * are taken from a memory with _MALI_PP_READABLE set and
 * _MALI_PP_WRITEABLE clear, and a framebuffer with  _MALI_PP_WRITEABLE set and
 * _MALI_PP_READABLE clear. However, it would be incorrect to use a framebuffer
 * where _MALI_PP_WRITEABLE is clear.
 */
typedef struct _mali_mem_info {
        u32 size;                     /**< Size of the memory bank in bytes */
        _mali_bus_usage flags;        /**< Capability flags of the memory, see \ref _mali_bus_usage */
        u32 maximum_order_supported;  /**< log2 supported size */
        u32 identifier;               /* mali_memory_cache_settings cache_settings; */
        struct _mali_mem_info * next; /**< Next List Link */
} _mali_mem_info;
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @defgroup _mali_uk_gp_suspend_response_s Vertex Processor Suspend Response
+ * @{ */
+
/** @brief Arguments for _mali_ukk_gp_suspend_response()
 *
 * When _mali_wait_for_notification() receives notification that a
 * Vertex Processor job was suspended, you need to send a response to indicate
 * what needs to happen with this job. You can either abort or resume the job.
 *
 * - set @c code to indicate response code. This is either @c _MALIGP_JOB_ABORT or
 * @c _MALIGP_JOB_RESUME_WITH_NEW_HEAP to indicate you will provide a new heap
 * for the job that will resolve the out of memory condition for the job.
 * - copy the @c cookie value from the @c _mali_uk_gp_job_suspended_s notification;
 * this is an identifier for the suspended job
 * - set @c arguments[0] and @c arguments[1] to zero if you abort the job. If
 * you resume it, @c argument[0] should specify the Mali start address for the new
 * heap and @c argument[1] the Mali end address of the heap.
 * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
 *
 */
typedef enum _maligp_job_suspended_response_code {
        _MALIGP_JOB_ABORT,                  /**< Abort the Vertex Processor job */
        _MALIGP_JOB_RESUME_WITH_NEW_HEAP    /**< Resume the Vertex Processor job with a new heap */
} _maligp_job_suspended_response_code;

/** @brief Response payload for a suspended GP job; see the description above. */
typedef struct {
        void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 cookie;                     /**< [in] cookie from the _mali_uk_gp_job_suspended_s notification */
        _maligp_job_suspended_response_code code; /**< [in] abort or resume response code, see \ref _maligp_job_suspended_response_code */
        u32 arguments[2];               /**< [in] 0 when aborting a job. When resuming a job, the Mali start and end address for a new heap to resume the job with */
} _mali_uk_gp_suspend_response_s;
+
+/** @} */ /* end group _mali_uk_gp_suspend_response_s */
+
+/** @defgroup _mali_uk_gpstartjob_s Vertex Processor Start Job
+ * @{ */
+
/** @brief Status indicating the result of starting a Vertex or Fragment processor job */
typedef enum {
        _MALI_UK_START_JOB_STARTED,                         /**< Job started */
        _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE           /**< Job could not be started at this time. Try starting the job again */
} _mali_uk_start_job_status;

/** @brief Status indicating the result of the execution of a Vertex or Fragment processor job
 *
 * Each end-state is a distinct bit in the upper half of the 32-bit value
 * (bit 16+n), so statuses can be tested or combined as a mask. */

typedef enum {
        _MALI_UK_JOB_STATUS_END_SUCCESS         = 1<<(16+0), /**< Job completed successfully */
        _MALI_UK_JOB_STATUS_END_OOM             = 1<<(16+1), /**< Job ran out of memory */
        _MALI_UK_JOB_STATUS_END_ABORT           = 1<<(16+2), /**< Job was aborted */
        _MALI_UK_JOB_STATUS_END_TIMEOUT_SW      = 1<<(16+3), /**< Software watchdog timeout */
        _MALI_UK_JOB_STATUS_END_HANG            = 1<<(16+4), /**< Hardware detected a hang */
        _MALI_UK_JOB_STATUS_END_SEG_FAULT       = 1<<(16+5), /**< Bus error while accessing job memory */
        _MALI_UK_JOB_STATUS_END_ILLEGAL_JOB     = 1<<(16+6), /**< Job was not a legal job */
        _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR     = 1<<(16+7), /**< Unclassified failure */
        _MALI_UK_JOB_STATUS_END_SHUTDOWN        = 1<<(16+8), /**< Driver shut down before the job ran */
        _MALI_UK_JOB_STATUS_END_SYSTEM_UNUSABLE = 1<<(16+9)  /**< System is in an unusable state */
} _mali_uk_job_status;

/** Number of GP frame registers carried in _mali_uk_gp_start_job_s */
#define MALIGP2_NUM_REGS_FRAME (6)
+
/** @brief Arguments for _mali_ukk_gp_start_job()
 *
 * To start a Vertex Processor job
 * - associate the request with a reference to a @c mali_gp_job_info by setting
 * user_job_ptr to the address of the @c mali_gp_job_info of the job.
 * - set @c priority to the priority of the @c mali_gp_job_info
 * - copy the frame registers from the @c mali_gp_job_info into @c frame_registers.
 * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
 * for a non-instrumented build. For an instrumented build you can use up
 * to two performance counters. Set the corresponding bit in @c perf_counter_flag
 * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
 * the source of what needs to get counted (e.g. number of vertex loader
 * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
 * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
 *
 * NOTE(review): this description mentions @c watchdog_msecs and a @c status
 * output, but the struct below carries neither member — the text appears to
 * predate the current layout; confirm against the ioctl implementation.
 *
 * When @c _mali_ukk_gp_start_job() returns @c _MALI_OSK_ERR_OK, the
 * result of the request is reported (see \ref _mali_uk_start_job_status). If the job could
 * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
 * tried again.
 *
 * After the job has started, @c _mali_wait_for_notification() will be notified
 * that the job finished or got suspended. It may get suspended due to
 * resource shortage. If it finished (see _mali_ukk_wait_for_notification())
 * the notification will contain a @c _mali_uk_gp_job_finished_s result. If
 * it got suspended the notification will contain a @c _mali_uk_gp_job_suspended_s
 * result.
 *
 * The @c _mali_uk_gp_job_finished_s contains the job status (see \ref _mali_uk_job_status),
 * and values of core registers when the job finished (heap address,
 * performance counters). A job has finished successfully when its status is
 * @c _MALI_UK_JOB_STATUS_END_SUCCESS. If the hardware detected a timeout while rendering
 * the job, or software detected the job is taking too long to
 * complete, the status will indicate @c _MALI_UK_JOB_STATUS_END_HANG or
 * @c _MALI_UK_JOB_STATUS_END_TIMEOUT_SW respectively.
 * If the hardware detected a bus error while accessing memory associated with the
 * job, status will indicate @c _MALI_UK_JOB_STATUS_END_SEG_FAULT.
 * status will indicate @c _MALI_UK_JOB_STATUS_END_SHUTDOWN if the driver had to
 * stop the job but the job didn't start on the hardware yet, e.g. when the
 * driver shutdown.
 *
 * In case the job got suspended, @c _mali_uk_gp_job_suspended_s contains
 * the @c user_job_ptr identifier used to start the job with and a @c cookie
 * to identify the core on which the job stalled.  This @c cookie will be needed
 * when responding to this notification by means of _mali_ukk_gp_suspend_response().
 * (see _mali_ukk_gp_suspend_response()). The response is either to abort or
 * resume the job. If the job got suspended due to an out of memory condition
 * you may be able to resolve this by providing more memory and resuming the job.
 *
 */
typedef struct {
        void *ctx;                          /**< [in,out] user-kernel context (trashed on output) */
        u32 user_job_ptr;                   /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
        u32 priority;                       /**< [in] job priority. A lower number means higher priority */
        u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job */
        u32 perf_counter_flag;              /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
        u32 perf_counter_src0;              /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
        u32 perf_counter_src1;              /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
        u32 frame_builder_id;               /**< [in] id of the originating frame builder */
        u32 flush_id;                       /**< [in] flush id within the originating frame builder */
        _mali_uk_fence_t fence;             /**< [in] fence this job must wait on */
        u32 *timeline_point_ptr;            /**< [in,out] pointer to location where point on gp timeline for this job will be written */
} _mali_uk_gp_start_job_s;

#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE (1<<0) /**< Enable performance counter SRC0 for a job */
#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE (1<<1) /**< Enable performance counter SRC1 for a job */
#define _MALI_PERFORMANCE_COUNTER_FLAG_HEATMAP_ENABLE (1<<2) /**< Enable per tile (aka heatmap) generation with for a job (using the enabled counter sources) */
+
+/** @} */ /* end group _mali_uk_gpstartjob_s */
+
/** @brief Notification payload delivered when a GP job finishes.
 * Returned inside _mali_uk_wait_for_notification_s for
 * _MALI_NOTIFICATION_GP_FINISHED. */
typedef struct {
        u32 user_job_ptr;               /**< [out] identifier for the job in user space */
        _mali_uk_job_status status;     /**< [out] status of finished job */
        u32 heap_current_addr;          /**< [out] value of the GP PLB PL heap start address register */
        u32 perf_counter0;              /**< [out] value of performance counter 0 (see ARM DDI0415A) */
        u32 perf_counter1;              /**< [out] value of performance counter 1 (see ARM DDI0415A) */
} _mali_uk_gp_job_finished_s;

/** @brief Notification payload delivered when a GP job stalls
 * (_MALI_NOTIFICATION_GP_STALLED). The @c cookie must be echoed back in
 * _mali_uk_gp_suspend_response_s. */
typedef struct {
        u32 user_job_ptr;                    /**< [out] identifier for the job in user space */
        u32 cookie;                          /**< [out] identifier for the core in kernel space on which the job stalled */
} _mali_uk_gp_job_suspended_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @defgroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
/** Maximum number of PP sub jobs a single job can be split into */
#define _MALI_PP_MAX_SUB_JOBS 8

/** Number of 32-bit PP frame registers (register offsets 0x000..0x058 inclusive) */
#define _MALI_PP_MAX_FRAME_REGISTERS ((0x058/4)+1)

/** Number of 32-bit registers per write-back unit (offsets 0x000..0x02C inclusive) */
#define _MALI_PP_MAX_WB_REGISTERS ((0x02C/4)+1)

/** Number of dynamic load balancing unit registers */
#define _MALI_DLBU_MAX_REGISTERS 4

/** Flag for _mali_uk_pp_start_job_s */
#define _MALI_PP_JOB_FLAG_NO_NOTIFICATION (1<<0)   /**< Do not deliver a finished notification for this job */
#define _MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE (1<<1) /**< Job renders to a window surface */
+
/** @defgroup _mali_uk_ppstartjob_s Fragment Processor Start Job
 * @{ */

/** @brief Arguments for _mali_ukk_pp_start_job()
 *
 * To start a Fragment Processor job
 * - associate the request with a reference to a mali_pp_job by setting
 * @c user_job_ptr to the address of the @c mali_pp_job of the job.
 * - set @c priority to the priority of the mali_pp_job
 * - copy the frame registers from the @c mali_pp_job into @c frame_registers.
 * For MALI200 you also need to copy the write back 0,1 and 2 registers.
 * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
 * for a non-instrumented build. For an instrumented build you can use up
 * to two performance counters. Set the corresponding bit in @c perf_counter_flag
 * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
 * the source of what needs to get counted (e.g. number of vertex loader
 * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
 * - pass in the user-kernel context in @c ctx that was returned from _mali_ukk_open()
 *
 * NOTE(review): as for the GP variant, @c watchdog_msecs and @c status are
 * described here but no such members exist in the struct below; confirm
 * against the ioctl implementation.
 *
 * When _mali_ukk_pp_start_job() returns @c _MALI_OSK_ERR_OK, the
 * result of the request is reported (see \ref _mali_uk_start_job_status). If the job could
 * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
 * tried again.
 *
 * After the job has started, _mali_wait_for_notification() will be notified
 * when the job finished. The notification will contain a
 * @c _mali_uk_pp_job_finished_s result. It contains the @c user_job_ptr
 * identifier used to start the job with, the job @c status (see \ref _mali_uk_job_status),
 * and values of core registers when the job finished (performance counters).
 * A job has finished successfully when its status is
 * @c _MALI_UK_JOB_STATUS_END_SUCCESS. If the hardware detected a timeout while rendering
 * the job, or software detected the job is taking too long to
 * complete, the status will indicate @c _MALI_UK_JOB_STATUS_END_HANG or
 * @c _MALI_UK_JOB_STATUS_END_TIMEOUT_SW respectively.
 * If the hardware detected a bus error while accessing memory associated with the
 * job, status will indicate @c _MALI_UK_JOB_STATUS_END_SEG_FAULT.
 * status will indicate @c _MALI_UK_JOB_STATUS_END_SHUTDOWN if the driver had to
 * stop the job but the job didn't start on the hardware yet, e.g. when the
 * driver shutdown.
 *
 */
typedef struct {
        void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 user_job_ptr;               /**< [in] identifier for the job in user space */
        u32 priority;                   /**< [in] job priority. A lower number means higher priority */
        u32 frame_registers[_MALI_PP_MAX_FRAME_REGISTERS];         /**< [in] core specific registers associated with first sub job, see ARM DDI0415A */
        u32 frame_registers_addr_frame[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_FRAME registers for sub job 1-7 */
        u32 frame_registers_addr_stack[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_STACK registers for sub job 1-7 */
        u32 wb0_registers[_MALI_PP_MAX_WB_REGISTERS];              /**< [in] write back unit 0 registers */
        u32 wb1_registers[_MALI_PP_MAX_WB_REGISTERS];              /**< [in] write back unit 1 registers */
        u32 wb2_registers[_MALI_PP_MAX_WB_REGISTERS];              /**< [in] write back unit 2 registers */
        u32 dlbu_registers[_MALI_DLBU_MAX_REGISTERS]; /**< [in] Dynamic load balancing unit registers */
        u32 num_cores;                      /**< [in] Number of cores to set up (valid range: 1-4) */
        u32 perf_counter_flag;              /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
        u32 perf_counter_src0;              /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
        u32 perf_counter_src1;              /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
        u32 frame_builder_id;               /**< [in] id of the originating frame builder */
        u32 flush_id;                       /**< [in] flush id within the originating frame builder */
        u32 flags;                          /**< [in] See _MALI_PP_JOB_FLAG_* for a list of available flags */
        u32 tilesx;                         /**< [in] number of tiles in the x direction (needed for heatmap generation) */
        u32 tilesy;                         /**< [in] number of tiles in y direction (needed for reading the heatmap memory) */
        u32 heatmap_mem;                    /**< [in] memory address to store counter values per tile (aka heatmap) */
        u32 num_memory_cookies;             /**< [in] number of memory cookies attached to job */
        u32 *memory_cookies;                /**< [in] memory cookies attached to job  */
        _mali_uk_fence_t fence;             /**< [in] fence this job must wait on */
        u32 *timeline_point_ptr;            /**< [in,out] pointer to location where point on pp timeline for this job will be written */
} _mali_uk_pp_start_job_s;
+
/** @brief Arguments for starting a paired GP + PP job in one request. */
typedef struct {
        void *ctx;                          /**< [in,out] user-kernel context (trashed on output) */
        _mali_uk_gp_start_job_s *gp_args;   /**< [in,out] GP uk arguments (see _mali_uk_gp_start_job_s) */
        _mali_uk_pp_start_job_s *pp_args;   /**< [in,out] PP uk arguments (see _mali_uk_pp_start_job_s) */
} _mali_uk_pp_and_gp_start_job_s;

/** @} */ /* end group _mali_uk_ppstartjob_s */

/** @brief Notification payload delivered when a PP job finishes
 * (_MALI_NOTIFICATION_PP_FINISHED). Per-sub-job counter values are reported. */
typedef struct {
        u32 user_job_ptr;                          /**< [out] identifier for the job in user space */
        _mali_uk_job_status status;                /**< [out] status of finished job */
        u32 perf_counter0[_MALI_PP_MAX_SUB_JOBS];  /**< [out] value of performance counter 0 (see ARM DDI0415A), one for each sub job */
        u32 perf_counter1[_MALI_PP_MAX_SUB_JOBS];  /**< [out] value of performance counter 1 (see ARM DDI0415A), one for each sub job */
        u32 perf_counter_src0;                     /**< [out] source id used for performance counter 0 */
        u32 perf_counter_src1;                     /**< [out] source id used for performance counter 1 */
} _mali_uk_pp_job_finished_s;

/** @brief Notification payload for _MALI_NOTIFICATION_PP_NUM_CORE_CHANGE. */
typedef struct {
        u32 number_of_enabled_cores;               /**< [out] the new number of enabled cores */
} _mali_uk_pp_num_cores_changed_s;
+
+
+
/**
 * Flags to indicate write-back units.
 * Single-bit values, so several units can be selected in one mask.
 */
typedef enum {
        _MALI_UK_PP_JOB_WB0 = 1, /**< Write-back unit 0 */
        _MALI_UK_PP_JOB_WB1 = 2, /**< Write-back unit 1 */
        _MALI_UK_PP_JOB_WB2 = 4, /**< Write-back unit 2 */
} _mali_uk_pp_job_wbx_flag;

/** @brief Arguments for disabling write-back units of a queued PP job,
 * identified by frame builder id and the per-unit target memory addresses. */
typedef struct {
        void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 fb_id;                      /**< [in] Frame builder ID of job to disable WB units for */
        u32 wb0_memory;                 /**< [in] WB unit 0 target memory address */
        u32 wb1_memory;                 /**< [in] WB unit 1 target memory address */
        u32 wb2_memory;                 /**< [in] WB unit 2 target memory address */
} _mali_uk_pp_disable_wb_s;
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+/** @defgroup _mali_uk_soft_job U/K Soft Job
+ * @{ */
+
/** @brief Arguments for starting a soft job (a CPU-side job tracked on the
 * soft timeline, fenced like GPU jobs). */
typedef struct {
        void *ctx;                          /**< [in,out] user-kernel context (trashed on output) */
        u32 type;                           /**< [in] type of soft job */
        u32 user_job;                       /**< [in] identifier for the job in user space */
        u32 *job_id_ptr;                    /**< [in,out] pointer to location where job id will be written */
        _mali_uk_fence_t fence;             /**< [in] fence this job must wait on */
        u32 point;                          /**< [out] point on soft timeline for this job */
} _mali_uk_soft_job_start_s;

/** @brief Notification payload for _MALI_NOTIFICATION_SOFT_ACTIVATED. */
typedef struct {
        u32 user_job;                       /**< [out] identifier for the job in user space */
} _mali_uk_soft_job_activated_s;

/** @brief Arguments for signalling completion of a previously started soft job. */
typedef struct {
        void *ctx;                          /**< [in,out] user-kernel context (trashed on output) */
        u32 job_id;                         /**< [in] id for soft job */
} _mali_uk_soft_job_signal_s;
+
+/** @} */ /* end group _mali_uk_soft_job */
+
+/** @addtogroup _mali_uk_core U/K Core
+ * @{ */
+
+/** @defgroup _mali_uk_waitfornotification_s Wait For Notification
+ * @{ */
+
/** @brief Notification type encodings
 *
 * Each Notification type is an ordered pair of (subsystem,id), and is unique.
 *
 * The encoding of subsystem,id into a 32-bit word is:
 * encoding = (( subsystem << _MALI_NOTIFICATION_SUBSYSTEM_SHIFT ) & _MALI_NOTIFICATION_SUBSYSTEM_MASK)
 *            | (( id <<  _MALI_NOTIFICATION_ID_SHIFT ) & _MALI_NOTIFICATION_ID_MASK)
 *
 * The _MALI_UK_*_SUBSYSTEM constants are defined elsewhere in this header.
 *
 * @see _mali_uk_wait_for_notification_s
 */
typedef enum {
        /** core notifications */

        _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS =  (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x20,
        _MALI_NOTIFICATION_APPLICATION_QUIT =           (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x40,
        _MALI_NOTIFICATION_SETTINGS_CHANGED =           (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x80,
        _MALI_NOTIFICATION_SOFT_ACTIVATED =             (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x100,

        /** Fragment Processor notifications */

        _MALI_NOTIFICATION_PP_FINISHED =                (_MALI_UK_PP_SUBSYSTEM << 16) | 0x10,
        _MALI_NOTIFICATION_PP_NUM_CORE_CHANGE =         (_MALI_UK_PP_SUBSYSTEM << 16) | 0x20,

        /** Vertex Processor notifications */

        _MALI_NOTIFICATION_GP_FINISHED =                (_MALI_UK_GP_SUBSYSTEM << 16) | 0x10,
        _MALI_NOTIFICATION_GP_STALLED =                 (_MALI_UK_GP_SUBSYSTEM << 16) | 0x20,

} _mali_uk_notification_type;

/** to assist in splitting up 32-bit notification value in subsystem and id value */
#define _MALI_NOTIFICATION_SUBSYSTEM_MASK 0xFFFF0000  /**< Upper 16 bits: subsystem */
#define _MALI_NOTIFICATION_SUBSYSTEM_SHIFT 16
#define _MALI_NOTIFICATION_ID_MASK 0x0000FFFF         /**< Lower 16 bits: per-subsystem id */
#define _MALI_NOTIFICATION_ID_SHIFT 0
+
+
/** @brief Enumeration of possible settings which match mali_setting_t in user space
 *
 * Values index the settings array in _mali_uk_get_user_settings_s and the
 * description table below; keep both in the same order.
 */
typedef enum {
        _MALI_UK_USER_SETTING_SW_EVENTS_ENABLE = 0,
        _MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED,
        _MALI_UK_USER_SETTING_DEPTHBUFFER_CAPTURE_ENABLED,
        _MALI_UK_USER_SETTING_STENCILBUFFER_CAPTURE_ENABLED,
        _MALI_UK_USER_SETTING_PER_TILE_COUNTERS_CAPTURE_ENABLED,
        _MALI_UK_USER_SETTING_BUFFER_CAPTURE_COMPOSITOR,
        _MALI_UK_USER_SETTING_BUFFER_CAPTURE_WINDOW,
        _MALI_UK_USER_SETTING_BUFFER_CAPTURE_OTHER,
        _MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES,
        _MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR,
        _MALI_UK_USER_SETTING_SW_COUNTER_ENABLED,
        _MALI_UK_USER_SETTING_MAX,  /**< Number of settings; not a valid setting itself */
} _mali_uk_user_setting_t;

/* See mali_user_settings_db.c */
extern const char *_mali_uk_user_setting_descriptions[];
/** Initializer for the description table; entries must stay in
 * _mali_uk_user_setting_t order (one string per setting). */
#define _MALI_UK_USER_SETTING_DESCRIPTIONS \
{                                           \
        "sw_events_enable",                 \
        "colorbuffer_capture_enable",       \
        "depthbuffer_capture_enable",       \
        "stencilbuffer_capture_enable",     \
        "per_tile_counters_enable",         \
        "buffer_capture_compositor",        \
        "buffer_capture_window",            \
        "buffer_capture_other",             \
        "buffer_capture_n_frames",          \
        "buffer_capture_resize_factor",     \
        "sw_counters_enable",               \
};

/** @brief struct to hold the value to a particular setting as seen in the kernel space
 */
typedef struct {
        _mali_uk_user_setting_t setting; /**< [out] the setting that changed */
        u32 value;                       /**< [out] the setting's new value */
} _mali_uk_settings_changed_s;
+
/** @brief Arguments for _mali_ukk_wait_for_notification()
 *
 * On successful return from _mali_ukk_wait_for_notification(), the members of
 * this structure will indicate the reason for notification.
 *
 * Specifically, the source of the notification can be identified by the
 * subsystem and id fields of the mali_uk_notification_type in the code.type
 * member. The type member is encoded in a way to divide up the types into a
 * subsystem field, and a per-subsystem ID field. See
 * _mali_uk_notification_type for more information.
 *
 * Interpreting the data union member depends on the notification type:
 *
 * - type == _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS
 *     - The kernel side is shutting down. No further
 * _mali_uk_wait_for_notification() calls should be made.
 *     - In this case, the value of the data union member is undefined.
 *     - This is used to indicate to the user space client that it should close
 * the connection to the Mali Device Driver.
 * - type == _MALI_NOTIFICATION_PP_FINISHED
 *    - The notification data is of type _mali_uk_pp_job_finished_s. It contains the user_job_ptr
 * identifier used to start the job with, the job status,
 * and values of core registers when the job finished (performance counters).
 *    - A job has finished successfully when its status member is _MALI_UK_JOB_STATUS_END_SUCCESS.
 *    - If the hardware detected a timeout while rendering the job, or software detected the job is
 * taking too long to complete, the status member will
 * indicate _MALI_UK_JOB_STATUS_END_HANG (or _MALI_UK_JOB_STATUS_END_TIMEOUT_SW).
 *    - If the hardware detected a bus error while accessing memory associated with the job, status will
 * indicate _MALI_UK_JOB_STATUS_END_SEG_FAULT.
 *    - Status will indicate _MALI_UK_JOB_STATUS_END_SHUTDOWN if the driver had to stop the job but the job
 * didn't start on the hardware yet, e.g. when the driver closes.
 * - type == _MALI_NOTIFICATION_GP_FINISHED
 *     - The notification data is of type _mali_uk_gp_job_finished_s. The notification is similar to that of
 * type == _MALI_NOTIFICATION_PP_FINISHED, except that several other GP core register values are returned.
 * The status values have the same meaning for type == _MALI_NOTIFICATION_PP_FINISHED.
 * - type == _MALI_NOTIFICATION_GP_STALLED
 *     - The notification data is of type _mali_uk_gp_job_suspended_s. It contains the user_job_ptr
 * identifier used to start the job with and a cookie to identify the core on
 * which the job stalled.
 *     - A GP job is suspended when the polygon list builder unit has run out of
 * memory; respond via _mali_ukk_gp_suspend_response().
 */
typedef struct {
        void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
        _mali_uk_notification_type type; /**< [out] Type of notification available */
        union {
                _mali_uk_gp_job_suspended_s gp_job_suspended;/**< [out] Notification data for _MALI_NOTIFICATION_GP_STALLED notification type */
                _mali_uk_gp_job_finished_s  gp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_GP_FINISHED notification type */
                _mali_uk_pp_job_finished_s  pp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_PP_FINISHED notification type */
                _mali_uk_settings_changed_s setting_changed;/**< [out] Notification data for _MALI_NOTIFICATION_SETTINGS_CHANGED notification type */
                _mali_uk_soft_job_activated_s soft_job_activated; /**< [out] Notification data for _MALI_NOTIFICATION_SOFT_ACTIVATED notification type */
        } data;
} _mali_uk_wait_for_notification_s;

/** @brief Arguments for _mali_ukk_post_notification()
 *
 * Posts the specified notification to the notification queue for this application.
 * This is used to send a quit message to the callback thread.
 */
typedef struct {
        void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
        _mali_uk_notification_type type; /**< [in] Type of notification to post */
} _mali_uk_post_notification_s;
+
+/** @} */ /* end group _mali_uk_waitfornotification_s */
+
+/** @defgroup _mali_uk_getapiversion_s Get API Version
+ * @{ */
+
/** helpers for Device Driver API version handling */

/** @brief Encode a version ID from a 16-bit input
 *
 * The 16-bit version number is stored in both halves of the 32-bit result.
 * @note the input is assumed to be 16 bits. It must not exceed 16 bits. */
#define _MAKE_VERSION_ID(x) (((x) << 16UL) | (x))

/** @brief Check whether a 32-bit value is likely to be a Device Driver API
 * version ID (both 16-bit halves equal). */
#define _IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))

/** @brief Decode a 16-bit version number from a 32-bit Device Driver API version
 * ID */
#define _GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)

/** @brief Determine whether two 32-bit encoded version IDs match
 *
 * Both operands must be well-formed version IDs carrying the same version
 * number. Fixed: the previous definition expanded the non-existent
 * IS_VERSION_ID()/GET_VERSION() (missing leading underscore), so any use of
 * this macro failed to compile. */
#define _IS_API_MATCH(x, y) (_IS_VERSION_ID((x)) && _IS_VERSION_ID((y)) && (_GET_VERSION((x)) == _GET_VERSION((y))))
+
/**
 * API version define.
 * Indicates the version of the kernel API
 * The version is a 16bit integer incremented on each API change.
 * The 16bit integer is stored twice in a 32bit integer
 * For example, for version 1 the value would be 0x00010001
 */
#define _MALI_API_VERSION 401
#define _MALI_UK_API_VERSION _MAKE_VERSION_ID(_MALI_API_VERSION)

/**
 * The API version is a 16-bit integer stored in both the lower and upper 16-bits
 * of a 32-bit value. The 16-bit API version value is incremented on each API
 * change. Version 1 would be 0x00010001. Used in _mali_uk_get_api_version_s.
 */
typedef u32 _mali_uk_api_version;
+
/** @brief Arguments for _mali_uk_get_api_version()
 *
 * The user-side interface version must be written into the version member,
 * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of
 * the kernel-side interface.
 *
 * On successful return, the version member will be the API version of the
 * kernel-side interface. _MALI_UK_API_VERSION macro defines the current version
 * of the API.
 *
 * The compatible member must be checked to see if the version of the user-side
 * interface is compatible with the kernel-side interface, since future versions
 * of the interface may be backwards compatible.
 */
typedef struct {
        void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        _mali_uk_api_version version;   /**< [in,out] API version of user-side interface. */
        int compatible;                 /**< [out] @c 1 when @c version is compatible, @c 0 otherwise */
} _mali_uk_get_api_version_s;
/** @} */ /* end group _mali_uk_getapiversion_s */
+/** @} */ /* end group _mali_uk_getapiversion_s */
+
+/** @defgroup _mali_uk_get_user_settings_s Get user space settings */
+
+/** @brief struct to keep the matching values of the user space settings within a certain context
+ *
+ * Each member of the settings array corresponds to a matching setting in the user space and its value is the value
+ * of that particular setting.
+ *
+ * All settings are interpreted relative to the context pointed to by the ctx pointer.
+ */
+typedef struct {
+       void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       u32 settings[_MALI_UK_USER_SETTING_MAX]; /**< [out] The values for all settings */
+} _mali_uk_get_user_settings_s;
+
+/** @brief struct to hold the value of a particular setting from the user space within a given context
+ */
+typedef struct {
+       void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_user_setting_t setting; /**< [in] setting to get */
+       u32 value;                       /**< [out] value of setting */
+} _mali_uk_get_user_setting_s;
+
+/** @brief Arguments for _mali_ukk_request_high_priority() */
+typedef struct {
+       void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_request_high_priority_s;
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** Flag for _mali_uk_map_external_mem_s, _mali_uk_attach_ump_mem_s and _mali_uk_attach_dma_buf_s */
+#define _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE (1<<0)
+
+/** @brief Arguments for mapping externally allocated physical memory into the Mali address space */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 phys_addr;                  /**< [in] physical address */
+       u32 size;                       /**< [in] size */
+       u32 mali_address;               /**< [in] mali address to map the physical memory to */
+       u32 rights;                     /**< [in] rights necessary for accessing memory */
+       u32 flags;                      /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+       u32 cookie;                     /**< [out] identifier for mapped memory object in kernel space  */
+} _mali_uk_map_external_mem_s;
+
+/** @brief Arguments for unmapping memory previously mapped via _mali_uk_map_external_mem_s */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 cookie;                     /**< [in] identifier for mapped memory object in kernel space (as returned by the map call) */
+} _mali_uk_unmap_external_mem_s;
+
+/** @note This is identical to _mali_uk_map_external_mem_s above, however phys_addr is replaced by memory descriptor */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 mem_fd;                     /**< [in] Memory descriptor */
+       u32 size;                       /**< [in] size */
+       u32 mali_address;               /**< [in] mali address to map the physical memory to */
+       u32 rights;                     /**< [in] rights necessary for accessing memory */
+       u32 flags;                      /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+       u32 cookie;                     /**< [out] identifier for mapped memory object in kernel space  */
+} _mali_uk_attach_dma_buf_s;
+
+/** @brief Arguments for querying the size of the buffer behind a memory descriptor */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 mem_fd;                     /**< [in] Memory descriptor */
+       u32 size;                       /**< [out] size */
+} _mali_uk_dma_buf_get_size_s;
+
+/** @brief Arguments for releasing a previously attached dma-buf mapping */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 cookie;                     /**< [in] identifier for mapped memory object in kernel space  */
+} _mali_uk_release_dma_buf_s;
+
+/** @note This is identical to _mali_uk_map_external_mem_s above, however phys_addr is replaced by secure_id */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 secure_id;                  /**< [in] secure id */
+       u32 size;                       /**< [in] size */
+       u32 mali_address;               /**< [in] mali address to map the physical memory to */
+       u32 rights;                     /**< [in] rights necessary for accessing memory */
+       u32 flags;                      /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+       u32 cookie;                     /**< [out] identifier for mapped memory object in kernel space  */
+} _mali_uk_attach_ump_mem_s;
+
+/** @brief Arguments for releasing a previously attached UMP mapping */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 cookie;                     /**< [in] identifier for mapped memory object in kernel space  */
+} _mali_uk_release_ump_mem_s;
+
+/** @brief Arguments for _mali_ukk_va_to_mali_pa()
+ *
+ * If size is zero or not a multiple of the system's page size, it will be
+ * rounded up to the next multiple of the page size. This will occur before
+ * any other use of the size parameter.
+ *
+ * If va is not PAGE_SIZE aligned, it will be rounded down to the next page
+ * boundary.
+ *
+ * The range (va) to ((u32)va)+(size-1) inclusive will be checked for physical
+ * contiguity.
+ *
+ * The implementor will check that the entire physical range is allowed to be
+ * mapped into user-space.
+ *
+ * Failure will occur if either of the above are not satisfied.
+ *
+ * Otherwise, the physical base address of the range is returned through pa,
+ * va is updated to be page aligned, and size is updated to be a non-zero
+ * multiple of the system's pagesize.
+ */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       void *va;                       /**< [in,out] Virtual address of the start of the range */
+       u32 pa;                         /**< [out] Physical base address of the range */
+       u32 size;                       /**< [in,out] Size of the range, in bytes. */
+} _mali_uk_va_to_mali_pa_s;
+
+/**
+ * @brief Arguments for _mali_uku/ukk_mem_write_safe()
+ */
+typedef struct {
+       void *ctx;        /**< [in,out] user-kernel context (trashed on output) */
+       const void *src;  /**< [in]     Pointer to source data */
+       void *dest;       /**< [in]     Destination Mali buffer */
+       u32 size;         /**< [in,out] Number of bytes to write/copy on input, number of bytes actually written/copied on output */
+} _mali_uk_mem_write_safe_s;
+
+/** @brief Arguments for querying the buffer size required for an MMU page table dump */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 size;                       /**< [out] size of MMU page table information (registers + page tables) */
+} _mali_uk_query_mmu_page_table_dump_size_s;
+
+/** @brief Arguments for dumping the MMU registers and page tables into a caller-supplied buffer */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 size;                       /**< [in] size of buffer to receive mmu page table information */
+       void *buffer;                   /**< [in,out] buffer to receive mmu page table information */
+       u32 register_writes_size;       /**< [out] size of MMU register dump */
+       u32 *register_writes;           /**< [out] pointer within buffer where MMU register dump is stored */
+       u32 page_table_dump_size;       /**< [out] size of MMU page table dump */
+       u32 *page_table_dump;           /**< [out] pointer within buffer where MMU page table dump is stored */
+} _mali_uk_dump_mmu_page_table_s;
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_pp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_number_of_cores(),
+ * @c number_of_total_cores and @c number_of_enabled_cores will contain the
+ * total and enabled number of Fragment Processor cores in the system.
+ */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 number_of_total_cores;      /**< [out] Total number of Fragment Processor cores in the system */
+       u32 number_of_enabled_cores;    /**< [out] Number of enabled Fragment Processor cores */
+} _mali_uk_get_pp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_pp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_core_version(), @c version contains
+ * the version that all Fragment Processor cores are compatible with.
+ */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       _mali_core_version version;     /**< [out] version returned from core, see \ref _mali_core_version  */
+} _mali_uk_get_pp_core_version_s;
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_gp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_number_of_cores(), @c number_of_cores
+ * will contain the number of Vertex Processor cores in the system.
+ */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 number_of_cores;            /**< [out] number of Vertex Processor cores in the system */
+} _mali_uk_get_gp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_gp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_core_version(), @c version contains
+ * the version that all Vertex Processor cores are compatible with.
+ */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       _mali_core_version version;     /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_gp_core_version_s;
+
+/** @brief Arguments for starting profiling */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 limit;                      /**< [in,out] The desired limit for number of events to record on input, actual limit on output */
+} _mali_uk_profiling_start_s;
+
+/** @brief Arguments for registering a single profiling event */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 event_id;                   /**< [in] event id to register (see  enum mali_profiling_events for values) */
+       u32 data[5];                    /**< [in] event specific data */
+} _mali_uk_profiling_add_event_s;
+
+/** @brief Arguments for stopping profiling */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 count;                      /**< [out] The number of events sampled */
+} _mali_uk_profiling_stop_s;
+
+/** @brief Arguments for fetching a recorded profiling event by index */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 index;                      /**< [in] which index to get (starting at zero) */
+       u64 timestamp;                  /**< [out] timestamp of event */
+       u32 event_id;                   /**< [out] event id of event (see  enum mali_profiling_events for values) */
+       u32 data[5];                    /**< [out] event specific data */
+} _mali_uk_profiling_get_event_s;
+
+/** @brief Arguments for clearing all recorded profiling events */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_profiling_clear_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments to _mali_ukk_mem_mmap()
+ *
+ * Use of the phys_addr member depends on whether the driver is compiled for
+ * Mali-MMU or non-MMU:
+ * - in the non-MMU case, this is the physical address of the memory as seen by
+ * the CPU (which may be a constant offset from that used by Mali)
+ * - in the MMU case, this is the Mali Virtual base address of the memory to
+ * allocate, and the particular physical pages used to back the memory are
+ * entirely determined by _mali_ukk_mem_mmap(). The details of the physical pages
+ * are not reported to user-space for security reasons.
+ *
+ * The cookie member must be stored for use later when freeing the memory by
+ * calling _mali_ukk_mem_munmap(). In the Mali-MMU case, the cookie is secure.
+ *
+ * The ukk_private word must be set to zero when calling from user-space. On
+ * Kernel-side, the OS implementation of the U/K interface can use it to
+ * communicate data to the OS implementation of the OSK layer. In particular,
+ * _mali_ukk_get_big_block() calls _mali_ukk_mem_mmap directly, and
+ * will communicate its own ukk_private word through the ukk_private member
+ * here. The common code itself will not inspect or modify the ukk_private
+ * word, and so it may be safely used for whatever purposes necessary to
+ * integrate Mali Memory handling into the OS.
+ *
+ * The uku_private member is currently reserved for use by the user-side
+ * implementation of the U/K interface. Its value must be zero.
+ */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       void *mapping;                  /**< [out] Returns user-space virtual address for the mapping */
+       u32 size;                       /**< [in] Size of the requested mapping */
+       u32 phys_addr;                  /**< [in] Physical address - could be offset, depending on caller+callee convention */
+       u32 cookie;                     /**< [out] Returns a cookie for use in munmap calls */
+       void *uku_private;              /**< [in] User-side Private word used by U/K interface */
+       void *ukk_private;              /**< [in] Kernel-side Private word used by U/K interface */
+       mali_memory_cache_settings cache_settings; /**< [in] Option to set special cache flags, tuning L2 efficiency */
+} _mali_uk_mem_mmap_s;
+
+/** @brief Arguments to _mali_ukk_mem_munmap()
+ *
+ * The cookie and mapping members must be those returned from the same previous
+ * call to _mali_ukk_mem_mmap(). The size member must correspond to cookie
+ * and mapping - that is, it must be the value originally supplied to a call to
+ * _mali_ukk_mem_mmap that returned the values of mapping and cookie.
+ *
+ * An error will be returned if an attempt is made to unmap only part of the
+ * originally obtained range, or to unmap more than was originally obtained.
+ */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       void *mapping;                  /**< [in] The mapping returned from mmap call */
+       u32 size;                       /**< [in] The size passed to mmap call */
+       u32 cookie;                     /**< [in] Cookie from mmap call */
+} _mali_uk_mem_munmap_s;
+/** @} */ /* end group _mali_uk_memory */
+
+/** @defgroup _mali_uk_vsync U/K VSYNC Wait Reporting Module
+ * @{ */
+
+/** @brief VSYNC events
+ *
+ * These events are reported when the DDK starts to wait for vsync and when the
+ * vsync has occurred and the DDK can continue on the next frame.
+ */
+typedef enum _mali_uk_vsync_event {
+       _MALI_UK_VSYNC_EVENT_BEGIN_WAIT = 0,
+       _MALI_UK_VSYNC_EVENT_END_WAIT
+} _mali_uk_vsync_event;
+
+/** @brief Arguments to _mali_ukk_vsync_event()
+ */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_vsync_event event;     /**< [in] VSYNC event type */
+} _mali_uk_vsync_event_report_s;
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @defgroup _mali_uk_sw_counters_report U/K Software Counter Reporting
+ * @{ */
+
+/** @brief Software counter values
+ *
+ * Values recorded for each of the software counters during a single renderpass.
+ */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32* counters;                  /**< [in] The array of counter values */
+       u32  num_counters;              /**< [in] The number of elements in the counters array */
+} _mali_uk_sw_counters_report_s;
+
+/** @} */ /* end group _mali_uk_sw_counters_report */
+
+/** @defgroup _mali_uk_timeline U/K Mali Timeline
+ * @{ */
+
+/** @brief Arguments for querying the latest point on a given timeline */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 timeline;                   /**< [in] timeline id */
+       u32 point;                      /**< [out] latest point on timeline */
+} _mali_uk_timeline_get_latest_point_s;
+
+/** @brief Arguments for waiting on a timeline fence */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_fence_t fence;         /**< [in] fence */
+       u32 timeout;                    /**< [in] timeout (0 for no wait, -1 for blocking) */
+       u32 status;                     /**< [out] status of fence (1 if signaled, 0 if timeout) */
+} _mali_uk_timeline_wait_s;
+
+/** @brief Arguments for creating a Linux sync fence from a Mali fence */
+typedef struct {
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_fence_t fence;         /**< [in] mali fence to create linux sync fence from */
+       s32 sync_fd;                    /**< [out] file descriptor for new linux sync fence */
+} _mali_uk_timeline_create_sync_fence_s;
+
+/** @} */ /* end group _mali_uk_timeline */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_UK_TYPES_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/license/gpl/mali_kernel_license.h b/drivers/gpu/arm/mali400/r4p0_rel0/linux/license/gpl/mali_kernel_license.h
new file mode 100644 (file)
index 0000000..e9e5e55
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __MALI_KERNEL_LICENSE_H__
+#define __MALI_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define MALI_KERNEL_LINUX_LICENSE     "GPL"   /* license string passed to MODULE_LICENSE() */
+#define MALI_LICENSE_IS_GPL 1                 /* non-zero: this build of the driver is GPL-licensed */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LICENSE_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_device_pause_resume.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_device_pause_resume.c
new file mode 100644 (file)
index 0000000..992a373
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_device_pause_resume.c
+ * Implementation of the Mali pause/resume functionality
+ */
+
+#include <linux/module.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+
+void mali_dev_pause(void)
+{
+       /* Suspend both schedulers first so no new jobs are dispatched, then
+        * power the groups off and pause the L2 caches last. */
+       mali_gp_scheduler_suspend();
+       mali_pp_scheduler_suspend();
+       mali_group_power_off(MALI_FALSE);
+       mali_l2_cache_pause_all(MALI_TRUE);
+}
+
+EXPORT_SYMBOL(mali_dev_pause);
+
+void mali_dev_resume(void)
+{
+       /* Mirror of mali_dev_pause(): unpause the L2 caches first, then let the
+        * schedulers start dispatching jobs again. */
+       mali_l2_cache_pause_all(MALI_FALSE);
+       mali_gp_scheduler_resume();
+       mali_pp_scheduler_resume();
+}
+
+EXPORT_SYMBOL(mali_dev_resume);
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_kernel_linux.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_kernel_linux.c
new file mode 100644 (file)
index 0000000..3c491a9
--- /dev/null
@@ -0,0 +1,814 @@
+/**
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_linux.c
+ * Implementation of the Linux device driver entrypoints
+ */
+#include <linux/module.h>   /* kernel module definitions */
+#include <linux/fs.h>       /* file system operations */
+#include <linux/cdev.h>     /* character device definitions */
+#include <linux/mm.h>       /* memory manager definitions */
+#include <linux/mali/mali_utgard_ioctl.h>
+#include <linux/version.h>
+#include <linux/device.h>
+#include "mali_kernel_license.h"
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_linux.h"
+#include "mali_ukk.h"
+#include "mali_ukk_wrappers.h"
+#include "mali_kernel_sysfs.h"
+#include "mali_pm.h"
+#include "mali_kernel_license.h"
+#include "mali_memory.h"
+#include "mali_memory_dma_buf.h"
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include "mali_profiling_internal.h"
+#endif
+/* MALI_SEC */
+#include <exynos4_pmm.h>
+
+/* Streamline support for the Mali driver */
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_MALI400_PROFILING)
+/* Ask Linux to create the tracepoints */
+#define CREATE_TRACE_POINTS
+#include "mali_linux_trace.h"
+#endif /* CONFIG_TRACEPOINTS */
+
+/* from the __malidrv_build_info.c file that is generated during build */
+extern const char *__malidrv_build_info(void);
+
+/* Module parameter to control log level */
+int mali_debug_level = 2;
+module_param(mali_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_debug_level, "Higher number, more dmesg output");
+
+extern int mali_max_job_runtime;
+module_param(mali_max_job_runtime, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_job_runtime, "Maximum allowed job runtime in msecs.\nJobs will be killed after this no matter what");
+
+extern int mali_l2_max_reads;
+module_param(mali_l2_max_reads, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_l2_max_reads, "Maximum reads for Mali L2 cache");
+
+extern unsigned int mali_dedicated_mem_start;
+module_param(mali_dedicated_mem_start, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_dedicated_mem_start, "Physical start address of dedicated Mali GPU memory.");
+
+extern unsigned int mali_dedicated_mem_size;
+module_param(mali_dedicated_mem_size, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_dedicated_mem_size, "Size of dedicated Mali GPU memory.");
+
+extern unsigned int mali_shared_mem_size;
+module_param(mali_shared_mem_size, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_shared_mem_size, "Size of shared Mali GPU memory.");
+
+#if defined(CONFIG_MALI400_PROFILING)
+extern int mali_boot_profiling;
+module_param(mali_boot_profiling, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_boot_profiling, "Start profiling as a part of Mali driver initialization");
+#endif
+
+extern int mali_max_pp_cores_group_1;
+module_param(mali_max_pp_cores_group_1, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_pp_cores_group_1, "Limit the number of PP cores to use from first PP group.");
+
+extern int mali_max_pp_cores_group_2;
+module_param(mali_max_pp_cores_group_2, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_pp_cores_group_2, "Limit the number of PP cores to use from second PP group (Mali-450 only).");
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+/** the max fps the same as display vsync default 60, can set by module insert parameter */
+extern int mali_max_system_fps;
+module_param(mali_max_system_fps, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_system_fps, "Max system fps the same as display VSYNC.");
+
+/** a lower limit on their desired FPS default 58, can set by module insert parameter*/
+extern int mali_desired_fps;
+module_param(mali_desired_fps, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_desired_fps, "A bit lower than max_system_fps which user desired fps");
+#endif
+
+#if MALI_ENABLE_CPU_CYCLES
+#include <linux/cpumask.h>
+#include <linux/timer.h>
+#include <asm/smp.h>
+static struct timer_list mali_init_cpu_clock_timers[8];
+static u32 mali_cpu_clock_last_value[8] = {0,};
+#endif
+
+/* Export symbols from common code: mali_user_settings.c */
+#include "mali_user_settings_db.h"
+EXPORT_SYMBOL(mali_set_user_setting);
+EXPORT_SYMBOL(mali_get_user_setting);
+
+static char mali_dev_name[] = "mali"; /* should be const, but the functions we call requires non-cost */
+
+/* This driver only supports one Mali device, and this variable stores this single platform device */
+struct platform_device *mali_platform_device = NULL;
+
+/* This driver only supports one Mali device, and this variable stores the exposed misc device (/dev/mali) */
+static struct miscdevice mali_miscdevice = { 0, };
+
+static int mali_miscdevice_register(struct platform_device *pdev);
+static void mali_miscdevice_unregister(void);
+
+static int mali_open(struct inode *inode, struct file *filp);
+static int mali_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+
+static int mali_probe(struct platform_device *pdev);
+static int mali_remove(struct platform_device *pdev);
+
+static int mali_driver_suspend_scheduler(struct device *dev);
+static int mali_driver_resume_scheduler(struct device *dev);
+
+#ifdef CONFIG_PM_RUNTIME
+static int mali_driver_runtime_suspend(struct device *dev);
+static int mali_driver_runtime_resume(struct device *dev);
+static int mali_driver_runtime_idle(struct device *dev);
+#endif
+
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+extern int mali_platform_device_register(void);
+extern int mali_platform_device_unregister(void);
+#endif
+
+/* Linux power management operations provided by the Mali device driver */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+/* Kernels before 2.6.29 take the legacy struct pm_ext_ops */
+struct pm_ext_ops mali_dev_ext_pm_ops = {
+       .base =
+       {
+               .suspend = mali_driver_suspend_scheduler,
+               .resume = mali_driver_resume_scheduler,
+               .freeze = mali_driver_suspend_scheduler,
+               .thaw =   mali_driver_resume_scheduler,
+       },
+};
+#else
+/* 2.6.29 and later use struct dev_pm_ops; the runtime PM callbacks are only
+ * wired up when CONFIG_PM_RUNTIME is enabled. */
+static const struct dev_pm_ops mali_dev_pm_ops = {
+#ifdef CONFIG_PM_RUNTIME
+       .runtime_suspend = mali_driver_runtime_suspend,
+       .runtime_resume = mali_driver_runtime_resume,
+       .runtime_idle = mali_driver_runtime_idle,
+#endif
+       .suspend = mali_driver_suspend_scheduler,
+       .resume = mali_driver_resume_scheduler,
+       .freeze = mali_driver_suspend_scheduler,
+       .thaw = mali_driver_resume_scheduler,
+       .poweroff = mali_driver_suspend_scheduler,
+};
+#endif
+
+/* The Mali device driver struct; the PM ops are attached differently
+ * depending on the kernel version (see mali_dev_ext_pm_ops/mali_dev_pm_ops). */
+static struct platform_driver mali_platform_driver = {
+       .probe  = mali_probe,
+       .remove = mali_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+       .pm = &mali_dev_ext_pm_ops,
+#endif
+       .driver =
+       {
+               .name   = "mali_dev", /* MALI_SEC MALI_GPU_NAME_UTGARD, */
+               .owner  = THIS_MODULE,
+               .bus = &platform_bus_type,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+               .pm = &mali_dev_pm_ops,
+#endif
+       },
+};
+
+/* Linux misc device operations (/dev/mali) */
+struct file_operations mali_fops = {
+       .owner = THIS_MODULE,
+       .open = mali_open,
+       .release = mali_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+       .unlocked_ioctl = mali_ioctl,   /* modern unlocked ioctl entry point */
+#else
+       .ioctl = mali_ioctl,            /* legacy ioctl entry point for older kernels */
+#endif
+       .mmap = mali_mmap
+};
+
+
+#if MALI_ENABLE_CPU_CYCLES
+void mali_init_cpu_time_counters(int reset, int enable_divide_by_64)
+{
+       /* The CPU assembly reference used is: ARM Architecture Reference Manual ARMv7-AR C.b */
+       u32 write_value;
+
+       /* See B4.1.116 PMCNTENSET, Performance Monitors Count Enable Set register, VMSA */
+       /* setting p15 c9 c12 1 to 0x8000000f==CPU_CYCLE_ENABLE |EVENT_3_ENABLE|EVENT_2_ENABLE|EVENT_1_ENABLE|EVENT_0_ENABLE */
+       asm volatile("mcr p15, 0, %0, c9, c12, 1" :: "r"(0x8000000f));
+
+
+       /* See B4.1.117 PMCR, Performance Monitors Control Register. Writing to p15, c9, c12, 0 */
+       write_value = 1<<0; /* Bit 0 set. Enable counters */
+       if (reset) {
+               write_value |= 1<<1; /* Reset event counters */
+               write_value |= 1<<2; /* Reset cycle counter  */
+       }
+       if (enable_divide_by_64) {
+               write_value |= 1<<3; /* Enable the Clock divider by 64 */
+       }
+       write_value |= 1<<4; /* Export enable. Not needed */
+       asm volatile ("MCR p15, 0, %0, c9, c12, 0\t\n" :: "r"(write_value ));
+
+       /* PMOVSR Overflow Flag Status Register - Clear Clock and Event overflows */
+       asm volatile ("MCR p15, 0, %0, c9, c12, 3\t\n" :: "r"(0x8000000f));
+
+
+       /* See B4.1.124 PMUSERENR - setting p15 c9 c14 to 1 */
+       /* User mode access to the Performance Monitors enabled. */
+       /* Lets user space read the CPU clock cycle counter directly. */
+       asm volatile( "mcr p15, 0, %0, c9, c14, 0" :: "r"(1) );
+}
+
+/** A timer function that configures the cycle clock counter on current CPU.
+       The function \a mali_init_cpu_time_counters_on_all_cpus sets up this function
+       to trigger on all CPUs during module load. */
+static void mali_init_cpu_clock_timer_func(unsigned long data)
+{
+       int reset_counters, enable_divide_clock_counter_by_64;
+       int current_cpu = raw_smp_processor_id();
+       unsigned int sample0;
+       unsigned int sample1;
+
+       MALI_IGNORE(data);
+
+       reset_counters= 1;
+       enable_divide_clock_counter_by_64 = 0;
+       mali_init_cpu_time_counters(reset_counters, enable_divide_clock_counter_by_64);
+
+       /* Two back-to-back samples so the debug print shows the counter advancing */
+       sample0 = mali_get_cpu_cyclecount();
+       sample1 = mali_get_cpu_cyclecount();
+
+       MALI_DEBUG_PRINT(3, ("Init Cpu %d cycle counter- First two samples: %08x %08x \n", current_cpu, sample0, sample1));
+}
+
+/** Timer callback that samples the cycle counter on the CPU it runs on and
+    stores the value in mali_cpu_clock_last_value, so the per-CPU clocks can
+    be compared for drift. */
+static void mali_print_cpu_clock_timer_func(unsigned long data)
+{
+       const int cpu = raw_smp_processor_id();
+       unsigned int cycles;
+
+       MALI_IGNORE(data);
+
+       cycles = mali_get_cpu_cyclecount();
+       if (cpu < 8) {
+               mali_cpu_clock_last_value[cpu] = cycles;
+       }
+}
+
+/** Init the performance registers on all CPUs to count clock cycles.
+       For init \a print_only should be 0.
+    If \a print_only is 1, it will instead print the current clock value of all CPUs.*/
+void mali_init_cpu_time_counters_on_all_cpus(int print_only)
+{
+       int i = 0;
+       int cpu_number;
+       int jiffies_trigger;
+       int jiffies_wait;
+
+       jiffies_wait = 2;
+       jiffies_trigger = jiffies + jiffies_wait;
+
+       for ( i=0 ; i < 8 ; i++ ) {
+               init_timer(&mali_init_cpu_clock_timers[i]);
+               if (print_only) mali_init_cpu_clock_timers[i].function = mali_print_cpu_clock_timer_func;
+               else            mali_init_cpu_clock_timers[i].function = mali_init_cpu_clock_timer_func;
+               mali_init_cpu_clock_timers[i].expires = jiffies_trigger ;
+       }
+       /* Pin one timer per online CPU so the callback runs on each of them */
+       cpu_number = cpumask_first(cpu_online_mask);
+       for ( i=0 ; i < 8 ; i++ ) {
+               int next_cpu;
+               add_timer_on(&mali_init_cpu_clock_timers[i], cpu_number);
+               next_cpu = cpumask_next(cpu_number, cpu_online_mask);
+               if (next_cpu >= nr_cpu_ids) break;
+               cpu_number = next_cpu;
+       }
+
+       /* Sleep until the timers have had a chance to fire */
+       while (jiffies_wait) jiffies_wait= schedule_timeout_uninterruptible(jiffies_wait);
+
+       for ( i=0 ; i < 8 ; i++ ) {
+               del_timer_sync(&mali_init_cpu_clock_timers[i]);
+       }
+
+       if (print_only) {
+               /* NOTE(review): only the first four of the eight sampled values are
+                * printed below - confirm whether that is intentional. */
+               if ( (0==mali_cpu_clock_last_value[2]) &&  (0==mali_cpu_clock_last_value[3]) ) {
+                       /* Diff can be printed if we want to check if the clocks are in sync
+                       int diff = mali_cpu_clock_last_value[0] - mali_cpu_clock_last_value[1];*/
+                       MALI_DEBUG_PRINT(2, ("CPU cycle counters readout all: %08x %08x\n", mali_cpu_clock_last_value[0], mali_cpu_clock_last_value[1]));
+               } else {
+                       MALI_DEBUG_PRINT(2, ("CPU cycle counters readout all: %08x %08x %08x %08x\n", mali_cpu_clock_last_value[0], mali_cpu_clock_last_value[1], mali_cpu_clock_last_value[2], mali_cpu_clock_last_value[3] ));
+               }
+       }
+}
+#endif
+
+
+/** Module init entry point (wired up via module_init() below).
+ *
+ * Optionally sets up CPU cycle counters, registers the (possibly fake)
+ * platform device and the platform driver, and starts internal profiling
+ * when configured.  Profiling init failure is deliberately non-fatal.
+ *
+ * @return 0 on success, negative errno on failure.
+ */
+int mali_module_init(void)
+{
+	int err = 0;
+
+	MALI_DEBUG_PRINT(2, ("Inserting Mali v%d device driver. \n",_MALI_API_VERSION));
+	MALI_DEBUG_PRINT(2, ("Compiled: %s, time: %s.\n", __DATE__, __TIME__));
+	MALI_DEBUG_PRINT(2, ("Driver revision: %s\n", SVN_REV_STRING));
+
+#if MALI_ENABLE_CPU_CYCLES
+	/* First pass (0) programs the counters, second pass (1) prints them. */
+	mali_init_cpu_time_counters_on_all_cpus(0);
+	MALI_DEBUG_PRINT(2, ("CPU cycle counter setup complete\n"));
+	/* Printing the current cpu counters */
+	mali_init_cpu_time_counters_on_all_cpus(1);
+#endif
+
+	/* Initialize module wide settings */
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+	MALI_DEBUG_PRINT(2, ("mali_module_init() registering device\n"));
+	err = mali_platform_device_register();
+	if (0 != err) {
+		return err;
+	}
+#endif
+
+	MALI_DEBUG_PRINT(2, ("mali_module_init() registering driver\n"));
+
+	/* Registering the driver triggers mali_probe() for matching devices. */
+	err = platform_driver_register(&mali_platform_driver);
+
+	if (0 != err) {
+		MALI_DEBUG_PRINT(2, ("mali_module_init() Failed to register driver (%d)\n", err));
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+		mali_platform_device_unregister();
+#endif
+		mali_platform_device = NULL;
+		return err;
+	}
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+	err = _mali_internal_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
+	if (0 != err) {
+		/* No biggie if we wheren't able to initialize the profiling */
+		MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n"));
+	}
+#endif
+
+	MALI_PRINT(("Mali device driver loaded\n"));
+
+	return 0; /* Success */
+}
+
+/** Module exit entry point: tears down in reverse order of mali_module_init().
+ * Unregistering the driver triggers mali_remove() for the bound device. */
+void mali_module_exit(void)
+{
+	MALI_DEBUG_PRINT(2, ("Unloading Mali v%d device driver.\n",_MALI_API_VERSION));
+
+	MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering driver\n"));
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+	_mali_internal_profiling_term();
+#endif
+
+	platform_driver_unregister(&mali_platform_driver);
+
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+	MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering device\n"));
+	mali_platform_device_unregister();
+#endif
+
+	MALI_PRINT(("Mali device driver unloaded\n"));
+}
+
+/** Platform-driver probe callback: brings up the full driver stack for @pdev.
+ *
+ * Init order: workqueues -> GPU subsystems -> misc device -> sysfs.
+ * On any failure the already-initialized layers are torn down in reverse
+ * order by falling through the nested error handling below.
+ *
+ * @return 0 on success, -EEXIST if a device is already bound,
+ *         -EFAULT for any initialization failure.
+ */
+static int mali_probe(struct platform_device *pdev)
+{
+	int err;
+
+	MALI_DEBUG_PRINT(2, ("mali_probe(): Called for platform device %s\n", pdev->name));
+
+	if (NULL != mali_platform_device) {
+		/* Already connected to a device, return error */
+		MALI_PRINT_ERROR(("mali_probe(): The Mali driver is already connected with a Mali device."));
+		return -EEXIST;
+	}
+
+	/* Publish the device early: subsystem init below may rely on it. */
+	mali_platform_device = pdev;
+
+	if (_MALI_OSK_ERR_OK == _mali_osk_wq_init()) {
+		/* Initialize the Mali GPU HW specified by pdev */
+		if (_MALI_OSK_ERR_OK == mali_initialize_subsystems()) {
+			/* Register a misc device (so we are accessible from user space) */
+			err = mali_miscdevice_register(pdev);
+			if (0 == err) {
+				/* Setup sysfs entries */
+				err = mali_sysfs_register(mali_dev_name);
+				if (0 == err) {
+					MALI_DEBUG_PRINT(2, ("mali_probe(): Successfully initialized driver for platform device %s\n", pdev->name));
+					return 0;
+				} else {
+					MALI_PRINT_ERROR(("mali_probe(): failed to register sysfs entries"));
+				}
+				mali_miscdevice_unregister();
+			} else {
+				MALI_PRINT_ERROR(("mali_probe(): failed to register Mali misc device."));
+			}
+			mali_terminate_subsystems();
+		} else {
+			MALI_PRINT_ERROR(("mali_probe(): Failed to initialize Mali device driver."));
+		}
+		_mali_osk_wq_term();
+	}
+
+	/* All failure paths converge here with everything rolled back. */
+	mali_platform_device = NULL;
+	return -EFAULT;
+}
+
+/** Platform-driver remove callback: tears down in exact reverse order of
+ * mali_probe() (sysfs -> misc device -> subsystems -> workqueues). */
+static int mali_remove(struct platform_device *pdev)
+{
+	MALI_DEBUG_PRINT(2, ("mali_remove() called for platform device %s\n", pdev->name));
+	mali_sysfs_unregister();
+	mali_miscdevice_unregister();
+	mali_terminate_subsystems();
+	_mali_osk_wq_term();
+	mali_platform_device = NULL;
+	return 0;
+}
+
+/** Register the Mali misc device (exposed to user space as /dev/<mali_dev_name>).
+ *
+ * Takes a reference on @pdev's embedded struct device for use as the misc
+ * device parent.  On failure that reference is dropped again so the
+ * platform device refcount stays balanced (the original code leaked it).
+ *
+ * @return 0 on success, negative errno from misc_register() on failure.
+ */
+static int mali_miscdevice_register(struct platform_device *pdev)
+{
+	int err;
+
+	mali_miscdevice.minor = MISC_DYNAMIC_MINOR;
+	mali_miscdevice.name = mali_dev_name;
+	mali_miscdevice.fops = &mali_fops;
+	mali_miscdevice.parent = get_device(&pdev->dev);
+
+	err = misc_register(&mali_miscdevice);
+	if (0 != err) {
+		MALI_PRINT_ERROR(("Failed to register misc device, misc_register() returned %d\n", err));
+		/* Drop the parent reference taken above; we only hold it
+		 * while the misc device is actually registered. */
+		put_device(&pdev->dev);
+		mali_miscdevice.parent = NULL;
+	}
+
+	return err;
+}
+
+/** Unregister the Mali misc device.
+ * NOTE(review): the parent reference taken via get_device() at register
+ * time is not released here — verify whether a put_device() is needed, or
+ * whether the platform device's lifetime makes this acceptable. */
+static void mali_miscdevice_unregister(void)
+{
+	misc_deregister(&mali_miscdevice);
+}
+
+/** System-suspend PM callback: quiesce the scheduler first, then cut power.
+ * Order matters — jobs must be paused before entering deep sleep. */
+static int mali_driver_suspend_scheduler(struct device *dev)
+{
+	mali_pm_os_suspend();
+	/* MALI_SEC */
+	mali_platform_power_mode_change(dev, MALI_POWER_MODE_DEEP_SLEEP);
+	return 0;
+}
+
+/** System-resume PM callback: mirror of suspend — restore power before
+ * resuming the scheduler, so the hardware is up when jobs restart. */
+static int mali_driver_resume_scheduler(struct device *dev)
+{
+	/* MALI_SEC */
+	mali_platform_power_mode_change(dev, MALI_POWER_MODE_ON);
+	mali_pm_os_resume();
+	return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+/** Runtime-suspend callback: pause runtime PM, then drop to light sleep
+ * (cheaper to exit than the deep sleep used for full system suspend). */
+static int mali_driver_runtime_suspend(struct device *dev)
+{
+	mali_pm_runtime_suspend();
+	/* MALI_SEC */
+	mali_platform_power_mode_change(dev, MALI_POWER_MODE_LIGHT_SLEEP);
+	return 0;
+}
+
+/** Runtime-resume callback: power on first, then resume runtime PM. */
+static int mali_driver_runtime_resume(struct device *dev)
+{
+	/* MALI_SEC */
+	mali_platform_power_mode_change(dev, MALI_POWER_MODE_ON);
+	mali_pm_runtime_resume();
+	return 0;
+}
+
+/** Runtime-idle callback: returning 0 lets the PM core suspend the device. */
+static int mali_driver_runtime_idle(struct device *dev)
+{
+	/* Nothing to do */
+	return 0;
+}
+#endif
+
+/** Open entry point for the Mali device node.
+ *
+ * Verifies the opened node is really our misc device, then allocates the
+ * per-session tracking object via _mali_ukk_open() and stashes it in the
+ * file's private_data for use by later ioctl()/release() calls.
+ *
+ * @return 0 on success, -ENODEV on minor mismatch, mapped errno otherwise.
+ */
+static int mali_open(struct inode *inode, struct file *filp)
+{
+	struct mali_session_data *session;
+	_mali_osk_errcode_t status;
+
+	/* Reject opens on any minor other than the one we registered. */
+	if (iminor(inode) != mali_miscdevice.minor) {
+		MALI_PRINT_ERROR(("mali_open() Minor does not match\n"));
+		return -ENODEV;
+	}
+
+	/* Create the session object that tracks this open file. */
+	status = _mali_ukk_open((void **)&session);
+	if (status != _MALI_OSK_ERR_OK) {
+		return map_errcode(status);
+	}
+
+	/* Start at offset zero and remember the session for later calls. */
+	filp->f_pos = 0;
+	filp->private_data = (void *)session;
+
+	return 0;
+}
+
+/** Release entry point for the Mali device node: tears down the session
+ * object created by mali_open() (stored in filp->private_data).
+ *
+ * @return 0 on success, -ENODEV on minor mismatch, mapped errno otherwise.
+ */
+static int mali_release(struct inode *inode, struct file *filp)
+{
+	_mali_osk_errcode_t status;
+
+	/* Same minor sanity check as mali_open(). */
+	if (iminor(inode) != mali_miscdevice.minor) {
+		MALI_PRINT_ERROR(("mali_release() Minor does not match\n"));
+		return -ENODEV;
+	}
+
+	status = _mali_ukk_close((void **)&filp->private_data);
+	return (status == _MALI_OSK_ERR_OK) ? 0 : map_errcode(status);
+}
+
+int map_errcode( _mali_osk_errcode_t err )
+{
+       switch(err) {
+       case _MALI_OSK_ERR_OK :
+               return 0;
+       case _MALI_OSK_ERR_FAULT:
+               return -EFAULT;
+       case _MALI_OSK_ERR_INVALID_FUNC:
+               return -ENOTTY;
+       case _MALI_OSK_ERR_INVALID_ARGS:
+               return -EINVAL;
+       case _MALI_OSK_ERR_NOMEM:
+               return -ENOMEM;
+       case _MALI_OSK_ERR_TIMEOUT:
+               return -ETIMEDOUT;
+       case _MALI_OSK_ERR_RESTARTSYSCALL:
+               return -ERESTARTSYS;
+       case _MALI_OSK_ERR_ITEM_NOT_FOUND:
+               return -ENOENT;
+       default:
+               return -EFAULT;
+       }
+}
+
+/** ioctl dispatcher for the Mali device node.
+ *
+ * Decodes @cmd and forwards @arg (a user-space pointer to the matching
+ * _mali_uk_*_s struct) to the corresponding wrapper for the session stored
+ * in filp->private_data.  Unknown, deprecated and compiled-out commands
+ * return -ENOTTY.  Built with either the unlocked- or the legacy-ioctl
+ * signature depending on HAVE_UNLOCKED_IOCTL.
+ */
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+	int err;
+	struct mali_session_data *session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+	/* inode not used */
+	(void)inode;
+#endif
+
+	MALI_DEBUG_PRINT(7, ("Ioctl received 0x%08X 0x%08lX\n", cmd, arg));
+
+	session_data = (struct mali_session_data *)filp->private_data;
+	if (NULL == session_data) {
+		MALI_DEBUG_PRINT(7, ("filp->private_data was NULL\n"));
+		return -ENOTTY;
+	}
+
+	/* Every supported command carries a userspace struct pointer in arg. */
+	if (NULL == (void *)arg) {
+		MALI_DEBUG_PRINT(7, ("arg was NULL\n"));
+		return -ENOTTY;
+	}
+
+	switch(cmd) {
+	case MALI_IOC_WAIT_FOR_NOTIFICATION:
+		err = wait_for_notification_wrapper(session_data, (_mali_uk_wait_for_notification_s __user *)arg);
+		break;
+
+	case MALI_IOC_GET_API_VERSION:
+		err = get_api_version_wrapper(session_data, (_mali_uk_get_api_version_s __user *)arg);
+		break;
+
+	case MALI_IOC_POST_NOTIFICATION:
+		err = post_notification_wrapper(session_data, (_mali_uk_post_notification_s __user *)arg);
+		break;
+
+	case MALI_IOC_GET_USER_SETTINGS:
+		err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
+		break;
+
+	case MALI_IOC_REQUEST_HIGH_PRIORITY:
+		err = request_high_priority_wrapper(session_data, (_mali_uk_request_high_priority_s __user *)arg);
+		break;
+
+#if defined(CONFIG_MALI400_PROFILING)
+	case MALI_IOC_PROFILING_START:
+		err = profiling_start_wrapper(session_data, (_mali_uk_profiling_start_s __user *)arg);
+		break;
+
+	case MALI_IOC_PROFILING_ADD_EVENT:
+		err = profiling_add_event_wrapper(session_data, (_mali_uk_profiling_add_event_s __user *)arg);
+		break;
+
+	case MALI_IOC_PROFILING_STOP:
+		err = profiling_stop_wrapper(session_data, (_mali_uk_profiling_stop_s __user *)arg);
+		break;
+
+	case MALI_IOC_PROFILING_GET_EVENT:
+		err = profiling_get_event_wrapper(session_data, (_mali_uk_profiling_get_event_s __user *)arg);
+		break;
+
+	case MALI_IOC_PROFILING_CLEAR:
+		err = profiling_clear_wrapper(session_data, (_mali_uk_profiling_clear_s __user *)arg);
+		break;
+
+	case MALI_IOC_PROFILING_GET_CONFIG:
+		/* Deprecated: still compatible with get_user_settings */
+		err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
+		break;
+
+	case MALI_IOC_PROFILING_REPORT_SW_COUNTERS:
+		err = profiling_report_sw_counters_wrapper(session_data, (_mali_uk_sw_counters_report_s __user *)arg);
+		break;
+
+#else
+
+	case MALI_IOC_PROFILING_START:              /* FALL-THROUGH */
+	case MALI_IOC_PROFILING_ADD_EVENT:          /* FALL-THROUGH */
+	case MALI_IOC_PROFILING_STOP:               /* FALL-THROUGH */
+	case MALI_IOC_PROFILING_GET_EVENT:          /* FALL-THROUGH */
+	case MALI_IOC_PROFILING_CLEAR:              /* FALL-THROUGH */
+	case MALI_IOC_PROFILING_GET_CONFIG:         /* FALL-THROUGH */
+	case MALI_IOC_PROFILING_REPORT_SW_COUNTERS: /* FALL-THROUGH */
+		MALI_DEBUG_PRINT(2, ("Profiling not supported\n"));
+		err = -ENOTTY;
+		break;
+
+#endif
+
+	case MALI_IOC_MEM_WRITE_SAFE:
+		err = mem_write_safe_wrapper(session_data, (_mali_uk_mem_write_safe_s __user *)arg);
+		break;
+
+	case MALI_IOC_MEM_MAP_EXT:
+		err = mem_map_ext_wrapper(session_data, (_mali_uk_map_external_mem_s __user *)arg);
+		break;
+
+	case MALI_IOC_MEM_UNMAP_EXT:
+		err = mem_unmap_ext_wrapper(session_data, (_mali_uk_unmap_external_mem_s __user *)arg);
+		break;
+
+	case MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE:
+		err = mem_query_mmu_page_table_dump_size_wrapper(session_data, (_mali_uk_query_mmu_page_table_dump_size_s __user *)arg);
+		break;
+
+	case MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE:
+		err = mem_dump_mmu_page_table_wrapper(session_data, (_mali_uk_dump_mmu_page_table_s __user *)arg);
+		break;
+
+#if defined(CONFIG_MALI400_UMP)
+
+	case MALI_IOC_MEM_ATTACH_UMP:
+		err = mem_attach_ump_wrapper(session_data, (_mali_uk_attach_ump_mem_s __user *)arg);
+		break;
+
+	case MALI_IOC_MEM_RELEASE_UMP:
+		err = mem_release_ump_wrapper(session_data, (_mali_uk_release_ump_mem_s __user *)arg);
+		break;
+
+#else
+
+	case MALI_IOC_MEM_ATTACH_UMP:
+	case MALI_IOC_MEM_RELEASE_UMP: /* FALL-THROUGH */
+		MALI_DEBUG_PRINT(2, ("UMP not supported\n"));
+		err = -ENOTTY;
+		break;
+#endif
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+	case MALI_IOC_MEM_ATTACH_DMA_BUF:
+		err = mali_attach_dma_buf(session_data, (_mali_uk_attach_dma_buf_s __user *)arg);
+		break;
+
+	case MALI_IOC_MEM_RELEASE_DMA_BUF:
+		err = mali_release_dma_buf(session_data, (_mali_uk_release_dma_buf_s __user *)arg);
+		break;
+
+	case MALI_IOC_MEM_DMA_BUF_GET_SIZE:
+		err = mali_dma_buf_get_size(session_data, (_mali_uk_dma_buf_get_size_s __user *)arg);
+		break;
+#else
+
+	case MALI_IOC_MEM_ATTACH_DMA_BUF:   /* FALL-THROUGH */
+	case MALI_IOC_MEM_RELEASE_DMA_BUF:  /* FALL-THROUGH */
+	case MALI_IOC_MEM_DMA_BUF_GET_SIZE: /* FALL-THROUGH */
+		MALI_DEBUG_PRINT(2, ("DMA-BUF not supported\n"));
+		err = -ENOTTY;
+		break;
+#endif
+
+	case MALI_IOC_PP_START_JOB:
+		err = pp_start_job_wrapper(session_data, (_mali_uk_pp_start_job_s __user *)arg);
+		break;
+
+	case MALI_IOC_PP_AND_GP_START_JOB:
+		err = pp_and_gp_start_job_wrapper(session_data, (_mali_uk_pp_and_gp_start_job_s __user *)arg);
+		break;
+
+	case MALI_IOC_PP_NUMBER_OF_CORES_GET:
+		err = pp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_pp_number_of_cores_s __user *)arg);
+		break;
+
+	case MALI_IOC_PP_CORE_VERSION_GET:
+		err = pp_get_core_version_wrapper(session_data, (_mali_uk_get_pp_core_version_s __user *)arg);
+		break;
+
+	case MALI_IOC_PP_DISABLE_WB:
+		err = pp_disable_wb_wrapper(session_data, (_mali_uk_pp_disable_wb_s __user *)arg);
+		break;
+
+	case MALI_IOC_GP2_START_JOB:
+		err = gp_start_job_wrapper(session_data, (_mali_uk_gp_start_job_s __user *)arg);
+		break;
+
+	case MALI_IOC_GP2_NUMBER_OF_CORES_GET:
+		err = gp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_gp_number_of_cores_s __user *)arg);
+		break;
+
+	case MALI_IOC_GP2_CORE_VERSION_GET:
+		err = gp_get_core_version_wrapper(session_data, (_mali_uk_get_gp_core_version_s __user *)arg);
+		break;
+
+	case MALI_IOC_GP2_SUSPEND_RESPONSE:
+		err = gp_suspend_response_wrapper(session_data, (_mali_uk_gp_suspend_response_s __user *)arg);
+		break;
+
+	case MALI_IOC_VSYNC_EVENT_REPORT:
+		err = vsync_event_report_wrapper(session_data, (_mali_uk_vsync_event_report_s __user *)arg);
+		break;
+
+	case MALI_IOC_TIMELINE_GET_LATEST_POINT:
+		err = timeline_get_latest_point_wrapper(session_data, (_mali_uk_timeline_get_latest_point_s __user *)arg);
+		break;
+	case MALI_IOC_TIMELINE_WAIT:
+		err = timeline_wait_wrapper(session_data, (_mali_uk_timeline_wait_s __user *)arg);
+		break;
+	case MALI_IOC_TIMELINE_CREATE_SYNC_FENCE:
+		err = timeline_create_sync_fence_wrapper(session_data, (_mali_uk_timeline_create_sync_fence_s __user *)arg);
+		break;
+	case MALI_IOC_SOFT_JOB_START:
+		err = soft_job_start_wrapper(session_data, (_mali_uk_soft_job_start_s __user *)arg);
+		break;
+	case MALI_IOC_SOFT_JOB_SIGNAL:
+		err = soft_job_signal_wrapper(session_data, (_mali_uk_soft_job_signal_s __user *)arg);
+		break;
+
+	case MALI_IOC_MEM_INIT: /* Fallthrough */
+	case MALI_IOC_MEM_TERM: /* Fallthrough */
+		MALI_DEBUG_PRINT(2, ("Deprecated ioctls called\n"));
+		err = -ENOTTY;
+		break;
+
+	case MALI_IOC_MEM_GET_BIG_BLOCK: /* Fallthrough */
+	case MALI_IOC_MEM_FREE_BIG_BLOCK:
+		MALI_PRINT_ERROR(("Non-MMU mode is no longer supported.\n"));
+		err = -ENOTTY;
+		break;
+
+	default:
+		MALI_DEBUG_PRINT(2, ("No handler for ioctl 0x%08X 0x%08lX\n", cmd, arg));
+		err = -ENOTTY;
+	};
+
+	return err;
+}
+
+
+/* Module entry/exit points: mali_module_init() runs at insmod,
+ * mali_module_exit() at rmmod. */
+module_init(mali_module_init);
+module_exit(mali_module_exit);
+
+MODULE_LICENSE(MALI_KERNEL_LINUX_LICENSE);
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_kernel_linux.h b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_kernel_linux.h
new file mode 100644 (file)
index 0000000..09e736c
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_LINUX_H__
+#define __MALI_KERNEL_LINUX_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/cdev.h>     /* character device definitions */
+#include "mali_kernel_license.h"
+#include "mali_osk_types.h"
+
+/* The platform device currently bound to the driver; set by mali_probe()
+ * and cleared by mali_remove() in mali_kernel_linux.c. */
+extern struct platform_device *mali_platform_device;
+
+#if MALI_LICENSE_IS_GPL
+/* Defined in mali_osk_irq.h */
+extern struct workqueue_struct * mali_wq_normal;
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LINUX_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_kernel_sysfs.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_kernel_sysfs.c
new file mode 100644 (file)
index 0000000..204d87e
--- /dev/null
@@ -0,0 +1,1392 @@
+/**
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+
+/**
+ * @file mali_kernel_sysfs.c
+ * Implementation of some sysfs data exports
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include "mali_kernel_license.h"
+#include "mali_kernel_common.h"
+#include "mali_ukk.h"
+
+#if MALI_LICENSE_IS_GPL
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_sysfs.h"
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include <linux/slab.h>
+#include "mali_osk_profiling.h"
+#endif
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_pm.h"
+#include "mali_pmu.h"
+#include "mali_group.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_kernel_core.h"
+#include "mali_user_settings_db.h"
+#include "mali_profiling_internal.h"
+#include "mali_gp_job.h"
+#include "mali_pp_job.h"
+#include "mali_pp_scheduler.h"
+
+/* Counter identifiers are packed into the debugfs file's private_data
+ * pointer instead of a heap object.  Bit layout (low to high):
+ *   [7:0]   source id (0 or 1)
+ *   [15:8]  PP sub-job index
+ *   [16]    "is sub-job" flag
+ *   [24]    "is PP" flag (clear = GP)
+ * NOTE(review): <linux/module.h> and <linux/mali/mali_utgard.h> are each
+ * included twice above; harmless (header guards) but could be cleaned up. */
+#define PRIVATE_DATA_COUNTER_MAKE_GP(src) (src)
+#define PRIVATE_DATA_COUNTER_MAKE_PP(src) ((1 << 24) | src)
+#define PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(src, sub_job) ((1 << 24) | (1 << 16) | (sub_job << 8) | src)
+#define PRIVATE_DATA_COUNTER_IS_PP(a) ((((a) >> 24) & 0xFF) ? MALI_TRUE : MALI_FALSE)
+#define PRIVATE_DATA_COUNTER_GET_SRC(a) (a & 0xFF)
+#define PRIVATE_DATA_COUNTER_IS_SUB_JOB(a) ((((a) >> 16) & 0xFF) ? MALI_TRUE : MALI_FALSE)
+#define PRIVATE_DATA_COUNTER_GET_SUB_JOB(a) (((a) >> 8) & 0xFF)
+
+#define POWER_BUFFER_SIZE 3
+
+/* Root of the Mali debugfs tree; created elsewhere in this file. */
+static struct dentry *mali_debugfs_dir = NULL;
+
+/* Power-state debug events exposed through debugfs. */
+typedef enum {
+	_MALI_DEVICE_SUSPEND,
+	_MALI_DEVICE_RESUME,
+	_MALI_DEVICE_DVFS_PAUSE,
+	_MALI_DEVICE_DVFS_RESUME,
+	_MALI_MAX_EVENTS
+} _mali_device_debug_power_events;
+
+/* Human-readable names for the events above, indexed by enum value. */
+static const char* const mali_power_events[_MALI_MAX_EVENTS] = {
+	[_MALI_DEVICE_SUSPEND] = "suspend",
+	[_MALI_DEVICE_RESUME] = "resume",
+	[_MALI_DEVICE_DVFS_PAUSE] = "dvfs_pause",
+	[_MALI_DEVICE_DVFS_RESUME] = "dvfs_resume",
+};
+
+static mali_bool power_always_on_enabled = MALI_FALSE;
+
+/** Shared debugfs open handler: copy the inode's private data (set at file
+ * creation time) into the file so read/write handlers can reach it. */
+static int open_copy_private_data(struct inode *inode, struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+/** debugfs read handler for a group's "enabled" file: reports "1\n" or "0\n". */
+static ssize_t group_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+	int r;
+	char buffer[64];
+	struct mali_group *group;
+
+	group = (struct mali_group *)filp->private_data;
+	MALI_DEBUG_ASSERT_POINTER(group);
+
+	r = sprintf(buffer, "%u\n", mali_group_is_enabled(group) ? 1 : 0);
+
+	return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+/** debugfs write handler for a group's "enabled" file.
+ *
+ * Accepts "0" (disable the group) or "1" (enable the group).  Any other
+ * value, or input that does not parse as a number, is rejected with
+ * -EINVAL — including oversized input, which is malformed input rather
+ * than an allocation failure (the original -ENOMEM was misleading and
+ * inconsistent with the other write handlers in this file).
+ *
+ * @return @count on success, negative errno on failure.
+ */
+static ssize_t group_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+	int r;
+	char buffer[64];
+	unsigned long val;
+	struct mali_group *group;
+
+	group = (struct mali_group *)filp->private_data;
+	MALI_DEBUG_ASSERT_POINTER(group);
+
+	/* Leave room for the terminating NUL added below. */
+	if (count >= sizeof(buffer)) {
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&buffer[0], buf, count)) {
+		return -EFAULT;
+	}
+	buffer[count] = '\0';
+
+	r = strict_strtoul(&buffer[0], 10, &val);
+	if (0 != r) {
+		return -EINVAL;
+	}
+
+	switch (val) {
+	case 1:
+		mali_group_enable(group);
+		break;
+	case 0:
+		mali_group_disable(group);
+		break;
+	default:
+		/* Only 0 and 1 are meaningful. */
+		return -EINVAL;
+	}
+
+	*offp += count;
+	return count;
+}
+
+/* fops for the per-group "enabled" debugfs file. */
+static const struct file_operations group_enabled_fops = {
+	.owner = THIS_MODULE,
+	.open  = open_copy_private_data,
+	.read = group_enabled_read,
+	.write = group_enabled_write,
+};
+
+/** debugfs read handler: report a HW core's physical base address in hex. */
+static ssize_t hw_core_base_addr_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+	int r;
+	char buffer[64];
+	struct mali_hw_core *hw_core;
+
+	hw_core = (struct mali_hw_core *)filp->private_data;
+	MALI_DEBUG_ASSERT_POINTER(hw_core);
+
+	r = sprintf(buffer, "0x%08X\n", hw_core->phys_addr);
+
+	return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+/* fops for the read-only per-core "base_addr" debugfs file. */
+static const struct file_operations hw_core_base_addr_fops = {
+	.owner = THIS_MODULE,
+	.open  = open_copy_private_data,
+	.read = hw_core_base_addr_read,
+};
+
+/** debugfs read handler for a profiling counter source file.
+ *
+ * Which counter (GP vs PP, global vs per-sub-job, src0 vs src1) is encoded
+ * in the private_data pointer itself via the PRIVATE_DATA_COUNTER_* macros.
+ * Prints the configured counter id, or "-1" if the counter is disabled.
+ */
+static ssize_t profiling_counter_src_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((u32)filp->private_data);
+	u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((u32)filp->private_data);
+	mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((u32)filp->private_data);
+	u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((u32)filp->private_data);
+	char buf[64];
+	int r;
+	u32 val;
+
+	if (MALI_TRUE == is_pp) {
+		/* PP counter */
+		if (MALI_TRUE == is_sub_job) {
+			/* Get counter for a particular sub job */
+			if (0 == src_id) {
+				val = mali_pp_job_get_pp_counter_sub_job_src0(sub_job);
+			} else {
+				val = mali_pp_job_get_pp_counter_sub_job_src1(sub_job);
+			}
+		} else {
+			/* Get default counter for all PP sub jobs */
+			if (0 == src_id) {
+				val = mali_pp_job_get_pp_counter_global_src0();
+			} else {
+				val = mali_pp_job_get_pp_counter_global_src1();
+			}
+		}
+	} else {
+		/* GP counter */
+		if (0 == src_id) {
+			val = mali_gp_job_get_gp_counter_src0();
+		} else {
+			val = mali_gp_job_get_gp_counter_src1();
+		}
+	}
+
+	/* MALI_HW_CORE_NO_COUNTER is the "disabled" sentinel, shown as -1. */
+	if (MALI_HW_CORE_NO_COUNTER == val) {
+		r = sprintf(buf, "-1\n");
+	} else {
+		r = sprintf(buf, "%u\n", val);
+	}
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+/** debugfs write handler for a profiling counter source file.
+ *
+ * Counterpart of profiling_counter_src_read(): the target counter is
+ * encoded in private_data.  A decimal counter id selects the HW event to
+ * count; any negative value disables the counter.
+ *
+ * @return @cnt on success, negative errno on failure.
+ */
+static ssize_t profiling_counter_src_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((u32)filp->private_data);
+	u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((u32)filp->private_data);
+	mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((u32)filp->private_data);
+	u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((u32)filp->private_data);
+	char buf[64];
+	long val;
+	int ret;
+
+	/* Leave room for the terminating NUL added below. */
+	if (cnt >= sizeof(buf)) {
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&buf, ubuf, cnt)) {
+		return -EFAULT;
+	}
+
+	buf[cnt] = 0;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret < 0) {
+		return ret;
+	}
+
+	if (val < 0) {
+		/* any negative input will disable counter */
+		val = MALI_HW_CORE_NO_COUNTER;
+	}
+
+	if (MALI_TRUE == is_pp) {
+		/* PP counter */
+		if (MALI_TRUE == is_sub_job) {
+			/* Set counter for a particular sub job */
+			if (0 == src_id) {
+				mali_pp_job_set_pp_counter_sub_job_src0(sub_job, (u32)val);
+			} else {
+				mali_pp_job_set_pp_counter_sub_job_src1(sub_job, (u32)val);
+			}
+		} else {
+			/* Set default counter for all PP sub jobs */
+			if (0 == src_id) {
+				mali_pp_job_set_pp_counter_global_src0((u32)val);
+			} else {
+				mali_pp_job_set_pp_counter_global_src1((u32)val);
+			}
+		}
+	} else {
+		/* GP counter */
+		if (0 == src_id) {
+			mali_gp_job_set_gp_counter_src0((u32)val);
+		} else {
+			mali_gp_job_set_gp_counter_src1((u32)val);
+		}
+	}
+
+	*ppos += cnt;
+	return cnt;
+}
+
+/* fops shared by every profiling counter source debugfs file; the target
+ * counter is distinguished via the encoded inode private_data. */
+static const struct file_operations profiling_counter_src_fops = {
+	.owner = THIS_MODULE,
+	.open  = open_copy_private_data,
+	.read  = profiling_counter_src_read,
+	.write = profiling_counter_src_write,
+};
+
+/** Common read helper for a single L2 cache counter source file.
+ * @param src_id 0 or 1, selecting which of the two counter sources to read.
+ * Prints the configured counter id, or "-1" when disabled. */
+static ssize_t l2_l2x_counter_srcx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+	char buf[64];
+	int r;
+	u32 val;
+	struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+
+	if (0 == src_id) {
+		val = mali_l2_cache_core_get_counter_src0(l2_core);
+	} else {
+		val = mali_l2_cache_core_get_counter_src1(l2_core);
+	}
+
+	/* MALI_HW_CORE_NO_COUNTER is the "disabled" sentinel, shown as -1. */
+	if (MALI_HW_CORE_NO_COUNTER == val) {
+		r = sprintf(buf, "-1\n");
+	} else {
+		r = sprintf(buf, "%u\n", val);
+	}
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+/** Common write helper for a single L2 cache counter source file.
+ * Decimal counter id selects the event; any negative value disables it.
+ * @param src_id 0 or 1, selecting which counter source to program. */
+static ssize_t l2_l2x_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+	struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+	char buf[64];
+	long val;
+	int ret;
+
+	/* Leave room for the terminating NUL added below. */
+	if (cnt >= sizeof(buf)) {
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&buf, ubuf, cnt)) {
+		return -EFAULT;
+	}
+
+	buf[cnt] = 0;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret < 0) {
+		return ret;
+	}
+
+	if (val < 0) {
+		/* any negative input will disable counter */
+		val = MALI_HW_CORE_NO_COUNTER;
+	}
+
+	if (0 == src_id) {
+		mali_l2_cache_core_set_counter_src0(l2_core, (u32)val);
+	} else {
+		mali_l2_cache_core_set_counter_src1(l2_core, (u32)val);
+	}
+
+	*ppos += cnt;
+	return cnt;
+}
+
+/** Broadcast write helper: program counter source @src_id on EVERY L2 cache
+ * core in the system with the same value.  Parsing rules match
+ * l2_l2x_counter_srcx_write() (decimal id; negative disables). */
+static ssize_t l2_all_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+	char buf[64];
+	long val;
+	int ret;
+	u32 l2_id;
+	struct mali_l2_cache_core *l2_cache;
+
+	/* Leave room for the terminating NUL added below. */
+	if (cnt >= sizeof(buf)) {
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&buf, ubuf, cnt)) {
+		return -EFAULT;
+	}
+
+	buf[cnt] = 0;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret < 0) {
+		return ret;
+	}
+
+	if (val < 0) {
+		/* any negative input will disable counter */
+		val = MALI_HW_CORE_NO_COUNTER;
+	}
+
+	/* Walk all L2 cores until the global lookup returns NULL. */
+	l2_id = 0;
+	l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+	while (NULL != l2_cache) {
+		if (0 == src_id) {
+			mali_l2_cache_core_set_counter_src0(l2_cache, (u32)val);
+		} else {
+			mali_l2_cache_core_set_counter_src1(l2_cache, (u32)val);
+		}
+
+		/* try next L2 */
+		l2_id++;
+		l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+	}
+
+	*ppos += cnt;
+	return cnt;
+}
+
+/* Thin debugfs wrappers binding src_id 0/1 to the shared handlers above. */
+static ssize_t l2_l2x_counter_src0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_l2x_counter_srcx_read(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_src1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_l2x_counter_srcx_read(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t l2_l2x_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_l2x_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_l2x_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+/* "all" variants broadcast the new source to every L2 instance (write only). */
+static ssize_t l2_all_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_all_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_all_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_all_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+/* Per-instance counter files: open_copy_private_data stashes the L2 core
+ * pointer in filp->private_data for the read/write handlers. */
+static const struct file_operations l2_l2x_counter_src0_fops = {
+       .owner = THIS_MODULE,
+       .open  = open_copy_private_data,
+       .read  = l2_l2x_counter_src0_read,
+       .write = l2_l2x_counter_src0_write,
+};
+
+static const struct file_operations l2_l2x_counter_src1_fops = {
+       .owner = THIS_MODULE,
+       .open  = open_copy_private_data,
+       .read  = l2_l2x_counter_src1_read,
+       .write = l2_l2x_counter_src1_write,
+};
+
+/* Broadcast files: write-only, no per-instance private data needed. */
+static const struct file_operations l2_all_counter_src0_fops = {
+       .owner = THIS_MODULE,
+       .write = l2_all_counter_src0_write,
+};
+
+static const struct file_operations l2_all_counter_src1_fops = {
+       .owner = THIS_MODULE,
+       .write = l2_all_counter_src1_write,
+};
+
+/* Enable/disable the "always on" power override.
+ * Writing "1" takes an extra PM device reference so the GPU stays powered;
+ * writing "0" drops that reference again. Any other value is ignored.
+ */
+static ssize_t power_always_on_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       unsigned long val;
+       int ret;
+       char buf[32];
+
+       /* Truncate over-long input instead of rejecting it. */
+       cnt = min(cnt, sizeof(buf) - 1);
+       if (copy_from_user(buf, ubuf, cnt)) {
+               return -EFAULT;
+       }
+       buf[cnt] = '\0';
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (0 != ret) {
+               return ret;
+       }
+
+       /* Update setting (not exactly thread safe) */
+       if (1 == val && MALI_FALSE == power_always_on_enabled) {
+               power_always_on_enabled = MALI_TRUE;
+               _mali_osk_pm_dev_ref_add();
+       } else if (0 == val && MALI_TRUE == power_always_on_enabled) {
+               power_always_on_enabled = MALI_FALSE;
+               _mali_osk_pm_dev_ref_dec();
+       }
+
+       *ppos += cnt;
+       return cnt;
+}
+
+/* Report "1" when the keep-powered override is active, "0" otherwise. */
+static ssize_t power_always_on_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       const char *state = (MALI_TRUE == power_always_on_enabled) ? "1\n" : "0\n";
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, state, 2);
+}
+
+/* debugfs power/always_on file. */
+static const struct file_operations power_always_on_fops = {
+       .owner = THIS_MODULE,
+       .read  = power_always_on_read,
+       .write = power_always_on_write,
+};
+
+/* Inject a power event ("suspend", "resume", DVFS pause/resume) by name.
+ * The written text is matched against the mali_power_events[] strings and
+ * the corresponding PM entry point is invoked; unrecognised input is
+ * silently accepted (best-effort debug interface).
+ */
+static ssize_t power_power_events_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       size_t copy_len;
+
+       /* ubuf is a user-space pointer: it must be copied into kernel space
+        * before it can be parsed. Dereferencing it directly (as the old
+        * strncmp(ubuf, ...) did) is invalid and can oops or read the wrong
+        * address space. Over-long input is truncated, matching the other
+        * write handlers in this file.
+        */
+       copy_len = min(cnt, sizeof(buf) - 1);
+       if (copy_from_user(buf, ubuf, copy_len)) {
+               return -EFAULT;
+       }
+       buf[copy_len] = '\0';
+
+       if (!strncmp(buf, mali_power_events[_MALI_DEVICE_SUSPEND], strlen(mali_power_events[_MALI_DEVICE_SUSPEND]))) {
+               mali_pm_os_suspend();
+       } else if (!strncmp(buf, mali_power_events[_MALI_DEVICE_RESUME], strlen(mali_power_events[_MALI_DEVICE_RESUME]))) {
+               mali_pm_os_resume();
+       } else if (!strncmp(buf, mali_power_events[_MALI_DEVICE_DVFS_PAUSE], strlen(mali_power_events[_MALI_DEVICE_DVFS_PAUSE]))) {
+               mali_dev_pause();
+       } else if (!strncmp(buf, mali_power_events[_MALI_DEVICE_DVFS_RESUME], strlen(mali_power_events[_MALI_DEVICE_DVFS_RESUME]))) {
+               mali_dev_resume();
+       }
+
+       *ppos += cnt;
+       return cnt;
+}
+
+/* Trivial llseek for the power_events file: accept any offset.
+ * An llseek handler must return the resulting file position (or a negative
+ * errno); the previous version returned 0 even after a non-zero seek.
+ */
+static loff_t power_power_events_seek(struct file *file, loff_t offset, int orig)
+{
+       file->f_pos = offset;
+       return offset;
+}
+
+/* debugfs power/power_events file (write-only event injector). */
+static const struct file_operations power_power_events_fops = {
+       .owner = THIS_MODULE,
+       .write = power_power_events_write,
+       .llseek = power_power_events_seek,
+};
+
+#if MALI_STATE_TRACKING
+/* Produce the driver's internal state dump into the seq_file buffer.
+ *
+ * scnprintf() is used instead of snprintf(): snprintf() returns the length
+ * the output *would* have had, so on truncation 'len' could exceed 'size'
+ * and the unsigned 'size - len' of the next call would underflow to a huge
+ * value, letting that call write past the end of 'buf'. scnprintf() returns
+ * the number of characters actually written, keeping 'len' bounded.
+ */
+static int mali_seq_internal_state_show(struct seq_file *seq_file, void *v)
+{
+       u32 len = 0;
+       u32 size;
+       char *buf;
+
+       size = seq_get_buf(seq_file, &buf);
+
+       if (!size) {
+               return -ENOMEM;
+       }
+
+       /* Create the internal state dump. */
+       len  = scnprintf(buf + len, size - len, "Mali device driver %s\n", SVN_REV_STRING);
+       len += scnprintf(buf + len, size - len, "License: %s\n\n", MALI_KERNEL_LINUX_LICENSE);
+
+       len += _mali_kernel_core_dump_state(buf + len, size - len);
+
+       seq_commit(seq_file, len);
+
+       return 0;
+}
+
+/* Open handler: bind the single-shot state dump to this file. */
+static int mali_seq_internal_state_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, mali_seq_internal_state_show, NULL);
+}
+
+/* debugfs state_dump file, standard single_open seq_file plumbing. */
+static const struct file_operations mali_seq_internal_state_fops = {
+       .owner = THIS_MODULE,
+       .open = mali_seq_internal_state_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+#endif /* MALI_STATE_TRACKING */
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+/* Report whether internal profiling is currently recording ("1" / "0"). */
+static ssize_t profiling_record_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char text[64];
+       int len = sprintf(text, "%u\n", _mali_internal_profiling_is_recording() ? 1 : 0);
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, text, len);
+}
+
+/* Start (non-zero input) or stop (zero input) recording of internal
+ * profiling events. Starting refuses if a recording is already running,
+ * and clears any previously stored recording first.
+ */
+static ssize_t profiling_record_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       unsigned long val;
+       int ret;
+
+       if (cnt >= sizeof(buf)) {
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&buf, ubuf, cnt)) {
+               return -EFAULT;
+       }
+
+       buf[cnt] = 0;
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (ret < 0) {
+               return ret;
+       }
+
+       if (val != 0) {
+               u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* This can be made configurable at a later stage if we need to */
+
+               /* check if we are already recording */
+               if (MALI_TRUE == _mali_internal_profiling_is_recording()) {
+                       MALI_DEBUG_PRINT(3, ("Recording of profiling events already in progress\n"));
+                       return -EFAULT;
+               }
+
+               /* check if we need to clear out an old recording first */
+               if (MALI_TRUE == _mali_internal_profiling_have_recording()) {
+                       if (_MALI_OSK_ERR_OK != _mali_internal_profiling_clear()) {
+                               MALI_DEBUG_PRINT(3, ("Failed to clear existing recording of profiling events\n"));
+                               return -EFAULT;
+                       }
+               }
+
+               /* start recording profiling data */
+               if (_MALI_OSK_ERR_OK != _mali_internal_profiling_start(&limit)) {
+                       MALI_DEBUG_PRINT(3, ("Failed to start recording of profiling events\n"));
+                       return -EFAULT;
+               }
+
+               MALI_DEBUG_PRINT(3, ("Profiling recording started (max %u events)\n", limit));
+       } else {
+               /* stop recording profiling data */
+               u32 count = 0;
+               if (_MALI_OSK_ERR_OK != _mali_internal_profiling_stop(&count)) {
+                       MALI_DEBUG_PRINT(2, ("Failed to stop recording of profiling events\n"));
+                       return -EFAULT;
+               }
+
+               MALI_DEBUG_PRINT(2, ("Profiling recording stopped (recorded %u events)\n", count));
+       }
+
+       *ppos += cnt;
+       return cnt;
+}
+
+/* debugfs profiling/record file. */
+static const struct file_operations profiling_record_fops = {
+       .owner = THIS_MODULE,
+       .read  = profiling_record_read,
+       .write = profiling_record_write,
+};
+
+/* seq_file .start: allocate an iterator holding the current position, or
+ * return NULL when no recording exists (or the allocation fails), which
+ * ends the sequence. Freed by profiling_events_stop().
+ */
+static void *profiling_events_start(struct seq_file *s, loff_t *pos)
+{
+       loff_t *iter;
+
+       if (MALI_TRUE != _mali_internal_profiling_have_recording()) {
+               return NULL;
+       }
+
+       iter = kmalloc(sizeof(loff_t), GFP_KERNEL);
+       if (NULL != iter) {
+               *iter = *pos;
+       }
+
+       return iter;
+}
+
+/* seq_file .next: advance the iterator, or return NULL to stop when the
+ * recording disappeared or the next index is out of range. */
+static void *profiling_events_next(struct seq_file *s, void *v, loff_t *pos)
+{
+       loff_t *spos = v;
+
+       /* check if we have data avaiable */
+       if (MALI_TRUE != _mali_internal_profiling_have_recording()) {
+               return NULL;
+       }
+
+       /* check if the next entry actually is avaiable */
+       if (_mali_internal_profiling_get_count() <= (u32)(*spos + 1)) {
+               return NULL;
+       }
+
+       *pos = ++*spos;
+       return spos;
+}
+
+/* seq_file .stop: free the iterator allocated by profiling_events_start(). */
+static void profiling_events_stop(struct seq_file *s, void *v)
+{
+       kfree(v);
+}
+
+/* seq_file .show: print one recorded event as raw numbers
+ * ("timestamp event_id d0 d1 d2 d3 d4"). Lookup failures are skipped
+ * silently (the line is simply not printed).
+ */
+static int profiling_events_show(struct seq_file *seq_file, void *v)
+{
+       u64 timestamp;
+       u32 event_id;
+       u32 data[5];
+       u32 index = (u32)*((loff_t *)v);
+
+       if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(index, &timestamp, &event_id, data)) {
+               seq_printf(seq_file, "%llu %u %u %u %u %u %u\n", timestamp, event_id, data[0], data[1], data[2], data[3], data[4]);
+       }
+
+       return 0;
+}
+
+/* seq_file .show: print one recorded event with its type, channel and
+ * reason fields decoded into text, plus a timestamp relative to the first
+ * event of the recording.
+ */
+static int profiling_events_show_human_readable(struct seq_file *seq_file, void *v)
+{
+#define MALI_EVENT_ID_IS_HW(event_id) (((event_id & 0x00FF0000) >= MALI_PROFILING_EVENT_CHANNEL_GP0) && ((event_id & 0x00FF0000) <= MALI_PROFILING_EVENT_CHANNEL_PP7))
+
+       /* Timestamp of event 0; persists across .show calls so later lines
+        * can be printed relative to it. Reset whenever index 0 is shown. */
+       static u64 start_time = 0;
+       loff_t *spos = v;
+       u32 index;
+       u64 timestamp;
+       u32 event_id;
+       u32 data[5];
+
+       index = (u32)*spos;
+
+       /* Retrieve all events */
+       if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(index, &timestamp, &event_id, data)) {
+               seq_printf(seq_file, "%llu %u %u %u %u %u %u # ", timestamp, event_id, data[0], data[1], data[2], data[3], data[4]);
+
+               if (0 == index) {
+                       start_time = timestamp;
+               }
+
+               seq_printf(seq_file, "[%06u] ", index);
+
+               /* Bits 27..24: event type. */
+               switch(event_id & 0x0F000000) {
+               case MALI_PROFILING_EVENT_TYPE_SINGLE:
+                       seq_printf(seq_file, "SINGLE | ");
+                       break;
+               case MALI_PROFILING_EVENT_TYPE_START:
+                       seq_printf(seq_file, "START | ");
+                       break;
+               case MALI_PROFILING_EVENT_TYPE_STOP:
+                       seq_printf(seq_file, "STOP | ");
+                       break;
+               case MALI_PROFILING_EVENT_TYPE_SUSPEND:
+                       seq_printf(seq_file, "SUSPEND | ");
+                       break;
+               case MALI_PROFILING_EVENT_TYPE_RESUME:
+                       seq_printf(seq_file, "RESUME | ");
+                       break;
+               default:
+                       seq_printf(seq_file, "0x%01X | ", (event_id & 0x0F000000) >> 24);
+                       break;
+               }
+
+               /* Bits 23..16: source channel (SW, GP/PP cores, GPU). */
+               switch(event_id & 0x00FF0000) {
+               case MALI_PROFILING_EVENT_CHANNEL_SOFTWARE:
+                       seq_printf(seq_file, "SW | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_GP0:
+                       seq_printf(seq_file, "GP0 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP0:
+                       seq_printf(seq_file, "PP0 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP1:
+                       seq_printf(seq_file, "PP1 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP2:
+                       seq_printf(seq_file, "PP2 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP3:
+                       seq_printf(seq_file, "PP3 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP4:
+                       seq_printf(seq_file, "PP4 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP5:
+                       seq_printf(seq_file, "PP5 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP6:
+                       seq_printf(seq_file, "PP6 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP7:
+                       seq_printf(seq_file, "PP7 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_GPU:
+                       seq_printf(seq_file, "GPU | ");
+                       break;
+               default:
+                       seq_printf(seq_file, "0x%02X | ", (event_id & 0x00FF0000) >> 16);
+                       break;
+               }
+
+               /* Bits 15..0: reason; only decoded for HW-channel start/stop. */
+               if (MALI_EVENT_ID_IS_HW(event_id)) {
+                       if (((event_id & 0x0F000000) == MALI_PROFILING_EVENT_TYPE_START) || ((event_id & 0x0F000000) == MALI_PROFILING_EVENT_TYPE_STOP)) {
+                               switch(event_id & 0x0000FFFF) {
+                               case MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL:
+                                       seq_printf(seq_file, "PHYSICAL | ");
+                                       break;
+                               case MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL:
+                                       seq_printf(seq_file, "VIRTUAL | ");
+                                       break;
+                               default:
+                                       seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+                                       break;
+                               }
+                       } else {
+                               seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+                       }
+               } else {
+                       seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+               }
+
+               seq_printf(seq_file, "T0 + 0x%016llX\n", timestamp - start_time);
+
+               return 0;
+       }
+
+       return 0;
+}
+
+/* seq_file plumbing for the raw (machine-readable) event dump. */
+static const struct seq_operations profiling_events_seq_ops = {
+       .start = profiling_events_start,
+       .next  = profiling_events_next,
+       .stop  = profiling_events_stop,
+       .show  = profiling_events_show
+};
+
+static int profiling_events_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &profiling_events_seq_ops);
+}
+
+static const struct file_operations profiling_events_fops = {
+       .owner = THIS_MODULE,
+       .open = profiling_events_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release,
+};
+
+/* seq_file plumbing for the decoded (human-readable) event dump; same
+ * iterator callbacks, different .show. */
+static const struct seq_operations profiling_events_human_readable_seq_ops = {
+       .start = profiling_events_start,
+       .next  = profiling_events_next,
+       .stop  = profiling_events_stop,
+       .show  = profiling_events_show_human_readable
+};
+
+static int profiling_events_human_readable_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &profiling_events_human_readable_seq_ops);
+}
+
+static const struct file_operations profiling_events_human_readable_fops = {
+       .owner = THIS_MODULE,
+       .open = profiling_events_human_readable_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release,
+};
+
+#endif
+
+/* Report the driver's current memory usage as reported by the UK layer. */
+static ssize_t memory_used_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char text[64];
+       size_t len;
+
+       len = snprintf(text, sizeof(text), "%u\n", _mali_ukk_report_memory_usage());
+       return simple_read_from_buffer(ubuf, cnt, ppos, text, len);
+}
+
+/* debugfs memory_usage file (read-only). */
+static const struct file_operations memory_usage_fops = {
+       .owner = THIS_MODULE,
+       .read = memory_used_read,
+};
+
+/* Report the combined GP+PP utilization value from the UK layer. */
+static ssize_t utilization_gp_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char text[64];
+       size_t len;
+
+       len = snprintf(text, sizeof(text), "%u\n", _mali_ukk_utilization_gp_pp());
+       return simple_read_from_buffer(ubuf, cnt, ppos, text, len);
+}
+
+/* Report the GP utilization value from the UK layer. */
+static ssize_t utilization_gp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char text[64];
+       size_t len;
+
+       len = snprintf(text, sizeof(text), "%u\n", _mali_ukk_utilization_gp());
+       return simple_read_from_buffer(ubuf, cnt, ppos, text, len);
+}
+
+/* Report the PP utilization value from the UK layer. */
+static ssize_t utilization_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char text[64];
+       size_t len;
+
+       len = snprintf(text, sizeof(text), "%u\n", _mali_ukk_utilization_pp());
+       return simple_read_from_buffer(ubuf, cnt, ppos, text, len);
+}
+
+
+/* debugfs utilization files (all read-only). */
+static const struct file_operations utilization_gp_pp_fops = {
+       .owner = THIS_MODULE,
+       .read = utilization_gp_pp_read,
+};
+
+static const struct file_operations utilization_gp_fops = {
+       .owner = THIS_MODULE,
+       .read = utilization_gp_read,
+};
+
+static const struct file_operations utilization_pp_fops = {
+       .owner = THIS_MODULE,
+       .read = utilization_pp_read,
+};
+
+/* Update one user-space setting with the decimal value written.
+ * The setting index is carried in filp->private_data, where
+ * mali_sysfs_user_settings_register() stored it as a pointer-sized integer.
+ */
+static ssize_t user_settings_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       unsigned long val;
+       int ret;
+       _mali_uk_user_setting_t setting;
+       char buf[32];
+
+       /* Truncate over-long input instead of rejecting it. */
+       cnt = min(cnt, sizeof(buf) - 1);
+       if (copy_from_user(buf, ubuf, cnt)) {
+               return -EFAULT;
+       }
+       buf[cnt] = '\0';
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (0 != ret) {
+               return ret;
+       }
+
+       /* Update setting */
+       setting = (_mali_uk_user_setting_t)(filp->private_data);
+       mali_set_user_setting(setting, val);
+
+       *ppos += cnt;
+       return cnt;
+}
+
+/* Report the current value of the user-space setting selected at open
+ * time; the setting index is carried in filp->private_data as a
+ * pointer-sized integer (see mali_sysfs_user_settings_register()).
+ */
+static ssize_t user_settings_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       _mali_uk_user_setting_t which = (_mali_uk_user_setting_t)(filp->private_data);
+       char text[64];
+       size_t len;
+
+       len = snprintf(text, sizeof(text), "%u\n", mali_get_user_setting(which));
+       return simple_read_from_buffer(ubuf, cnt, ppos, text, len);
+}
+
+/* One file per user-space setting; private_data holds the setting index. */
+static const struct file_operations user_settings_fops = {
+       .owner = THIS_MODULE,
+       .open = open_copy_private_data,
+       .read = user_settings_read,
+       .write = user_settings_write,
+};
+
+/* Create a debugfs "userspace_settings" directory with one file per
+ * _mali_uk_user_setting_t entry. The setting index is smuggled through
+ * the file's private data as a pointer-sized integer ((void*)i); the
+ * read/write handlers cast it back. Always returns 0 (failures to create
+ * debugfs entries are intentionally ignored).
+ */
+static int mali_sysfs_user_settings_register(void)
+{
+       struct dentry *mali_user_settings_dir = debugfs_create_dir("userspace_settings", mali_debugfs_dir);
+
+       if (mali_user_settings_dir != NULL) {
+               int i;
+               for (i = 0; i < _MALI_UK_USER_SETTING_MAX; i++) {
+                       debugfs_create_file(_mali_uk_user_setting_descriptions[i], 0600, mali_user_settings_dir, (void*)i, &user_settings_fops);
+               }
+       }
+
+       return 0;
+}
+
+/* Power down PMU domains according to the decimal value written.
+ * NOTE(review): 'val' is forwarded untouched to mali_pmu_power_down();
+ * presumably a PMU domain bit mask — confirm against the PMU core code.
+ */
+static ssize_t pmu_power_down_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+       int ret;
+       char buffer[32];
+       unsigned long val;
+       struct mali_pmu_core *pmu;
+       _mali_osk_errcode_t err;
+
+       if (count >= sizeof(buffer)) {
+               return -ENOMEM;
+       }
+
+       if (copy_from_user(&buffer[0], buf, count)) {
+               return -EFAULT;
+       }
+       buffer[count] = '\0';
+
+       ret = strict_strtoul(&buffer[0], 10, &val);
+       if (0 != ret) {
+               return -EINVAL;
+       }
+
+       pmu = mali_pmu_get_global_pmu_core();
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+
+       err = mali_pmu_power_down(pmu, val);
+       if (_MALI_OSK_ERR_OK != err) {
+               return -EINVAL;
+       }
+
+       *offp += count;
+       return count;
+}
+
+/* Power up PMU domains according to the decimal value written; mirror of
+ * pmu_power_down_write(). NOTE(review): 'val' is presumably a PMU domain
+ * bit mask — confirm against the PMU core code.
+ */
+static ssize_t pmu_power_up_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+       int ret;
+       char buffer[32];
+       unsigned long val;
+       struct mali_pmu_core *pmu;
+       _mali_osk_errcode_t err;
+
+       if (count >= sizeof(buffer)) {
+               return -ENOMEM;
+       }
+
+       if (copy_from_user(&buffer[0], buf, count)) {
+               return -EFAULT;
+       }
+       buffer[count] = '\0';
+
+       ret = strict_strtoul(&buffer[0], 10, &val);
+       if (0 != ret) {
+               return -EINVAL;
+       }
+
+       pmu = mali_pmu_get_global_pmu_core();
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+
+       err = mali_pmu_power_up(pmu, val);
+       if (_MALI_OSK_ERR_OK != err) {
+               return -EINVAL;
+       }
+
+       *offp += count;
+       return count;
+}
+
+/* debugfs pmu/power_down and pmu/power_up files (write-only). */
+static const struct file_operations pmu_power_down_fops = {
+       .owner = THIS_MODULE,
+       .write = pmu_power_down_write,
+};
+
+static const struct file_operations pmu_power_up_fops = {
+       .owner = THIS_MODULE,
+       .write = pmu_power_up_write,
+};
+
+/* Force the number of enabled PP cores to the decimal value written.
+ * The override flag makes this apply even while core scaling is disabled.
+ */
+static ssize_t pp_num_cores_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+       int ret;
+       char buffer[32];
+       unsigned long val;
+
+       if (count >= sizeof(buffer)) {
+               return -ENOMEM;
+       }
+
+       if (copy_from_user(&buffer[0], buf, count)) {
+               return -EFAULT;
+       }
+       buffer[count] = '\0';
+
+       ret = strict_strtoul(&buffer[0], 10, &val);
+       if (0 != ret) {
+               return -EINVAL;
+       }
+
+       ret = mali_pp_scheduler_set_perf_level(val, MALI_TRUE); /* override even if core scaling is disabled */
+       if (ret) {
+               return ret;
+       }
+
+       *offp += count;
+       return count;
+}
+
+/* Report how many PP cores are currently enabled. */
+static ssize_t pp_num_cores_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+       char text[64];
+       int len = sprintf(text, "%u\n", mali_pp_scheduler_get_num_cores_enabled());
+
+       return simple_read_from_buffer(buf, count, offp, text, len);
+}
+
+/* debugfs pp/num_cores_enabled file. */
+static const struct file_operations pp_num_cores_enabled_fops = {
+       .owner = THIS_MODULE,
+       .write = pp_num_cores_enabled_write,
+       .read = pp_num_cores_enabled_read,
+       .llseek = default_llseek,
+};
+
+/* Report the total number of PP cores present in the system. */
+static ssize_t pp_num_cores_total_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+       char text[64];
+       int len = sprintf(text, "%u\n", mali_pp_scheduler_get_num_cores_total());
+
+       return simple_read_from_buffer(buf, count, offp, text, len);
+}
+
+/* debugfs pp/num_cores_total file (read-only). */
+static const struct file_operations pp_num_cores_total_fops = {
+       .owner = THIS_MODULE,
+       .read = pp_num_cores_total_read,
+};
+
+/* Enable ("1") or disable ("0") dynamic PP core scaling; any other value
+ * is rejected with -EINVAL.
+ */
+static ssize_t pp_core_scaling_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+       int ret;
+       char buffer[32];
+       unsigned long val;
+
+       if (count >= sizeof(buffer)) {
+               return -ENOMEM;
+       }
+
+       if (copy_from_user(&buffer[0], buf, count)) {
+               return -EFAULT;
+       }
+       buffer[count] = '\0';
+
+       ret = strict_strtoul(&buffer[0], 10, &val);
+       if (0 != ret) {
+               return -EINVAL;
+       }
+
+       switch (val) {
+       case 1:
+               mali_pp_scheduler_core_scaling_enable();
+               break;
+       case 0:
+               mali_pp_scheduler_core_scaling_disable();
+               break;
+       default:
+               /* (dead "break" after this return removed) */
+               return -EINVAL;
+       }
+
+       *offp += count;
+       return count;
+}
+
+/* Report "1"/"0" depending on whether dynamic core scaling is enabled. */
+static ssize_t pp_core_scaling_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+       return simple_read_from_buffer(buf, count, offp, mali_pp_scheduler_core_scaling_is_enabled() ? "1\n" : "0\n", 2);
+}
+/* debugfs pp/core_scaling_enabled file. */
+static const struct file_operations pp_core_scaling_enabled_fops = {
+       .owner = THIS_MODULE,
+       .write = pp_core_scaling_enabled_write,
+       .read = pp_core_scaling_enabled_read,
+       .llseek = default_llseek,
+};
+
+/* Report the detected Mali product name (e.g. "Mali-400 MP").
+ * Unknown products return -EINVAL. A default label is added so an
+ * out-of-enum product id no longer falls through to a silent 0-byte read,
+ * and the stray ';' after the switch brace is removed.
+ */
+static ssize_t version_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+       int r = 0;
+       char buffer[64];
+
+       switch (mali_kernel_core_get_product_id()) {
+       case _MALI_PRODUCT_ID_MALI200:
+               r = sprintf(buffer, "Mali-200\n");
+               break;
+       case _MALI_PRODUCT_ID_MALI300:
+               r = sprintf(buffer, "Mali-300\n");
+               break;
+       case _MALI_PRODUCT_ID_MALI400:
+               r = sprintf(buffer, "Mali-400 MP\n");
+               break;
+       case _MALI_PRODUCT_ID_MALI450:
+               r = sprintf(buffer, "Mali-450 MP\n");
+               break;
+       case _MALI_PRODUCT_ID_UNKNOWN:
+       default:
+               return -EINVAL;
+       }
+
+       return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+/* debugfs version file (read-only). */
+static const struct file_operations version_fops = {
+       .owner = THIS_MODULE,
+       .read = version_read,
+};
+
+int mali_sysfs_register(const char *mali_dev_name)
+{
+       mali_debugfs_dir = debugfs_create_dir(mali_dev_name, NULL);
+       if(ERR_PTR(-ENODEV) == mali_debugfs_dir) {
+               /* Debugfs not supported. */
+               mali_debugfs_dir = NULL;
+       } else {
+               if(NULL != mali_debugfs_dir) {
+                       /* Debugfs directory created successfully; create files now */
+                       struct dentry *mali_pmu_dir;
+                       struct dentry *mali_power_dir;
+                       struct dentry *mali_gp_dir;
+                       struct dentry *mali_pp_dir;
+                       struct dentry *mali_l2_dir;
+                       struct dentry *mali_profiling_dir;
+
+                       debugfs_create_file("version", 0400, mali_debugfs_dir, NULL, &version_fops);
+
+                       mali_pmu_dir = debugfs_create_dir("pmu", mali_debugfs_dir);
+                       if (NULL != mali_pmu_dir) {
+                               debugfs_create_file("power_down", 0200, mali_pmu_dir, NULL, &pmu_power_down_fops);
+                               debugfs_create_file("power_up", 0200, mali_pmu_dir, NULL, &pmu_power_up_fops);
+                       }
+
+                       mali_power_dir = debugfs_create_dir("power", mali_debugfs_dir);
+                       if (mali_power_dir != NULL) {
+                               /* MALI_SEC : 0600 -> 0400 */
+                               debugfs_create_file("always_on", 0400, mali_power_dir, NULL, &power_always_on_fops);
+                               /* MALI_SEC : 0200 -> 0400 */
+                               debugfs_create_file("power_events", 0400, mali_power_dir, NULL, &power_power_events_fops);
+                       }
+
+                       mali_gp_dir = debugfs_create_dir("gp", mali_debugfs_dir);
+                       if (mali_gp_dir != NULL) {
+                               u32 num_groups;
+                               int i;
+
+                               num_groups = mali_group_get_glob_num_groups();
+                               for (i = 0; i < num_groups; i++) {
+                                       struct mali_group *group = mali_group_get_glob_group(i);
+
+                                       struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+                                       if (NULL != gp_core) {
+                                               struct dentry *mali_gp_gpx_dir;
+                                               mali_gp_gpx_dir = debugfs_create_dir("gp0", mali_gp_dir);
+                                               if (NULL != mali_gp_gpx_dir) {
+                                                       debugfs_create_file("base_addr", 0400, mali_gp_gpx_dir, &gp_core->hw_core, &hw_core_base_addr_fops);
+                                                       debugfs_create_file("enabled", 0600, mali_gp_gpx_dir, group, &group_enabled_fops);
+                                               }
+                                               break; /* no need to look for any other GP cores */
+                                       }
+
+                               }
+                       }
+
+                       mali_pp_dir = debugfs_create_dir("pp", mali_debugfs_dir);
+                       if (mali_pp_dir != NULL) {
+                               u32 num_groups;
+                               int i;
+
+                               debugfs_create_file("num_cores_total", 0400, mali_pp_dir, NULL, &pp_num_cores_total_fops);
+                               debugfs_create_file("num_cores_enabled", 0600, mali_pp_dir, NULL, &pp_num_cores_enabled_fops);
+                               debugfs_create_file("core_scaling_enabled", 0600, mali_pp_dir, NULL, &pp_core_scaling_enabled_fops);
+
+                               num_groups = mali_group_get_glob_num_groups();
+                               for (i = 0; i < num_groups; i++) {
+                                       struct mali_group *group = mali_group_get_glob_group(i);
+
+                                       struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+                                       if (NULL != pp_core) {
+                                               char buf[16];
+                                               struct dentry *mali_pp_ppx_dir;
+                                               _mali_osk_snprintf(buf, sizeof(buf), "pp%u", mali_pp_core_get_id(pp_core));
+                                               mali_pp_ppx_dir = debugfs_create_dir(buf, mali_pp_dir);
+                                               if (NULL != mali_pp_ppx_dir) {
+                                                       debugfs_create_file("base_addr", 0400, mali_pp_ppx_dir, &pp_core->hw_core, &hw_core_base_addr_fops);
+                                                       if (!mali_group_is_virtual(group)) {
+                                                               debugfs_create_file("enabled", 0600, mali_pp_ppx_dir, group, &group_enabled_fops);
+                                                       }
+                                               }
+                                       }
+                               }
+                       }
+
+                       mali_l2_dir = debugfs_create_dir("l2", mali_debugfs_dir);
+                       if (mali_l2_dir != NULL) {
+                               struct dentry *mali_l2_all_dir;
+                               u32 l2_id;
+                               struct mali_l2_cache_core *l2_cache;
+
+                               mali_l2_all_dir = debugfs_create_dir("all", mali_l2_dir);
+                               if (mali_l2_all_dir != NULL) {
+                                       debugfs_create_file("counter_src0", 0200, mali_l2_all_dir, NULL, &l2_all_counter_src0_fops);
+                                       debugfs_create_file("counter_src1", 0200, mali_l2_all_dir, NULL, &l2_all_counter_src1_fops);
+                               }
+
+                               l2_id = 0;
+                               l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+                               while (NULL != l2_cache) {
+                                       char buf[16];
+                                       struct dentry *mali_l2_l2x_dir;
+                                       _mali_osk_snprintf(buf, sizeof(buf), "l2%u", l2_id);
+                                       mali_l2_l2x_dir = debugfs_create_dir(buf, mali_l2_dir);
+                                       if (NULL != mali_l2_l2x_dir) {
+                                               debugfs_create_file("counter_src0", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src0_fops);
+                                               debugfs_create_file("counter_src1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src1_fops);
+                                               debugfs_create_file("base_addr", 0400, mali_l2_l2x_dir, &l2_cache->hw_core, &hw_core_base_addr_fops);
+                                       }
+
+                                       /* try next L2 */
+                                       l2_id++;
+                                       l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+                               }
+                       }
+
+                       debugfs_create_file("memory_usage", 0400, mali_debugfs_dir, NULL, &memory_usage_fops);
+
+                       debugfs_create_file("utilization_gp_pp", 0400, mali_debugfs_dir, NULL, &utilization_gp_pp_fops);
+                       debugfs_create_file("utilization_gp", 0400, mali_debugfs_dir, NULL, &utilization_gp_fops);
+                       debugfs_create_file("utilization_pp", 0400, mali_debugfs_dir, NULL, &utilization_pp_fops);
+
+                       mali_profiling_dir = debugfs_create_dir("profiling", mali_debugfs_dir);
+                       if (mali_profiling_dir != NULL) {
+                               u32 max_sub_jobs;
+                               int i;
+                               struct dentry *mali_profiling_gp_dir;
+                               struct dentry *mali_profiling_pp_dir;
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+                               struct dentry *mali_profiling_proc_dir;
+#endif
+                               /*
+                                * Create directory where we can set GP HW counters.
+                                */
+                               mali_profiling_gp_dir = debugfs_create_dir("gp", mali_profiling_dir);
+                               if (mali_profiling_gp_dir != NULL) {
+                                       debugfs_create_file("counter_src0", 0600, mali_profiling_gp_dir, (void*)PRIVATE_DATA_COUNTER_MAKE_GP(0), &profiling_counter_src_fops);
+                                       debugfs_create_file("counter_src1", 0600, mali_profiling_gp_dir, (void*)PRIVATE_DATA_COUNTER_MAKE_GP(1), &profiling_counter_src_fops);
+                               }
+
+                               /*
+                                * Create directory where we can set PP HW counters.
+                                * Possible override with specific HW counters for a particular sub job
+                                * (Disable core scaling before using the override!)
+                                */
+                               mali_profiling_pp_dir = debugfs_create_dir("pp", mali_profiling_dir);
+                               if (mali_profiling_pp_dir != NULL) {
+                                       debugfs_create_file("counter_src0", 0600, mali_profiling_pp_dir, (void*)PRIVATE_DATA_COUNTER_MAKE_PP(0), &profiling_counter_src_fops);
+                                       debugfs_create_file("counter_src1", 0600, mali_profiling_pp_dir, (void*)PRIVATE_DATA_COUNTER_MAKE_PP(1), &profiling_counter_src_fops);
+                               }
+
+                               max_sub_jobs = mali_pp_scheduler_get_num_cores_total();
+                               for (i = 0; i < max_sub_jobs; i++) {
+                                       char buf[16];
+                                       struct dentry *mali_profiling_pp_x_dir;
+                                       _mali_osk_snprintf(buf, sizeof(buf), "%u", i);
+                                       mali_profiling_pp_x_dir = debugfs_create_dir(buf, mali_profiling_pp_dir);
+                                       if (NULL != mali_profiling_pp_x_dir) {
+                                               debugfs_create_file("counter_src0", 0600, mali_profiling_pp_x_dir, (void*)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(0, i), &profiling_counter_src_fops);
+                                               debugfs_create_file("counter_src1", 0600, mali_profiling_pp_x_dir, (void*)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(1, i), &profiling_counter_src_fops);
+                                       }
+                               }
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+                               mali_profiling_proc_dir = debugfs_create_dir("proc", mali_profiling_dir);
+                               if (mali_profiling_proc_dir != NULL) {
+                                       struct dentry *mali_profiling_proc_default_dir = debugfs_create_dir("default", mali_profiling_proc_dir);
+                                       if (mali_profiling_proc_default_dir != NULL) {
+                                               debugfs_create_file("enable", 0600, mali_profiling_proc_default_dir, (void*)_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, &user_settings_fops);
+                                       }
+                               }
+                               debugfs_create_file("record", 0600, mali_profiling_dir, NULL, &profiling_record_fops);
+                               debugfs_create_file("events", 0400, mali_profiling_dir, NULL, &profiling_events_fops);
+                               debugfs_create_file("events_human_readable", 0400, mali_profiling_dir, NULL, &profiling_events_human_readable_fops);
+#endif
+                       }
+
+#if MALI_STATE_TRACKING
+                       debugfs_create_file("state_dump", 0400, mali_debugfs_dir, NULL, &mali_seq_internal_state_fops);
+#endif
+
+                       if (mali_sysfs_user_settings_register()) {
+                               /* Failed to create the debugfs entries for the user settings DB. */
+                               MALI_DEBUG_PRINT(2, ("Failed to create user setting debugfs files. Ignoring...\n"));
+                       }
+               }
+       }
+
+       /* Success! */
+       return 0;
+}
+
+/**
+ * Remove all Mali debugfs entries created by mali_sysfs_register().
+ *
+ * debugfs_remove_recursive() is documented as a no-op when passed NULL,
+ * so no guard is needed even if registration never ran or failed early.
+ *
+ * @return Always 0.
+ */
+int mali_sysfs_unregister(void)
+{
+       debugfs_remove_recursive(mali_debugfs_dir);
+       return 0;
+}
+
+#else /* MALI_LICENSE_IS_GPL */
+
+/* Dummy implementations for non-GPL */
+
+/*
+ * Non-GPL stub: sysfs/debugfs support is only built for GPL builds.
+ *
+ * Fix: the signature now matches the prototype in mali_kernel_sysfs.h
+ * (int mali_sysfs_register(const char *)); the previous three-argument
+ * form conflicted with that declaration and could not compile with the
+ * header included.
+ */
+int mali_sysfs_register(const char *mali_dev_name)
+{
+       return 0;
+}
+
+/* Non-GPL stub: nothing was registered, so nothing to remove. */
+int mali_sysfs_unregister(void)
+{
+       return 0;
+}
+
+#endif /* MALI_LICENSE_IS_GPL */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_kernel_sysfs.h b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_kernel_sysfs.h
new file mode 100644 (file)
index 0000000..d3fd3e6
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_SYSFS_H__
+#define __MALI_KERNEL_SYSFS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/device.h>
+
+#define MALI_PROC_DIR "driver/mali"
+
+/* Create the Mali debugfs entries (GPL builds; no-op otherwise). */
+int mali_sysfs_register(const char *mali_dev_name);
+/* Tear down everything mali_sysfs_register() created. */
+int mali_sysfs_unregister(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_SYSFS_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_linux_trace.h b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_linux_trace.h
new file mode 100644 (file)
index 0000000..0d7c81c
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#if !defined (MALI_LINUX_TRACE_H) || defined (TRACE_HEADER_MULTI_READ)
+#define MALI_LINUX_TRACE_H
+
+#include <linux/types.h>
+
+#include <linux/stringify.h>
+#include <linux/tracepoint.h>
+
+#undef  TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+/* Fix: the helper provided by <linux/stringify.h> is __stringify(),
+ * not __stringfy(); the typo produced an undefined identifier. */
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE mali_linux_trace
+
+/**
+ * Define the tracepoint used to communicate the status of a GPU. Called
+ * when a GPU turns on or turns off.
+ *
+ * @param event_id The type of the event. This parameter is a bitfield
+ *  encoding the type of the event.
+ *
+ * @param d0 First data parameter.
+ * @param d1 Second data parameter.
+ * @param d2 Third data parameter.
+ * @param d3 Fourth data parameter.
+ * @param d4 Fifth data parameter.
+ */
+TRACE_EVENT(mali_timeline_event,
+
+            TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1,
+                     unsigned int d2, unsigned int d3, unsigned int d4),
+
+            TP_ARGS(event_id, d0, d1, d2, d3, d4),
+
+            TP_STRUCT__entry(
+                __field(unsigned int, event_id)
+                __field(unsigned int, d0)
+                __field(unsigned int, d1)
+                __field(unsigned int, d2)
+                __field(unsigned int, d3)
+                __field(unsigned int, d4)
+            ),
+
+            TP_fast_assign(
+                __entry->event_id = event_id;
+                __entry->d0 = d0;
+                __entry->d1 = d1;
+                __entry->d2 = d2;
+                __entry->d3 = d3;
+                __entry->d4 = d4;
+            ),
+
+            /* Only event_id is printed; d0-d4 are still recorded in the
+             * ring-buffer entry for binary trace consumers. */
+            TP_printk("event=%d", __entry->event_id)
+           );
+
+/**
+ * Define a tracepoint used to register the value of a hardware counter.
+ * Hardware counters belonging to the vertex or fragment processor are
+ * reported via this tracepoint each frame, whilst L2 cache hardware
+ * counters are reported continuously.
+ *
+ * @param counter_id The counter ID.
+ * @param value The value of the counter.
+ */
+TRACE_EVENT(mali_hw_counter,
+
+            TP_PROTO(unsigned int counter_id, unsigned int value),
+
+            TP_ARGS(counter_id, value),
+
+            TP_STRUCT__entry(
+                __field(unsigned int, counter_id)
+                __field(unsigned int, value)
+            ),
+
+            TP_fast_assign(
+                __entry->counter_id = counter_id;
+                /* Fix: value was never assigned, so TP_printk printed
+                 * uninitialized ring-buffer memory for the counter value. */
+                __entry->value = value;
+            ),
+
+            TP_printk("event %d = %d", __entry->counter_id, __entry->value)
+           );
+
+/**
+ * Define a tracepoint used to send a bundle of software counters.
+ *
+ * @param counters The bundle of counters.
+ */
+TRACE_EVENT(mali_sw_counters,
+
+            TP_PROTO(pid_t pid, pid_t tid, void * surface_id, unsigned int * counters),
+
+            TP_ARGS(pid, tid, surface_id, counters),
+
+            TP_STRUCT__entry(
+                __field(pid_t, pid)
+                __field(pid_t, tid)
+                __field(void *, surface_id)
+                /* NOTE(review): only the *pointer* is recorded, not the
+                 * counter values; the pointed-to array may be freed before
+                 * the trace is read.  Confirm consumers only test it for
+                 * NULL (as TP_printk below does). */
+                __field(unsigned int *, counters)
+            ),
+
+            TP_fast_assign(
+                __entry->pid = pid;
+                __entry->tid = tid;
+                __entry->surface_id = surface_id;
+                __entry->counters = counters;
+            ),
+
+            TP_printk("counters were %s", __entry->counters == NULL? "NULL" : "not NULL")
+           );
+
+#endif /* MALI_LINUX_TRACE_H */
+
+/* This part must exist outside the header guard. */
+#include <trace/define_trace.h>
+
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory.c
new file mode 100644 (file)
index 0000000..8578090
--- /dev/null
@@ -0,0 +1,353 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_linux.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_descriptor_mapping.h"
+
+#include "mali_memory.h"
+#include "mali_memory_dma_buf.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_memory_block_alloc.h"
+
+/* session->memory_lock must be held when calling this function */
+static void mali_mem_release(mali_mem_allocation *descriptor)
+{
+       MALI_DEBUG_ASSERT_POINTER(descriptor);
+       MALI_DEBUG_ASSERT_LOCK_HELD(descriptor->session->memory_lock);
+
+       MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+
+       switch (descriptor->type) {
+       case MALI_MEM_OS:
+               mali_mem_os_release(descriptor);
+               break;
+       case MALI_MEM_DMA_BUF:
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+               mali_mem_dma_buf_release(descriptor);
+#endif
+               break;
+       case MALI_MEM_UMP:
+#if defined(CONFIG_MALI400_UMP)
+               mali_mem_ump_release(descriptor);
+#endif
+               break;
+       case MALI_MEM_EXTERNAL:
+               mali_mem_external_release(descriptor);
+               break;
+       case MALI_MEM_BLOCK:
+               mali_mem_block_release(descriptor);
+               break;
+       }
+}
+
+/* VMA open callback: another CPU mapping (e.g. after fork or mremap)
+ * now references this allocation, so bump the mapping refcount. */
+static void mali_mem_vma_open(struct vm_area_struct * vma)
+{
+       mali_mem_allocation *alloc = (mali_mem_allocation*)vma->vm_private_data;
+
+       MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));
+
+       alloc->cpu_mapping.ref++;
+}
+
+/* VMA close callback: drop one CPU-mapping reference and, when the last
+ * reference goes away, release the allocation entirely (descriptor map
+ * entry, backing memory, and finally the descriptor itself). */
+static void mali_mem_vma_close(struct vm_area_struct *vma)
+{
+       mali_mem_allocation *descriptor;
+       struct mali_session_data *session;
+       mali_mem_virt_cpu_mapping *mapping;
+
+       MALI_DEBUG_PRINT(3, ("Close called on vma %p\n", vma));
+
+       descriptor = (mali_mem_allocation*)vma->vm_private_data;
+       BUG_ON(!descriptor);
+
+       MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+
+       mapping = &descriptor->cpu_mapping;
+       BUG_ON(0 == mapping->ref);
+
+       mapping->ref--;
+       if (0 != mapping->ref) {
+               MALI_DEBUG_PRINT(3, ("Ignoring this close, %d references still exists\n", mapping->ref));
+               return;
+       }
+
+       session = descriptor->session;
+
+       /* Remove from the descriptor map first so no new lookup can reach
+        * the allocation while it is being torn down. */
+       mali_descriptor_mapping_free(session->descriptor_mapping, descriptor->id);
+
+       /* mali_mem_release() requires memory_lock to be held. */
+       _mali_osk_mutex_wait(session->memory_lock);
+       mali_mem_release(descriptor);
+       _mali_osk_mutex_signal(session->memory_lock);
+
+       mali_mem_descriptor_destroy(descriptor);
+}
+
+/* CPU page-fault handler for Mali VMAs.  Always returns SIGBUS because
+ * all CPU mappings are fully populated at mmap time; a fault here means
+ * the CPU touched an address with no backing Mali memory. */
+static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       void __user * address;
+       mali_mem_allocation *descriptor;
+
+       address = vmf->virtual_address;
+       descriptor = (mali_mem_allocation *)vma->vm_private_data;
+
+       MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+
+       /*
+        * We always fail the call since all memory is pre-faulted when assigned to the process.
+        * Only the Mali cores can use page faults to extend buffers.
+       */
+
+       MALI_DEBUG_PRINT(1, ("Page-fault in Mali memory region caused by the CPU.\n"));
+       MALI_DEBUG_PRINT(1, ("Tried to access %p (process local virtual address) which is not currently mapped to any Mali memory.\n", (void*)address));
+
+       /* Silence unused-variable warnings in non-debug builds. */
+       MALI_IGNORE(address);
+       MALI_IGNORE(descriptor);
+
+       return VM_FAULT_SIGBUS;
+}
+
+/* VMA callbacks for Mali device mappings; installed by mali_mmap(). */
+struct vm_operations_struct mali_kernel_vm_ops = {
+       .open = mali_mem_vma_open,
+       .close = mali_mem_vma_close,
+       .fault = mali_kernel_memory_cpu_page_fault_handler
+};
+
+/** @note munmap handler is done by vma close handler */
+int mali_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct mali_session_data *session;
+       mali_mem_allocation *descriptor;
+       u32 size = vma->vm_end - vma->vm_start;
+       u32 mali_addr = vma->vm_pgoff << PAGE_SHIFT;
+
+       session = (struct mali_session_data *)filp->private_data;
+       if (NULL == session) {
+               MALI_PRINT_ERROR(("mmap called without any session data available\n"));
+               return -EFAULT;
+       }
+
+       MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n",
+                            (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT),
+                            (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));
+
+       /* Set some bits which indicate that, the memory is IO memory, meaning
+        * that no paging is to be performed and the memory should not be
+        * included in crash dumps. And that the memory is reserved, meaning
+        * that it's present and can never be paged out (see also previous
+        * entry)
+        */
+       vma->vm_flags |= VM_IO;
+       vma->vm_flags |= VM_DONTCOPY;
+       vma->vm_flags |= VM_PFNMAP;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)
+       vma->vm_flags |= VM_RESERVED;
+#else
+       vma->vm_flags |= VM_DONTDUMP;
+       vma->vm_flags |= VM_DONTEXPAND;
+#endif
+
+       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+       vma->vm_ops = &mali_kernel_vm_ops; /* Operations used on any memory system */
+
+       descriptor = mali_mem_block_alloc(mali_addr, size, vma, session);
+       if (NULL == descriptor) {
+               descriptor = mali_mem_os_alloc(mali_addr, size, vma, session);
+               if (NULL == descriptor) {
+                       MALI_DEBUG_PRINT(3, ("MMAP failed\n"));
+                       return -ENOMEM;
+               }
+       }
+
+       MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+
+       vma->vm_private_data = (void*)descriptor;
+
+       /* Put on descriptor map */
+       if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &descriptor->id)) {
+               _mali_osk_mutex_wait(session->memory_lock);
+               mali_mem_os_release(descriptor);
+               _mali_osk_mutex_signal(session->memory_lock);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+
+/* Prepare memory descriptor */
+mali_mem_allocation *mali_mem_descriptor_create(struct mali_session_data *session, mali_mem_type type)
+{
+       mali_mem_allocation *descriptor;
+
+       descriptor = (mali_mem_allocation*)kzalloc(sizeof(mali_mem_allocation), GFP_KERNEL);
+       if (NULL == descriptor) {
+               MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: descriptor was NULL\n"));
+               return NULL;
+       }
+
+       MALI_DEBUG_CODE(descriptor->magic = MALI_MEM_ALLOCATION_VALID_MAGIC);
+
+       descriptor->flags = 0;
+       descriptor->type = type;
+       descriptor->session = session;
+
+       return descriptor;
+}
+
+/* Free the descriptor struct itself.  The memory it described must
+ * already have been released via mali_mem_release(). */
+void mali_mem_descriptor_destroy(mali_mem_allocation *descriptor)
+{
+       MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+       /* Poison the magic in debug builds to catch use-after-free. */
+       MALI_DEBUG_CODE(descriptor->magic = MALI_MEM_ALLOCATION_FREED_MAGIC);
+
+       kfree(descriptor);
+}
+
+/* Reserve/refcount the Mali page tables covering this allocation's GPU
+ * address range before the actual mapping is written. */
+_mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor)
+{
+       u32 size = descriptor->size;
+       struct mali_session_data *session = descriptor->session;
+
+       MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+
+       /* Map dma-buf into this session's page tables */
+
+       /* Reserve one extra MMU page when a guard page is requested. */
+       if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+               size += MALI_MMU_PAGE_SIZE;
+       }
+
+       return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_mapping.addr, size);
+}
+
+/* Unmap this allocation's GPU address range and zap active MMU/L2
+ * state so the GPU cannot use stale translations. */
+void mali_mem_mali_map_free(mali_mem_allocation *descriptor)
+{
+       u32 size = descriptor->size;
+       struct mali_session_data *session = descriptor->session;
+
+       MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+
+       if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+               size += MALI_MMU_PAGE_SIZE;
+       }
+
+       /* Umap and flush L2.
+        * Fix: pass the guard-page-adjusted size.  The original passed
+        * descriptor->size, leaving the guard page mapped (and making the
+        * size adjustment above dead code) — asymmetric with
+        * mali_mem_mali_map_prepare(). */
+       mali_mmu_pagedir_unmap(session->page_directory, descriptor->mali_mapping.addr, size);
+
+       mali_scheduler_zap_all_active(session);
+}
+
+/* Total Mali memory currently in use: dedicated block-allocator pages
+ * plus OS-allocated pages, in bytes as reported by each backend. */
+u32 _mali_ukk_report_memory_usage(void)
+{
+       return mali_mem_block_allocator_stat() + mali_mem_os_stat();
+}
+
+/**
+ * Per-session memory descriptor mapping table sizes
+ */
+#define MALI_MEM_DESCRIPTORS_INIT 64
+#define MALI_MEM_DESCRIPTORS_MAX 65536
+
+/* Set up per-session memory state: the descriptor map and the lock
+ * protecting it.  Called when a process opens the device node. */
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data * session_data)
+{
+       MALI_DEBUG_PRINT(5, ("Memory session begin\n"));
+
+       /* Create descriptor mapping table */
+       session_data->descriptor_mapping = mali_descriptor_mapping_create(MALI_MEM_DESCRIPTORS_INIT, MALI_MEM_DESCRIPTORS_MAX);
+
+       if (NULL == session_data->descriptor_mapping) {
+               MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+       }
+
+       session_data->memory_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
+                                   _MALI_OSK_LOCK_ORDER_MEM_SESSION);
+
+       if (NULL == session_data->memory_lock) {
+               mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+               /* NOTE(review): freeing session_data here assumes the caller
+                * does NOT also free it on error — verify against the
+                * session-open path to rule out a double free. */
+               _mali_osk_free(session_data);
+               MALI_ERROR(_MALI_OSK_ERR_FAULT);
+       }
+
+       MALI_DEBUG_PRINT(5, ("MMU session begin: success\n"));
+       MALI_SUCCESS;
+}
+
+/** @brief Callback function that releases memory
+ *
+ * session->memory_lock must be held when calling this function.
+ */
+static void descriptor_table_cleanup_callback(int descriptor_id, void* map_target)
+{
+       mali_mem_allocation *descriptor;
+
+       descriptor = (mali_mem_allocation*)map_target;
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(descriptor->session->memory_lock);
+
+       MALI_DEBUG_PRINT(3, ("Cleanup of descriptor %d mapping to 0x%x in descriptor table\n", descriptor_id, map_target));
+       MALI_DEBUG_ASSERT(descriptor);
+
+       mali_mem_release(descriptor);
+       mali_mem_descriptor_destroy(descriptor);
+}
+
+/* Tear down all memory state for a closing session: free every
+ * remaining allocation in the descriptor map, destroy the map, then
+ * destroy the session memory lock. */
+void mali_memory_session_end(struct mali_session_data *session)
+{
+       MALI_DEBUG_PRINT(3, ("MMU session end\n"));
+
+       if (NULL == session) {
+               MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
+               return;
+       }
+
+       /* Lock the session so we can modify the memory list */
+       _mali_osk_mutex_wait(session->memory_lock);
+
+       /* Free all allocations still in the descriptor map, and terminate the map */
+       if (NULL != session->descriptor_mapping) {
+               mali_descriptor_mapping_call_for_each(session->descriptor_mapping, descriptor_table_cleanup_callback);
+               mali_descriptor_mapping_destroy(session->descriptor_mapping);
+               session->descriptor_mapping = NULL;
+       }
+
+       _mali_osk_mutex_signal(session->memory_lock);
+
+       /* Free the lock */
+       _mali_osk_mutex_term(session->memory_lock);
+
+       return;
+}
+
+/* Initialize the memory subsystem.  Only the OS allocator is set up
+ * here; the dedicated block allocator is created from platform
+ * resources elsewhere. */
+_mali_osk_errcode_t mali_memory_initialize(void)
+{
+       return mali_mem_os_init();
+}
+
+/* Shut down the memory subsystem at driver unload. */
+void mali_memory_terminate(void)
+{
+       mali_mem_os_term();
+       /* NOTE(review): NULL argument — presumably
+        * mali_mem_block_allocator_destroy() falls back to the global
+        * block allocator when passed NULL; confirm in its definition. */
+       mali_mem_block_allocator_destroy(NULL);
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory.h b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory.h
new file mode 100644 (file)
index 0000000..4c5a863
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_H__
+#define __MALI_MEMORY_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+
+#include <linux/list.h>
+#include <linux/mm.h>
+
+#include "mali_memory_types.h"
+#include "mali_memory_os_alloc.h"
+
+_mali_osk_errcode_t mali_memory_initialize(void);
+void mali_memory_terminate(void);
+
+/** @brief Allocate a page table page
+ *
+ * Allocate a page for use as a page directory or page table. The page is
+ * mapped into kernel space.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise an error code
+ * @param table_page GPU pointer to the allocated page
+ * @param mapping CPU pointer to the mapping of the allocated page
+ */
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping)
+{
+       /* Thin wrapper: page-table pages come from the OS allocator. */
+       return mali_mem_os_get_table_page(table_page, mapping);
+}
+
+/** @brief Release a page table page
+ *
+ * Release a page table page allocated through \a mali_mmu_get_table_page
+ *
+ * @param pa the GPU address of the page to release
+ */
+MALI_STATIC_INLINE void mali_mmu_release_table_page(u32 phys, void *virt)
+{
+       /* Thin wrapper: return the page to the OS allocator's pool. */
+       mali_mem_os_release_table_page(phys, virt);
+}
+
+/** @brief mmap function
+ *
+ * mmap syscalls on the Mali device node will end up here.
+ *
+ * This function allocates Mali memory and maps it on CPU and Mali.
+ */
+int mali_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/** @brief Allocate and initialize a Mali memory descriptor
+ *
+ * @param session Pointer to the session allocating the descriptor
+ * @param type Type of memory the descriptor will represent
+ */
+mali_mem_allocation *mali_mem_descriptor_create(struct mali_session_data *session, mali_mem_type type);
+
+/** @brief Destroy a Mali memory descriptor
+ *
+ * This function will only free the descriptor itself, and not the memory it
+ * represents.
+ *
+ * @param descriptor Pointer to the descriptor to destroy
+ */
+void mali_mem_descriptor_destroy(mali_mem_allocation *descriptor);
+
+/** @brief Start a new memory session
+ *
+ * Called when a process opens the Mali device node.
+ *
+ * @param session Pointer to session to initialize
+ */
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *session);
+
+/** @brief Close a memory session
+ *
+ * Called when a process closes the Mali device node.
+ *
+ * Memory allocated by the session will be freed
+ *
+ * @param session Pointer to the session to terminate
+ */
+void mali_memory_session_end(struct mali_session_data *session);
+
+/** @brief Prepare Mali page tables for mapping
+ *
+ * This function will prepare the Mali page tables for mapping the memory
+ * described by \a descriptor.
+ *
+ * Page tables will be reference counted and allocated, if not yet present.
+ *
+ * @param descriptor Pointer to the memory descriptor to the mapping
+ */
+_mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor);
+
+/** @brief Free Mali page tables for mapping
+ *
+ * This function will unmap pages from Mali memory and free the page tables
+ * that are now unused.
+ *
+ * The updated pages in the Mali L2 cache will be invalidated, and the MMU TLBs will be zapped if necessary.
+ *
+ * @param descriptor Pointer to the memory descriptor to unmap
+ */
+void mali_mem_mali_map_free(mali_mem_allocation *descriptor);
+
+/** @brief Parse resource and prepare the OS memory allocator
+ *
+ * @param size Maximum size to allocate for Mali GPU.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size);
+
+/** @brief Parse resource and prepare the dedicated memory allocator
+ *
+ * @param start Physical start address of dedicated Mali GPU memory.
+ * @param size Size of dedicated Mali GPU memory.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size);
+
+
+void mali_mem_ump_release(mali_mem_allocation *descriptor);
+void mali_mem_external_release(mali_mem_allocation *descriptor);
+
+#endif /* __MALI_MEMORY_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_block_alloc.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_block_alloc.c
new file mode 100644 (file)
index 0000000..9096259
--- /dev/null
@@ -0,0 +1,320 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_memory.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_osk.h"
+#include <linux/mutex.h>
+#define MALI_BLOCK_SIZE (256UL * 1024UL)  /* 256 kB, remember to keep the ()s */
+
+/* Intrusive singly-linked free-list node; one per 256 kB block.
+ * A block's physical address is derived from its index in all_blocks. */
+struct block_info {
+       struct block_info *next;
+};
+
+typedef struct block_info block_info;
+
+
+typedef struct block_allocator {
+       struct mutex mutex;        /* protects the free-list and counters */
+       block_info *all_blocks;    /* array of num_blocks block descriptors */
+       block_info *first_free;    /* head of the free-list (NULL when exhausted) */
+       u32 base;                  /* physical base address of the managed region */
+       u32 cpu_usage_adjust;      /* offset added to a Mali physical address to get the CPU physical address */
+       u32 num_blocks;            /* total number of blocks managed */
+       u32 free_blocks;           /* blocks currently on the free-list */
+} block_allocator;
+
+/* Singleton allocator instance; NOTE "gobal" is a historical typo kept for consistency. */
+static block_allocator *mali_mem_block_gobal_allocator = NULL;
+
+MALI_STATIC_INLINE u32 get_phys(block_allocator *info, block_info *block)
+{
+       /* Blocks are laid out contiguously from info->base, so the physical
+        * address follows directly from the block's index in all_blocks. */
+       u32 block_index = block - info->all_blocks;
+
+       return info->base + (block_index * MALI_BLOCK_SIZE);
+}
+
+/* Create a block allocator covering the physical region [base_address,
+ * base_address + size). Only whole 256 kB blocks are usable; the remainder
+ * is silently dropped. Returns NULL on failure or if no block fits. */
+mali_mem_allocator *mali_mem_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size)
+{
+       block_allocator *info;
+       u32 usable_size;
+       u32 num_blocks;
+       u32 i;
+
+       /* Truncate to a whole number of blocks. */
+       usable_size = size & ~(MALI_BLOCK_SIZE - 1);
+       MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size));
+       MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size));
+       num_blocks = usable_size / MALI_BLOCK_SIZE;
+       MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks));
+
+       if (usable_size == 0) {
+               MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size));
+               return NULL;
+       }
+
+       info = _mali_osk_malloc(sizeof(block_allocator));
+       if (NULL == info) {
+               return NULL;
+       }
+
+       mutex_init(&info->mutex);
+       info->all_blocks = _mali_osk_malloc(sizeof(block_info) * num_blocks);
+       if (NULL == info->all_blocks) {
+               _mali_osk_free(info);
+               return NULL;
+       }
+
+       info->first_free = NULL;
+       info->num_blocks = num_blocks;
+       info->free_blocks = num_blocks;
+       info->base = base_address;
+       info->cpu_usage_adjust = cpu_usage_adjust;
+
+       /* Push every block onto the free-list. */
+       for (i = 0; i < num_blocks; i++) {
+               info->all_blocks[i].next = info->first_free;
+               info->first_free = &info->all_blocks[i];
+       }
+
+       return (mali_mem_allocator *)info;
+}
+
+/* Destroy the block allocator.
+ *
+ * NOTE: the allocator argument is ignored; this driver treats the block
+ * allocator as a singleton and always destroys the global instance.
+ */
+void mali_mem_block_allocator_destroy(mali_mem_allocator *allocator)
+{
+       block_allocator *info = (block_allocator*)allocator;
+
+       info = mali_mem_block_gobal_allocator;
+       if (NULL == info) return;
+
+       MALI_DEBUG_ASSERT_POINTER(info);
+
+       _mali_osk_free(info->all_blocks);
+       _mali_osk_free(info);
+
+       /* Clear the global pointer so a repeated destroy (double free) or a
+        * later mali_mem_block_alloc/stat call cannot touch freed memory. */
+       mali_mem_block_gobal_allocator = NULL;
+}
+
+/* Map a physically-contiguous range into the session's Mali page tables,
+ * one MMU page at a time. size is assumed to be a multiple of the page size. */
+static void mali_mem_block_mali_map(mali_mem_allocation *descriptor, u32 phys, u32 virt, u32 size)
+{
+       struct mali_page_directory *pagedir = descriptor->session->page_directory;
+       u32 prop = descriptor->mali_mapping.properties;
+       u32 done = 0;
+
+       while (0 < size) {
+               mali_mmu_pagedir_update(pagedir, virt + done, phys + done, MALI_MMU_PAGE_SIZE, prop);
+               done += MALI_MMU_PAGE_SIZE;
+               size -= MALI_MMU_PAGE_SIZE;
+       }
+}
+
+/* Map a physically-contiguous range into the CPU-side vma, page by page.
+ * Returns 0 on success, 1 as soon as a pfn insertion fails. */
+static int mali_mem_block_cpu_map(mali_mem_allocation *descriptor, struct vm_area_struct *vma, u32 mali_phys, u32 mapping_offset, u32 size, u32 cpu_usage_adjust)
+{
+       u32 virt = vma->vm_start + mapping_offset;
+       u32 cpu_phys = mali_phys + cpu_usage_adjust;
+       u32 done = 0;
+       int ret;
+
+       while (0 < size) {
+               ret = vm_insert_pfn(vma, virt + done, __phys_to_pfn(cpu_phys + done));
+               if (unlikely(ret)) {
+                       MALI_DEBUG_PRINT(1, ("Block allocator: Failed to insert pfn into vma\n"));
+                       return 1;
+               }
+
+               done += MALI_MMU_PAGE_SIZE;
+               size -= MALI_MMU_PAGE_SIZE;
+       }
+
+       return 0;
+}
+
+/* Allocate size bytes (rounded up to whole blocks) from the global block
+ * allocator and map them both into the Mali MMU at mali_addr and into the
+ * CPU vma. Returns the new descriptor, or NULL on any failure (all partial
+ * work is rolled back).
+ *
+ * Lock order: session->memory_lock is taken before info->mutex. */
+mali_mem_allocation *mali_mem_block_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session)
+{
+       _mali_osk_errcode_t err;
+       mali_mem_allocation *descriptor;
+       block_allocator *info;
+       u32 left;
+       block_info *last_allocated = NULL;
+       block_allocator_allocation *ret_allocation;
+       u32 offset = 0;
+
+       /* Allocations are served in whole 256 kB blocks. */
+       size = ALIGN(size, MALI_BLOCK_SIZE);
+
+       info = mali_mem_block_gobal_allocator;
+       if (NULL == info) return NULL;
+
+       left = size;
+       MALI_DEBUG_ASSERT(0 != left);
+
+       descriptor = mali_mem_descriptor_create(session, MALI_MEM_BLOCK);
+       if (NULL == descriptor) {
+               return NULL;
+       }
+
+       descriptor->mali_mapping.addr = mali_addr;
+       descriptor->size = size;
+       descriptor->cpu_mapping.addr = (void __user*)vma->vm_start;
+       descriptor->cpu_mapping.ref = 1;
+
+       if (VM_SHARED == (VM_SHARED & vma->vm_flags)) {
+               descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
+       } else {
+               /* Cached Mali memory mapping */
+               descriptor->mali_mapping.properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
+               vma->vm_flags |= VM_SHARED;
+       }
+
+       ret_allocation = &descriptor->block_mem.mem;
+
+       ret_allocation->mapping_length = 0;
+
+       /* Lock order: session memory lock first, then the allocator mutex. */
+       _mali_osk_mutex_wait(session->memory_lock);
+       mutex_lock(&info->mutex);
+
+       /* Fail fast if the pool cannot possibly satisfy the request. */
+       if (left > (info->free_blocks * MALI_BLOCK_SIZE)) {
+               MALI_DEBUG_PRINT(2, ("Mali block allocator: not enough free blocks to service allocation (%u)\n", left));
+               mutex_unlock(&info->mutex);
+               _mali_osk_mutex_signal(session->memory_lock);
+               mali_mem_descriptor_destroy(descriptor);
+               return NULL;
+       }
+
+       err = mali_mem_mali_map_prepare(descriptor);
+       if (_MALI_OSK_ERR_OK != err) {
+               mutex_unlock(&info->mutex);
+               _mali_osk_mutex_signal(session->memory_lock);
+               mali_mem_descriptor_destroy(descriptor);
+               return NULL;
+       }
+
+       /* Pop blocks off the free-list one at a time; allocated blocks are
+        * chained through block->next (head = last_allocated) so the whole
+        * allocation can be walked for rollback or release later. */
+       while ((left > 0) && (info->first_free)) {
+               block_info *block;
+               u32 phys_addr;
+               u32 current_mapping_size;
+
+               block = info->first_free;
+               info->first_free = info->first_free->next;
+               block->next = last_allocated;
+               last_allocated = block;
+
+               phys_addr = get_phys(info, block);
+
+               /* The final block may be only partially mapped. */
+               if (MALI_BLOCK_SIZE < left) {
+                       current_mapping_size = MALI_BLOCK_SIZE;
+               } else {
+                       current_mapping_size = left;
+               }
+
+               mali_mem_block_mali_map(descriptor, phys_addr, mali_addr + offset, current_mapping_size);
+               if (mali_mem_block_cpu_map(descriptor, vma, phys_addr, offset, current_mapping_size, info->cpu_usage_adjust)) {
+                       /* release all memory back to the pool */
+                       while (last_allocated) {
+                               /* This relinks every block we've just allocated back into the free-list */
+                               block = last_allocated->next;
+                               last_allocated->next = info->first_free;
+                               info->first_free = last_allocated;
+                               last_allocated = block;
+                       }
+
+                       mutex_unlock(&info->mutex);
+                       _mali_osk_mutex_signal(session->memory_lock);
+
+                       mali_mem_mali_map_free(descriptor);
+                       mali_mem_descriptor_destroy(descriptor);
+
+                       return NULL;
+               }
+
+               left -= current_mapping_size;
+               offset += current_mapping_size;
+               ret_allocation->mapping_length += current_mapping_size;
+
+               --info->free_blocks;
+       }
+
+       mutex_unlock(&info->mutex);
+       _mali_osk_mutex_signal(session->memory_lock);
+
+       /* The free-block pre-check above guarantees we consumed everything. */
+       MALI_DEBUG_ASSERT(0 == left);
+
+       /* Record all the information about this allocation */
+       ret_allocation->last_allocated = last_allocated;
+       ret_allocation->info = info;
+
+       return descriptor;
+}
+
+/* Release a block allocation: tear down the Mali MMU mapping and return
+ * every block of the allocation chain to the allocator's free-list. */
+void mali_mem_block_release(mali_mem_allocation *descriptor)
+{
+       block_allocator *info = descriptor->block_mem.mem.info;
+       block_info *block, *next;
+       block_allocator_allocation *allocation = &descriptor->block_mem.mem;
+
+       MALI_DEBUG_ASSERT(MALI_MEM_BLOCK == descriptor->type);
+
+       block = allocation->last_allocated;
+
+       MALI_DEBUG_ASSERT_POINTER(block);
+
+       /* unmap */
+       mali_mem_mali_map_free(descriptor);
+
+       mutex_lock(&info->mutex);
+
+       while (block) {
+               /* Valid blocks live at indices [0, num_blocks - 1]; use >= so
+                * the one-past-the-end pointer is also rejected (the original
+                * `>` check allowed it through). */
+               MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block >= (info->all_blocks + info->num_blocks))));
+
+               next = block->next;
+
+               /* relink into free-list */
+               block->next = info->first_free;
+               info->first_free = block;
+
+               /* advance the loop */
+               block = next;
+
+               ++info->free_blocks;
+       }
+
+       mutex_unlock(&info->mutex);
+}
+
+/* Report the number of bytes currently handed out by the block allocator. */
+u32 mali_mem_block_allocator_stat(void)
+{
+       block_allocator *info = mali_mem_block_gobal_allocator;
+       u32 blocks_in_use;
+
+       if (NULL == info) return 0;
+
+       MALI_DEBUG_ASSERT_POINTER(info);
+
+       blocks_in_use = info->num_blocks - info->free_blocks;
+       return blocks_in_use * MALI_BLOCK_SIZE;
+}
+
+/* Claim a dedicated physical region for the Mali GPU and install the global
+ * block allocator over it.
+ *
+ * @param start Physical start address of dedicated Mali GPU memory.
+ * @param size Size of dedicated Mali GPU memory.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT otherwise.
+ */
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size)
+{
+       mali_mem_allocator *allocator;
+
+       /* Do the low level linux operation first */
+
+       /* Request ownership of the memory */
+       if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(start, size, "Dedicated Mali GPU memory")) {
+               MALI_DEBUG_PRINT(1, ("Failed to request memory region for frame buffer (0x%08X - 0x%08X)\n", start, start + size - 1));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Create generic block allocator object to handle it */
+       allocator = mali_mem_block_allocator_create(start, 0 /* cpu_usage_adjust */, size);
+
+       if (NULL == allocator) {
+               MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
+               _mali_osk_mem_unreqregion(start, size);
+               /* Plain return for consistency with the error path above
+                * (MALI_ERROR expands to the same return). */
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       mali_mem_block_gobal_allocator = (block_allocator*)allocator;
+
+       return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_block_alloc.h b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_block_alloc.h
new file mode 100644 (file)
index 0000000..84aec9e
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_BLOCK_ALLOCATOR_H__
+#define __MALI_BLOCK_ALLOCATOR_H__
+
+#include "mali_session.h"
+#include "mali_memory.h"
+
+#include "mali_memory_types.h"
+
+typedef struct mali_mem_allocator mali_mem_allocator;
+
+/* Prototype renamed to match the definition in mali_memory_block_alloc.c:
+ * the previous declaration `mali_block_allocator_create` had no matching
+ * definition anywhere, so any caller would fail to link. */
+mali_mem_allocator *mali_mem_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size);
+void mali_mem_block_allocator_destroy(mali_mem_allocator *allocator);
+
+mali_mem_allocation *mali_mem_block_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session);
+void mali_mem_block_release(mali_mem_allocation *descriptor);
+
+u32 mali_mem_block_allocator_stat(void);
+
+#endif /* __MALI_BLOCK_ALLOCATOR_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_dma_buf.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_dma_buf.c
new file mode 100644 (file)
index 0000000..694887b
--- /dev/null
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/fs.h>     /* file system operations */
+#include <asm/uaccess.h>       /* user space access */
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+#include <linux/rbtree.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#if defined (CONFIG_ION_EXYNOS)
+#include <linux/exynos_ion.h>
+#endif
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_linux.h"
+
+#include "mali_memory.h"
+#include "mali_memory_dma_buf.h"
+
+#include "mali_pp_job.h"
+
+static void mali_dma_buf_unmap(struct mali_dma_buf_attachment *mem);
+
+/* Per-allocation tracking of a dma-buf attached to the Mali device. */
+struct mali_dma_buf_attachment {
+       struct dma_buf *buf;                   /* dma-buf we hold a reference on */
+       struct dma_buf_attachment *attachment; /* our attachment to that buffer */
+       struct sg_table *sgt;                  /* scatter table, valid while mapped */
+       struct mali_session_data *session;     /* owning session */
+       int map_ref;                           /* map refcount, guarded by map_lock */
+       struct mutex map_lock;                 /* protects map_ref, sgt and is_mapped */
+       mali_bool is_mapped;                   /* MALI_TRUE while sgt is mapped into Mali */
+       wait_queue_head_t wait_queue;          /* woken on every map/unmap transition */
+};
+
+/* Final teardown of an attachment: waits until no mapping remains, then
+ * detaches from the dma-buf, drops our dma-buf reference and frees mem. */
+static void mali_dma_buf_release(struct mali_dma_buf_attachment *mem)
+{
+       MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release attachment %p\n", mem));
+
+       MALI_DEBUG_ASSERT_POINTER(mem);
+       MALI_DEBUG_ASSERT_POINTER(mem->attachment);
+       MALI_DEBUG_ASSERT_POINTER(mem->buf);
+
+#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+       /* We mapped implicitly on attach, so we need to unmap on release */
+       mali_dma_buf_unmap(mem);
+#endif
+
+       /* Wait for buffer to become unmapped */
+       /* (a concurrent PP job may still hold a map reference; its unmap
+        *  wakes us via mem->wait_queue) */
+       wait_event(mem->wait_queue, !mem->is_mapped);
+       MALI_DEBUG_ASSERT(!mem->is_mapped);
+
+       dma_buf_detach(mem->buf, mem->attachment);
+       dma_buf_put(mem->buf);
+
+       _mali_osk_free(mem);
+}
+
+/* Release the dma-buf attachment backing the given allocation. */
+void mali_mem_dma_buf_release(mali_mem_allocation *descriptor)
+{
+       mali_dma_buf_release(descriptor->dma_buf.attachment);
+}
+
+/*
+ * Map DMA buf attachment \a mem into \a session at virtual address \a virt.
+ */
+static int mali_dma_buf_map(struct mali_dma_buf_attachment *mem, struct mali_session_data *session, u32 virt, u32 flags)
+{
+       struct mali_page_directory *pagedir;
+       struct scatterlist *sg;
+       int i;
+
+       MALI_DEBUG_ASSERT_POINTER(mem);
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT(mem->session == session);
+
+       mutex_lock(&mem->map_lock);
+
+       mem->map_ref++;
+
+       MALI_DEBUG_PRINT(5, ("Mali DMA-buf: map attachment %p, new map_ref = %d\n", mem, mem->map_ref));
+
+       if (1 == mem->map_ref) {
+               /* First reference taken, so we need to map the dma buf */
+               MALI_DEBUG_ASSERT(!mem->is_mapped);
+
+               pagedir = mali_session_get_page_directory(session);
+               MALI_DEBUG_ASSERT_POINTER(pagedir);
+
+               mem->sgt = dma_buf_map_attachment(mem->attachment, DMA_BIDIRECTIONAL);
+               if (IS_ERR_OR_NULL(mem->sgt)) {
+                       MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf attachment\n"));
+                       return -EFAULT;
+               }
+
+#if defined (CONFIG_ION_EXYNOS)
+               exynos_ion_sync_dmabuf_for_device(&mali_platform_device->dev, mem->buf,
+                               mem->buf->size, DMA_BIDIRECTIONAL);
+#endif
+
+               for_each_sg(mem->sgt->sgl, sg, mem->sgt->nents, i) {
+                       u32 size = sg_dma_len(sg);
+                       dma_addr_t phys = sg_dma_address(sg);
+
+                       /* sg must be page aligned. */
+                       MALI_DEBUG_ASSERT(0 == size % MALI_MMU_PAGE_SIZE);
+
+                       mali_mmu_pagedir_update(pagedir, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);
+
+                       virt += size;
+               }
+
+               if (flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+                       u32 guard_phys;
+                       MALI_DEBUG_PRINT(7, ("Mapping in extra guard page\n"));
+
+                       guard_phys = sg_dma_address(mem->sgt->sgl);
+                       mali_mmu_pagedir_update(pagedir, virt, guard_phys, MALI_MMU_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+               }
+
+               mem->is_mapped = MALI_TRUE;
+               mutex_unlock(&mem->map_lock);
+
+               /* Wake up any thread waiting for buffer to become mapped */
+               wake_up_all(&mem->wait_queue);
+       } else {
+               MALI_DEBUG_ASSERT(mem->is_mapped);
+               mutex_unlock(&mem->map_lock);
+       }
+
+       return 0;
+}
+
+/* Drop one map reference on the attachment; the scatter table is unmapped
+ * when the last reference goes away. Waiters (e.g. mali_dma_buf_release)
+ * are woken after every unmap. */
+static void mali_dma_buf_unmap(struct mali_dma_buf_attachment *mem)
+{
+       MALI_DEBUG_ASSERT_POINTER(mem);
+       MALI_DEBUG_ASSERT_POINTER(mem->attachment);
+       MALI_DEBUG_ASSERT_POINTER(mem->buf);
+
+       mutex_lock(&mem->map_lock);
+
+       mem->map_ref--;
+
+       MALI_DEBUG_PRINT(5, ("Mali DMA-buf: unmap attachment %p, new map_ref = %d\n", mem, mem->map_ref));
+
+       if (0 == mem->map_ref) {
+               dma_buf_unmap_attachment(mem->attachment, mem->sgt, DMA_BIDIRECTIONAL);
+#if defined (CONFIG_ION_EXYNOS)
+               exynos_ion_sync_dmabuf_for_cpu(&mali_platform_device->dev, mem->buf,
+                               mem->buf->size, DMA_BIDIRECTIONAL);
+#endif
+               mem->is_mapped = MALI_FALSE;
+       }
+
+       mutex_unlock(&mem->map_lock);
+
+       /* Wake up any thread waiting for buffer to become unmapped */
+       wake_up_all(&mem->wait_queue);
+}
+
+#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+/* Map every dma-buf referenced by the PP job's memory cookies into the
+ * job's session. Failures are logged and skipped; the last failure makes
+ * the function return -EFAULT (successfully mapped buffers stay recorded
+ * in job->dma_bufs so mali_dma_buf_unmap_job can undo them). */
+int mali_dma_buf_map_job(struct mali_pp_job *job)
+{
+       mali_mem_allocation *descriptor;
+       struct mali_dma_buf_attachment *mem;
+       _mali_osk_errcode_t err;
+       int i;
+       int ret = 0;
+
+       _mali_osk_mutex_wait(job->session->memory_lock);
+
+       for (i = 0; i < job->num_memory_cookies; i++) {
+               int cookie = job->memory_cookies[i];
+
+               if (0 == cookie) {
+                       /* 0 is not a valid cookie */
+                       MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+                       continue;
+               }
+
+               MALI_DEBUG_ASSERT(0 < cookie);
+
+               err = mali_descriptor_mapping_get(job->session->descriptor_mapping,
+                                                 cookie, (void**)&descriptor);
+
+               if (_MALI_OSK_ERR_OK != err) {
+                       MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to get descriptor for cookie %d\n", cookie));
+                       ret = -EFAULT;
+                       MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+                       continue;
+               }
+
+               if (MALI_MEM_DMA_BUF != descriptor->type) {
+                       /* Not a DMA-buf */
+                       MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+                       continue;
+               }
+
+               mem = descriptor->dma_buf.attachment;
+
+               MALI_DEBUG_ASSERT_POINTER(mem);
+               MALI_DEBUG_ASSERT(mem->session == job->session);
+
+               err = mali_dma_buf_map(mem, mem->session, descriptor->mali_mapping.addr, descriptor->flags);
+               if (0 != err) {
+                       /* BUGFIX: the message was terminated with "\b"
+                        * (backspace) instead of "\n". */
+                       MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to map dma-buf for cookie %d at mali address %x\n",
+                                               cookie, descriptor->mali_mapping.addr));
+                       ret = -EFAULT;
+                       MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+                       continue;
+               }
+
+               /* Add mem to list of DMA-bufs mapped for this job */
+               job->dma_bufs[i] = mem;
+       }
+
+       _mali_osk_mutex_signal(job->session->memory_lock);
+
+       return ret;
+}
+
+/* Drop the map reference taken by mali_dma_buf_map_job for every dma-buf
+ * recorded on the job, clearing the slots as we go. */
+void mali_dma_buf_unmap_job(struct mali_pp_job *job)
+{
+       int i;
+
+       for (i = 0; i < job->num_dma_bufs; i++) {
+               if (NULL != job->dma_bufs[i]) {
+                       mali_dma_buf_unmap(job->dma_bufs[i]);
+                       job->dma_bufs[i] = NULL;
+               }
+       }
+}
+#endif /* !CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH */
+
+int mali_attach_dma_buf(struct mali_session_data *session, _mali_uk_attach_dma_buf_s __user *user_arg)
+{
+       struct dma_buf *buf;
+       struct mali_dma_buf_attachment *mem;
+       _mali_uk_attach_dma_buf_s args;
+       mali_mem_allocation *descriptor;
+       int md;
+       int fd;
+
+       /* Get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+       if (0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_attach_dma_buf_s))) {
+               return -EFAULT;
+       }
+
+       if (args.mali_address & ~PAGE_MASK) {
+               MALI_DEBUG_PRINT_ERROR(("Requested address (0x%08x) is not page aligned\n", args.mali_address));
+               return -EINVAL;
+       }
+
+       if (args.mali_address >= args.mali_address + args.size) {
+               MALI_DEBUG_PRINT_ERROR(("Requested address and size (0x%08x + 0x%08x) is too big\n", args.mali_address, args.size));
+               return -EINVAL;
+       }
+
+       fd = args.mem_fd;
+
+       buf = dma_buf_get(fd);
+       if (IS_ERR_OR_NULL(buf)) {
+               MALI_DEBUG_PRINT_ERROR(("Failed to get dma-buf from fd: %d\n", fd));
+               return PTR_RET(buf);
+       }
+
+       /* Currently, mapping of the full buffer are supported. */
+       if (args.size != buf->size) {
+               MALI_DEBUG_PRINT_ERROR(("dma-buf size doesn't match mapping size.\n"));
+               dma_buf_put(buf);
+               return -EINVAL;
+       }
+
+       mem = _mali_osk_calloc(1, sizeof(struct mali_dma_buf_attachment));
+       if (NULL == mem) {
+               MALI_DEBUG_PRINT_ERROR(("Failed to allocate dma-buf tracing struct\n"));
+               dma_buf_put(buf);
+               return -ENOMEM;
+       }
+
+       mem->buf = buf;
+       mem->session = session;
+       mem->map_ref = 0;
+       mutex_init(&mem->map_lock);
+       init_waitqueue_head(&mem->wait_queue);
+
+       mem->attachment = dma_buf_attach(mem->buf, &mali_platform_device->dev);
+       if (NULL == mem->attachment) {
+               MALI_DEBUG_PRINT_ERROR(("Failed to attach to dma-buf %d\n", fd));
+               dma_buf_put(mem->buf);
+               _mali_osk_free(mem);
+               return -EFAULT;
+       }
+
+       /* Set up Mali memory descriptor */
+       descriptor = mali_mem_descriptor_create(session, MALI_MEM_DMA_BUF);
+       if (NULL == descriptor) {
+               MALI_DEBUG_PRINT_ERROR(("Failed to allocate descriptor dma-buf %d\n", fd));
+               mali_dma_buf_release(mem);
+               return -ENOMEM;
+       }
+
+       descriptor->size = args.size;
+       descriptor->mali_mapping.addr = args.mali_address;
+
+       descriptor->dma_buf.attachment = mem;
+
+       descriptor->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;
+       if (args.flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+               descriptor->flags = MALI_MEM_FLAG_MALI_GUARD_PAGE;
+       }
+
+       _mali_osk_mutex_wait(session->memory_lock);
+
+       /* Map dma-buf into this session's page tables */
+       if (_MALI_OSK_ERR_OK != mali_mem_mali_map_prepare(descriptor)) {
+               _mali_osk_mutex_signal(session->memory_lock);
+               MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf on Mali\n"));
+               mali_mem_descriptor_destroy(descriptor);
+               mali_dma_buf_release(mem);
+               return -ENOMEM;
+       }
+
+#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+       /* Map memory into session's Mali virtual address space. */
+
+       if (0 != mali_dma_buf_map(mem, session, descriptor->mali_mapping.addr, descriptor->flags)) {
+               mali_mem_mali_map_free(descriptor);
+               _mali_osk_mutex_signal(session->memory_lock);
+
+               MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf %d into Mali address space\n", fd));
+               mali_mem_descriptor_destroy(descriptor);
+               mali_dma_buf_release(mem);
+               return -ENOMEM;
+       }
+
+#endif
+
+       _mali_osk_mutex_signal(session->memory_lock);
+
+       /* Get descriptor mapping for memory. */
+       if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md)) {
+               _mali_osk_mutex_wait(session->memory_lock);
+               mali_mem_mali_map_free(descriptor);
+               _mali_osk_mutex_signal(session->memory_lock);
+
+               MALI_DEBUG_PRINT_ERROR(("Failed to create descriptor mapping for dma-buf %d\n", fd));
+               mali_mem_descriptor_destroy(descriptor);
+               mali_dma_buf_release(mem);
+               return -EFAULT;
+       }
+
+       /* Return stuff to user space */
+       if (0 != put_user(md, &user_arg->cookie)) {
+               _mali_osk_mutex_wait(session->memory_lock);
+               mali_mem_mali_map_free(descriptor);
+               _mali_osk_mutex_signal(session->memory_lock);
+
+               MALI_DEBUG_PRINT_ERROR(("Failed to return descriptor to user space for dma-buf %d\n", fd));
+               mali_descriptor_mapping_free(session->descriptor_mapping, md);
+               mali_dma_buf_release(mem);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+/* ioctl backend: detach the dma-buf identified by user_arg->cookie from
+ * the session, unmapping it from Mali and destroying its descriptor.
+ * Returns 0, or -EFAULT/-EINVAL on bad arguments. */
+int mali_release_dma_buf(struct mali_session_data *session, _mali_uk_release_dma_buf_s __user *user_arg)
+{
+       int ret = 0;
+       _mali_uk_release_dma_buf_s args;
+       mali_mem_allocation *descriptor;
+
+       /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+       if ( 0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_release_dma_buf_s)) ) {
+               return -EFAULT;
+       }
+
+       MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release descriptor cookie %d\n", args.cookie));
+
+       _mali_osk_mutex_wait(session->memory_lock);
+
+       /* Removes the cookie from the mapping and hands back the descriptor
+        * (NULL if the cookie was invalid). */
+       descriptor = mali_descriptor_mapping_free(session->descriptor_mapping, args.cookie);
+
+       if (NULL != descriptor) {
+               MALI_DEBUG_PRINT(3, ("Mali DMA-buf: Releasing dma-buf at mali address %x\n", descriptor->mali_mapping.addr));
+
+               mali_mem_mali_map_free(descriptor);
+
+               mali_dma_buf_release(descriptor->dma_buf.attachment);
+
+               mali_mem_descriptor_destroy(descriptor);
+       } else {
+               MALI_DEBUG_PRINT_ERROR(("Invalid memory descriptor %d used to release dma-buf\n", args.cookie));
+               ret = -EINVAL;
+       }
+
+       _mali_osk_mutex_signal(session->memory_lock);
+
+       /* Report whether the cookie lookup succeeded.
+        * (The old comment referencing _mali_ukk_map_external_ump_mem was a
+        *  stale copy-paste.) */
+       return ret;
+}
+
+/* ioctl backend: report the size of the dma-buf behind user_arg->mem_fd
+ * back to user space via user_arg->size. */
+int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *user_arg)
+{
+       _mali_uk_dma_buf_get_size_s args;
+       struct dma_buf *buf;
+       int fd;
+
+       /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+       if ( 0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_dma_buf_get_size_s)) ) {
+               return -EFAULT;
+       }
+
+       /* Look up the dma-buf behind the file descriptor. */
+       fd = args.mem_fd;
+       buf = dma_buf_get(fd);
+       if (IS_ERR_OR_NULL(buf)) {
+               MALI_DEBUG_PRINT_ERROR(("Failed to get dma-buf from fd: %d\n", fd));
+               return PTR_RET(buf);
+       }
+
+       /* Hand the size back to user space, then drop our reference. */
+       if (0 != put_user(buf->size, &user_arg->size)) {
+               dma_buf_put(buf);
+               return -EFAULT;
+       }
+
+       dma_buf_put(buf);
+       return 0;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_dma_buf.h b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_dma_buf.h
new file mode 100644 (file)
index 0000000..d810a8d
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_DMA_BUF_H__
+#define __MALI_MEMORY_DMA_BUF_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_osk.h"
+#include "mali_memory.h"
+
+struct mali_pp_job;
+
+struct mali_dma_buf_attachment;
+
+/* ioctl backends: attach/release a dma-buf for a session, query its size. */
+int mali_attach_dma_buf(struct mali_session_data *session, _mali_uk_attach_dma_buf_s __user *arg);
+int mali_release_dma_buf(struct mali_session_data *session, _mali_uk_release_dma_buf_s __user *arg);
+int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *arg);
+
+/* Release the attachment backing a MALI_MEM_DMA_BUF allocation. */
+void mali_mem_dma_buf_release(mali_mem_allocation *descriptor);
+
+#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+/* Per-job map/unmap used when buffers are not mapped at attach time. */
+int mali_dma_buf_map_job(struct mali_pp_job *job);
+void mali_dma_buf_unmap_job(struct mali_pp_job *job);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_MEMORY_DMA_BUF_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_external.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_external.c
new file mode 100644 (file)
index 0000000..f2730ec
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_memory.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_mem_validation.h"
+#include "mali_uk_types.h"
+
+/* Release the Mali-side MMU mapping of an externally-owned memory range.
+ * The physical memory itself is not owned by the driver and is not freed.
+ * Callers (see _mali_ukk_unmap_external_mem below) hold the session
+ * memory_lock around this call. */
+void mali_mem_external_release(mali_mem_allocation *descriptor)
+{
+       MALI_DEBUG_ASSERT(MALI_MEM_EXTERNAL == descriptor->type);
+
+       mali_mem_mali_map_free(descriptor);
+}
+
+/* Map a physically contiguous, externally-owned memory range into a
+ * session's Mali virtual address space.
+ *
+ * args->phys_addr/size describe the physical range (size must be non-zero
+ * and page aligned), args->mali_address the target Mali VA, and on success
+ * args->cookie receives the descriptor-map handle used later to unmap.
+ * Returns _MALI_OSK_ERR_OK on success or a _MALI_OSK_ERR_* code. */
+_mali_osk_errcode_t _mali_ukk_map_external_mem(_mali_uk_map_external_mem_s *args)
+{
+       struct mali_session_data *session;
+       mali_mem_allocation * descriptor;
+       int md;
+       _mali_osk_errcode_t err;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+       session = (struct mali_session_data *)args->ctx;
+       MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+
+       /* check arguments */
+       /* NULL might be a valid Mali address */
+       if (! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+       /* size must be a multiple of the system page size */
+       if (args->size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+       /* NOTE(review): integer args are cast to void* for %x here —
+        * presumably to silence format warnings; verify on 64-bit. */
+       MALI_DEBUG_PRINT(3,
+                        ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
+                         (void*)args->phys_addr,
+                         (void*)(args->phys_addr + args->size -1),
+                         (void*)args->mali_address)
+                       );
+
+       /* Validate the mali physical range */
+       if (_MALI_OSK_ERR_OK != mali_mem_validation_check(args->phys_addr, args->size)) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       descriptor = mali_mem_descriptor_create(session, MALI_MEM_EXTERNAL);
+       if (NULL == descriptor) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+       descriptor->mali_mapping.addr = args->mali_address;
+       descriptor->size = args->size;
+
+       if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+               descriptor->flags = MALI_MEM_FLAG_MALI_GUARD_PAGE;
+       }
+
+       /* Build the MMU mapping under the session memory lock. */
+       _mali_osk_mutex_wait(session->memory_lock);
+       {
+               u32 virt = descriptor->mali_mapping.addr;
+               u32 phys = args->phys_addr;
+               u32 size = args->size;
+
+               err = mali_mem_mali_map_prepare(descriptor);
+               if (_MALI_OSK_ERR_OK != err) {
+                       _mali_osk_mutex_signal(session->memory_lock);
+                       mali_mem_descriptor_destroy(descriptor);
+                       return _MALI_OSK_ERR_NOMEM;
+               }
+
+               mali_mmu_pagedir_update(session->page_directory, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);
+
+               /* Guard page: map one extra page right after the range so
+                * overruns hit a known mapping instead of a page fault.
+                * It re-points at the first phys page of the range. */
+               if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+                       mali_mmu_pagedir_update(session->page_directory, virt + size, phys, _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+               }
+       }
+       _mali_osk_mutex_signal(session->memory_lock);
+
+       /* Publish the descriptor; on failure unwind the MMU mapping. */
+       if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md)) {
+               _mali_osk_mutex_wait(session->memory_lock);
+               mali_mem_external_release(descriptor);
+               _mali_osk_mutex_signal(session->memory_lock);
+               mali_mem_descriptor_destroy(descriptor);
+               MALI_ERROR(_MALI_OSK_ERR_FAULT);
+       }
+
+       args->cookie = md;
+
+       MALI_SUCCESS;
+}
+
+/* Undo _mali_ukk_map_external_mem: look up the descriptor by the cookie
+ * returned at map time, remove it from the descriptor map, tear down the
+ * Mali MMU mapping and destroy the descriptor.
+ * Returns _MALI_OSK_ERR_FAULT for an unknown cookie. */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args )
+{
+       mali_mem_allocation * descriptor;
+       void* old_value;
+       struct mali_session_data *session;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+       session = (struct mali_session_data *)args->ctx;
+       MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+
+       if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session->descriptor_mapping, args->cookie, (void**)&descriptor)) {
+               MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to unmap external memory\n", args->cookie));
+               MALI_ERROR(_MALI_OSK_ERR_FAULT);
+       }
+
+       /* mali_descriptor_mapping_free() returns the stored pointer, or NULL
+        * if the slot was already empty — only release once. */
+       old_value = mali_descriptor_mapping_free(session->descriptor_mapping, args->cookie);
+
+       if (NULL != old_value) {
+               _mali_osk_mutex_wait(session->memory_lock);
+               mali_mem_external_release(descriptor);
+               _mali_osk_mutex_signal(session->memory_lock);
+               mali_mem_descriptor_destroy(descriptor);
+       }
+
+       MALI_SUCCESS;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_os_alloc.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_os_alloc.c
new file mode 100644 (file)
index 0000000..70b778c
--- /dev/null
@@ -0,0 +1,559 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#include "mali_osk.h"
+#include "mali_memory.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_kernel_linux.h"
+
+/* Minimum size of allocator page pool */
+/* 256 pages per MB assumes a 4 KiB page size. */
+#define MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 256)
+/* NOTE(review): uses CONFIG_HZ directly instead of the conventional HZ
+ * macro — equivalent when CONFIG_HZ is defined, but worth confirming. */
+#define MALI_OS_MEMORY_POOL_TRIM_JIFFIES (10 * CONFIG_HZ) /* Default to 10s */
+
+/* The shrinker callback signature changed across kernel versions;
+ * declare the matching variant for the kernel being built against. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask);
+#else
+static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask);
+#endif
+#else
+static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
+#endif
+static void mali_mem_os_trim_pool(struct work_struct *work);
+
+/* Global state for the OS-page allocator: a spinlock-protected pool of
+ * recycled pages, usage accounting, a memory shrinker registration, and
+ * a deferred work item that periodically trims the pool. */
+static struct mali_mem_os_allocator {
+       spinlock_t pool_lock;                /* protects pool_pages/pool_count */
+       struct list_head pool_pages;         /* free pages kept for reuse */
+       size_t pool_count;                   /* number of pages on pool_pages */
+
+       atomic_t allocated_pages;            /* pages currently handed out */
+       size_t allocation_limit;             /* max bytes (see resource_os_memory) */
+
+       struct shrinker shrinker;            /* registered with the kernel VM */
+       struct delayed_work timed_shrinker;  /* periodic pool trim */
+       struct workqueue_struct *wq;         /* created in mali_mem_os_init() */
+} mali_mem_os_allocator = {
+       .pool_lock = __SPIN_LOCK_UNLOCKED(pool_lock),
+       .pool_pages = LIST_HEAD_INIT(mali_mem_os_allocator.pool_pages),
+       .pool_count = 0,
+
+       .allocated_pages = ATOMIC_INIT(0),
+       .allocation_limit = 0,
+
+       .shrinker.shrink = mali_mem_os_shrink,
+       .shrinker.seeks = DEFAULT_SEEKS,
+/* The deferrable-work initializer macro has been renamed over kernel
+ * versions; pick the variant this kernel provides. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+       .timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool, TIMER_DEFERRABLE),
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
+       .timed_shrinker = __DEFERRED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
+#else
+       .timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
+#endif
+};
+
+/* Return all pages of an OS allocation to the global page pool (they are
+ * recycled, not freed to the kernel here), update the usage counter, and
+ * kick the trim work if the pool has grown past its target size. */
+static void mali_mem_os_free(mali_mem_allocation *descriptor)
+{
+       LIST_HEAD(pages);
+
+       MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);
+
+       atomic_sub(descriptor->os_mem.count, &mali_mem_os_allocator.allocated_pages);
+
+       /* Put pages on pool. */
+       /* Detach the whole page list first so the splice under the lock is O(1). */
+       list_cut_position(&pages, &descriptor->os_mem.pages, descriptor->os_mem.pages.prev);
+
+       spin_lock(&mali_mem_os_allocator.pool_lock);
+
+       list_splice(&pages, &mali_mem_os_allocator.pool_pages);
+       mali_mem_os_allocator.pool_count += descriptor->os_mem.count;
+
+       spin_unlock(&mali_mem_os_allocator.pool_lock);
+
+       /* NOTE(review): pool_count is read here without the lock — a stale
+        * value only affects when trimming starts, not correctness. */
+       if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
+               MALI_DEBUG_PRINT(5, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
+               queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
+       }
+}
+
+/* Populate descriptor->os_mem.pages with PAGE_ALIGN(size)/page-size pages:
+ * first by recycling pages from the global pool, then by allocating fresh
+ * zeroed highmem pages. Each new page is DMA-mapped (cache-cleaned) and its
+ * bus address stashed in page->private for the Mali MMU mapping step.
+ * Returns 0 on success or -ENOMEM (partially-allocated pages are returned
+ * to the pool before failing). */
+static int mali_mem_os_alloc_pages(mali_mem_allocation *descriptor, u32 size)
+{
+       struct page *new_page, *tmp;
+       LIST_HEAD(pages);
+       size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE;
+       size_t remaining = page_count;
+       u32 i;
+
+       MALI_DEBUG_ASSERT_POINTER(descriptor);
+       MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);
+
+       INIT_LIST_HEAD(&descriptor->os_mem.pages);
+       descriptor->os_mem.count = page_count;
+
+       /* Grab pages from pool. */
+       {
+               size_t pool_pages;
+               spin_lock(&mali_mem_os_allocator.pool_lock);
+               pool_pages = min(remaining, mali_mem_os_allocator.pool_count);
+               for (i = pool_pages; i > 0; i--) {
+                       BUG_ON(list_empty(&mali_mem_os_allocator.pool_pages));
+                       list_move(mali_mem_os_allocator.pool_pages.next, &pages);
+               }
+               mali_mem_os_allocator.pool_count -= pool_pages;
+               remaining -= pool_pages;
+               spin_unlock(&mali_mem_os_allocator.pool_lock);
+       }
+
+       /* Process pages from pool. */
+       /* Pooled pages are already DMA-mapped; just append them.
+        * NOTE(review): `i = 0;` is dead here — `i` is not used in this loop. */
+       i = 0;
+       list_for_each_entry_safe(new_page, tmp, &pages, lru) {
+               BUG_ON(NULL == new_page);
+
+               list_move_tail(&new_page->lru, &descriptor->os_mem.pages);
+       }
+
+       /* Allocate new pages, if needed. */
+       for (i = 0; i < remaining; i++) {
+               dma_addr_t dma_addr;
+
+               new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
+
+               if (unlikely(NULL == new_page)) {
+                       /* Calculate the number of pages actually allocated, and free them. */
+                       /* mali_mem_os_free() subtracts count again, so add it first. */
+                       descriptor->os_mem.count = (page_count - remaining) + i;
+                       atomic_add(descriptor->os_mem.count, &mali_mem_os_allocator.allocated_pages);
+                       mali_mem_os_free(descriptor);
+                       return -ENOMEM;
+               }
+
+               /* Ensure page is flushed from CPU caches. */
+               dma_addr = dma_map_page(&mali_platform_device->dev, new_page,
+                                       0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+
+               /* Store page phys addr */
+               SetPagePrivate(new_page);
+               set_page_private(new_page, dma_addr);
+
+               list_add_tail(&new_page->lru, &descriptor->os_mem.pages);
+       }
+
+       atomic_add(page_count, &mali_mem_os_allocator.allocated_pages);
+
+       /* Pool shrank below target; no need to keep the trim timer pending. */
+       if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
+               MALI_DEBUG_PRINT(4, ("OS Mem: Stopping pool trim timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
+               cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
+       }
+
+       return 0;
+}
+
+/* Map every page of an OS allocation into the session's Mali page
+ * directory, one MMU page at a time starting at mali_mapping.addr.
+ * The bus address of each page was stored in page->private by
+ * mali_mem_os_alloc_pages(). Returns 0 or -ENOMEM. */
+static int mali_mem_os_mali_map(mali_mem_allocation *descriptor, struct mali_session_data *session)
+{
+       struct mali_page_directory *pagedir = session->page_directory;
+       struct page *page;
+       _mali_osk_errcode_t err;
+       u32 virt = descriptor->mali_mapping.addr;
+       u32 prop = descriptor->mali_mapping.properties;
+
+       MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);
+
+       /* Reserve the page tables for the whole range up front. */
+       err = mali_mem_mali_map_prepare(descriptor);
+       if (_MALI_OSK_ERR_OK != err) {
+               return -ENOMEM;
+       }
+
+       list_for_each_entry(page, &descriptor->os_mem.pages, lru) {
+               u32 phys = page_private(page);
+               mali_mmu_pagedir_update(pagedir, virt, phys, MALI_MMU_PAGE_SIZE, prop);
+               virt += MALI_MMU_PAGE_SIZE;
+       }
+
+       return 0;
+}
+
+/* Remove the Mali MMU mapping of an OS allocation; thin wrapper kept for
+ * symmetry with mali_mem_os_mali_map(). The session argument is unused. */
+static void mali_mem_os_mali_unmap(struct mali_session_data *session, mali_mem_allocation *descriptor)
+{
+       mali_mem_mali_map_free(descriptor);
+}
+
+/* Map the allocation's pages into the user-space vma, page by page from
+ * vma->vm_start. Returns 0, or -EFAULT if any insertion fails. */
+static int mali_mem_os_cpu_map(mali_mem_allocation *descriptor, struct vm_area_struct *vma)
+{
+       struct page *page;
+       int ret;
+       unsigned long addr = vma->vm_start;
+
+       list_for_each_entry(page, &descriptor->os_mem.pages, lru) {
+               /* We should use vm_insert_page, but it does a dcache
+                * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.
+               ret = vm_insert_page(vma, addr, page);
+               */
+               ret = vm_insert_pfn(vma, addr, page_to_pfn(page));
+
+               if (unlikely(0 != ret)) {
+                       /* NOTE(review): pages inserted so far are not unmapped
+                        * here; presumably the caller's error path (zap on
+                        * munmap/release) covers that — confirm. */
+                       return -EFAULT;
+               }
+               addr += _MALI_OSK_MALI_PAGE_SIZE;
+       }
+
+       return 0;
+}
+
+/* Allocate `size` bytes of OS memory for a session: create a descriptor,
+ * get pages (pool first, then the OS), map them on the Mali MMU and into
+ * the caller's vma. Returns the descriptor, or NULL on failure (all
+ * partial work is unwound via the goto ladder at the bottom). */
+mali_mem_allocation *mali_mem_os_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session)
+{
+       mali_mem_allocation *descriptor;
+       int err;
+
+       /* MALI_SEC */ //Remove limitation of Texture memory size for GLB2.7 T-rex
+       /* Vendor change: the allocation_limit check is intentionally
+        * disabled, so mali_memory_core_resource_os_memory() currently has
+        * no enforcement effect. */
+#if 0
+       if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) {
+               MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
+                                    size,
+                                    atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE,
+                                    mali_mem_os_allocator.allocation_limit));
+               return NULL;
+       }
+#endif
+
+       descriptor = mali_mem_descriptor_create(session, MALI_MEM_OS);
+       if (NULL == descriptor) return NULL;
+
+       descriptor->mali_mapping.addr = mali_addr;
+       descriptor->size = size;
+       descriptor->cpu_mapping.addr = (void __user*)vma->vm_start;
+       descriptor->cpu_mapping.ref = 1;
+
+       if (VM_SHARED == (VM_SHARED & vma->vm_flags)) {
+               descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
+       } else {
+               /* Cached Mali memory mapping */
+               descriptor->mali_mapping.properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
+               vma->vm_flags |= VM_SHARED;
+       }
+
+       err = mali_mem_os_alloc_pages(descriptor, size); /* Allocate pages */
+       if (0 != err) goto alloc_failed;
+
+       /* Take session memory lock */
+       _mali_osk_mutex_wait(session->memory_lock);
+
+       err = mali_mem_os_mali_map(descriptor, session); /* Map on Mali */
+       if (0 != err) goto mali_map_failed;
+
+       _mali_osk_mutex_signal(session->memory_lock);
+
+       err = mali_mem_os_cpu_map(descriptor, vma); /* Map on CPU */
+       if (0 != err) goto cpu_map_failed;
+
+       return descriptor;
+
+cpu_map_failed:
+       mali_mem_os_mali_unmap(session, descriptor);
+mali_map_failed:
+       _mali_osk_mutex_signal(session->memory_lock);
+       mali_mem_os_free(descriptor);
+alloc_failed:
+       mali_mem_descriptor_destroy(descriptor);
+       MALI_DEBUG_PRINT(2, ("OS allocator: Failed to allocate memory (%d)\n", err));
+       return NULL;
+}
+
+/* Release an OS allocation: drop its Mali MMU mapping and return its
+ * pages to the pool. Per mali_memory_os_alloc.h, the caller holds the
+ * session memory_lock. */
+void mali_mem_os_release(mali_mem_allocation *descriptor)
+{
+       struct mali_session_data *session = descriptor->session;
+
+       /* Unmap the memory from the mali virtual address space. */
+       mali_mem_os_mali_unmap(session, descriptor);
+
+       /* Free pages */
+       mali_mem_os_free(descriptor);
+}
+
+
+/* Fixed-capacity cache of MMU page-table pages (phys addr + kernel
+ * mapping pairs), avoiding dma_alloc_writecombine on every table page.
+ * Entries [0, count) are valid; `lock` protects both array and count. */
+#define MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE 128
+static struct {
+       struct {
+               u32 phys;
+               mali_io_address mapping;
+       } page[MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE];
+       u32 count;
+       spinlock_t lock;
+} mali_mem_page_table_page_pool = {
+       .count = 0,
+       .lock = __SPIN_LOCK_UNLOCKED(pool_lock),
+};
+
+/* Get one MMU page-table page: pop from the pool if possible, otherwise
+ * fall back to a fresh write-combined DMA allocation. On success *phys
+ * and *mapping are filled in; returns _MALI_OSK_ERR_NOMEM on failure. */
+_mali_osk_errcode_t mali_mem_os_get_table_page(u32 *phys, mali_io_address *mapping)
+{
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_NOMEM;
+
+       spin_lock(&mali_mem_page_table_page_pool.lock);
+       if (0 < mali_mem_page_table_page_pool.count) {
+               u32 i = --mali_mem_page_table_page_pool.count;
+               *phys = mali_mem_page_table_page_pool.page[i].phys;
+               *mapping = mali_mem_page_table_page_pool.page[i].mapping;
+
+               ret = _MALI_OSK_ERR_OK;
+       }
+       spin_unlock(&mali_mem_page_table_page_pool.lock);
+
+       /* Pool was empty: allocate outside the spinlock (may sleep). */
+       if (_MALI_OSK_ERR_OK != ret) {
+               *mapping = dma_alloc_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, phys, GFP_KERNEL);
+               if (NULL != *mapping) {
+                       ret = _MALI_OSK_ERR_OK;
+               }
+       }
+
+       return ret;
+}
+
+/* Return a page-table page: stash it in the pool if there is room,
+ * otherwise free it back to the DMA allocator (after dropping the lock,
+ * since dma_free_writecombine must not run under a spinlock). */
+void mali_mem_os_release_table_page(u32 phys, void *virt)
+{
+       spin_lock(&mali_mem_page_table_page_pool.lock);
+       if (MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE > mali_mem_page_table_page_pool.count) {
+               u32 i = mali_mem_page_table_page_pool.count;
+               mali_mem_page_table_page_pool.page[i].phys = phys;
+               mali_mem_page_table_page_pool.page[i].mapping = virt;
+
+               ++mali_mem_page_table_page_pool.count;
+
+               spin_unlock(&mali_mem_page_table_page_pool.lock);
+       } else {
+               spin_unlock(&mali_mem_page_table_page_pool.lock);
+
+               dma_free_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, virt, phys);
+       }
+}
+
+/* Free a pooled data page for real: undo the DMA mapping created in
+ * mali_mem_os_alloc_pages() (bus address lives in page->private), clear
+ * the private flag and hand the page back to the kernel. The page must
+ * have no other references. */
+static void mali_mem_os_free_page(struct page *page)
+{
+       BUG_ON(page_count(page) != 1);
+
+       dma_unmap_page(&mali_platform_device->dev, page_private(page),
+                      _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+
+       ClearPagePrivate(page);
+
+       __free_page(page);
+}
+
+/* The maximum number of page table pool pages to free in one go. */
+#define MALI_MEM_OS_CHUNK_TO_FREE 64UL
+
+/* Free a certain number of pages from the page table page pool.
+ * The pool lock must be held when calling the function, and the lock will be
+ * released before returning.
+ */
+static void mali_mem_os_page_table_pool_free(size_t nr_to_free)
+{
+       /* Copies are staged on the stack so dma_free_writecombine (which may
+        * sleep) can run after the spinlock is dropped. */
+       u32 phys_arr[MALI_MEM_OS_CHUNK_TO_FREE];
+       void *virt_arr[MALI_MEM_OS_CHUNK_TO_FREE];
+       u32 i;
+
+       MALI_DEBUG_ASSERT(nr_to_free <= MALI_MEM_OS_CHUNK_TO_FREE);
+
+       /* Remove nr_to_free pages from the pool and store them locally on stack. */
+       for (i = 0; i < nr_to_free; i++) {
+               u32 pool_index = mali_mem_page_table_page_pool.count - i - 1;
+
+               phys_arr[i] = mali_mem_page_table_page_pool.page[pool_index].phys;
+               virt_arr[i] = mali_mem_page_table_page_pool.page[pool_index].mapping;
+       }
+
+       mali_mem_page_table_page_pool.count -= nr_to_free;
+
+       spin_unlock(&mali_mem_page_table_page_pool.lock);
+
+       /* After releasing the spinlock: free the pages we removed from the pool. */
+       for (i = 0; i < nr_to_free; i++) {
+               dma_free_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, virt_arr[i], phys_arr[i]);
+       }
+}
+
+/* Shrink the page-table page pool toward a target derived from the data
+ * page pool size (2 table pages per 1024 cached pages, plus 8 for new
+ * sessions), freeing at most MALI_MEM_OS_CHUNK_TO_FREE pages per call.
+ * Best-effort: silently does nothing if the pool lock is contended. */
+static void mali_mem_os_trim_page_table_page_pool(void)
+{
+       size_t nr_to_free = 0;
+       size_t nr_to_keep;
+
+       /* Keep 2 page table pages for each 1024 pages in the page cache. */
+       nr_to_keep = mali_mem_os_allocator.pool_count / 512;
+       /* And a minimum of eight pages, to accomodate new sessions. */
+       nr_to_keep += 8;
+
+       if (0 == spin_trylock(&mali_mem_page_table_page_pool.lock)) return;
+
+       if (nr_to_keep < mali_mem_page_table_page_pool.count) {
+               nr_to_free = mali_mem_page_table_page_pool.count - nr_to_keep;
+               nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, nr_to_free);
+       }
+
+       /* Pool lock will be released by the callee. */
+       mali_mem_os_page_table_pool_free(nr_to_free);
+}
+
+/* Kernel VM shrinker callback. With nr_to_scan == 0 it only reports the
+ * reclaimable page count; otherwise it detaches up to `nr` pages from the
+ * data page pool (under the lock), frees them outside the lock, and also
+ * trims the page-table page pool. Returns the remaining pooled page
+ * count, or -1 if the pool lock could not be taken. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask)
+#else
+static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask)
+#endif
+#else
+static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+#endif
+{
+       struct page *page, *tmp;
+       unsigned long flags;
+       struct list_head *le, pages;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
+       int nr = nr_to_scan;
+#else
+       int nr = sc->nr_to_scan;
+#endif
+
+       if (0 == nr) {
+               return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count;
+       }
+
+       /* Unlocked pre-check; a race here at worst delays reclaim. */
+       if (0 == mali_mem_os_allocator.pool_count) {
+               /* No pages availble */
+               return 0;
+       }
+
+       if (0 == spin_trylock_irqsave(&mali_mem_os_allocator.pool_lock, flags)) {
+               /* Not able to lock. */
+               return -1;
+       }
+
+       /* Release from general page pool */
+       /* Walk forward nr entries to find the cut point; `pages` is
+        * initialized by list_cut_position. NOTE(review): if the pool was
+        * emptied between the pre-check and the lock, nr becomes 0 and the
+        * countdown below never hits 0 — verify this corner is benign. */
+       nr = min((size_t)nr, mali_mem_os_allocator.pool_count);
+       mali_mem_os_allocator.pool_count -= nr;
+       list_for_each(le, &mali_mem_os_allocator.pool_pages) {
+               --nr;
+               if (0 == nr) break;
+       }
+       list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
+       spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);
+
+       /* Actual page freeing happens outside the spinlock. */
+       list_for_each_entry_safe(page, tmp, &pages, lru) {
+               mali_mem_os_free_page(page);
+       }
+
+       /* Release some pages from page table page pool */
+       mali_mem_os_trim_page_table_page_pool();
+
+       if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
+               /* Pools are empty, stop timer */
+               MALI_DEBUG_PRINT(5, ("Stopping timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
+               cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
+       }
+
+       return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count;
+}
+
+/* Deferred-work handler that trims the data page pool back toward
+ * MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES, freeing half the excess
+ * (minimum 64 pages) per run, then trims the page-table pool. Re-arms
+ * itself while the pool is still above the target. */
+static void mali_mem_os_trim_pool(struct work_struct *data)
+{
+       struct page *page, *tmp;
+       struct list_head *le;
+       LIST_HEAD(pages);
+       size_t nr_to_free;
+
+       MALI_IGNORE(data);
+
+       MALI_DEBUG_PRINT(3, ("OS Mem: Trimming pool %u\n", mali_mem_os_allocator.pool_count));
+
+       /* Release from general page pool */
+       spin_lock(&mali_mem_os_allocator.pool_lock);
+       if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
+               size_t count = mali_mem_os_allocator.pool_count - MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES;
+               /* Free half the pages on the pool above the static limit. Or 64 pages, 256KB. */
+               nr_to_free = max(count / 2, (size_t)64);
+
+               /* Walk forward to the cut point, detach that sublist, and
+                * free the pages only after the lock is dropped. */
+               mali_mem_os_allocator.pool_count -= nr_to_free;
+               list_for_each(le, &mali_mem_os_allocator.pool_pages) {
+                       --nr_to_free;
+                       if (0 == nr_to_free) break;
+               }
+               list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
+       }
+       spin_unlock(&mali_mem_os_allocator.pool_lock);
+
+       list_for_each_entry_safe(page, tmp, &pages, lru) {
+               mali_mem_os_free_page(page);
+       }
+
+       /* Release some pages from page table page pool */
+       mali_mem_os_trim_page_table_page_pool();
+
+       if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
+               MALI_DEBUG_PRINT(4, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
+               queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
+       }
+}
+
+/* One-time init of the OS memory allocator: create the (single-threaded,
+ * unbound) trim workqueue and register the VM shrinker. */
+_mali_osk_errcode_t mali_mem_os_init(void)
+{
+       mali_mem_os_allocator.wq = alloc_workqueue("mali-mem", WQ_UNBOUND, 1);
+       if (NULL == mali_mem_os_allocator.wq) {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       register_shrinker(&mali_mem_os_allocator.shrinker);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Tear down the OS memory allocator: unregister the shrinker, stop the
+ * trim work/workqueue, then drain both pools. Runs at module exit, after
+ * which both pools must be empty. */
+void mali_mem_os_term(void)
+{
+       struct page *page, *tmp;
+
+       unregister_shrinker(&mali_mem_os_allocator.shrinker);
+       cancel_delayed_work_sync(&mali_mem_os_allocator.timed_shrinker);
+       destroy_workqueue(mali_mem_os_allocator.wq);
+
+       /* NOTE(review): mali_mem_os_free_page() is called with the pool
+        * spinlock held here; dma_unmap_page is atomic-safe on this
+        * platform, presumably — and no contention is possible at exit. */
+       spin_lock(&mali_mem_os_allocator.pool_lock);
+       list_for_each_entry_safe(page, tmp, &mali_mem_os_allocator.pool_pages, lru) {
+               mali_mem_os_free_page(page);
+
+               --mali_mem_os_allocator.pool_count;
+       }
+       BUG_ON(mali_mem_os_allocator.pool_count);
+       spin_unlock(&mali_mem_os_allocator.pool_lock);
+
+       /* Release from page table page pool */
+       /* Freed in chunks of at most MALI_MEM_OS_CHUNK_TO_FREE because the
+        * staging arrays in the callee are stack-allocated. */
+       do {
+               u32 nr_to_free;
+
+               spin_lock(&mali_mem_page_table_page_pool.lock);
+
+               nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, mali_mem_page_table_page_pool.count);
+
+               /* Pool lock will be released by the callee. */
+               mali_mem_os_page_table_pool_free(nr_to_free);
+       } while (0 != mali_mem_page_table_page_pool.count);
+}
+
+/* Record the configured OS-memory budget in bytes. Note: the check that
+ * would enforce this limit is #if 0'd out in mali_mem_os_alloc() above,
+ * so the value is currently informational only. */
+_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size)
+{
+       mali_mem_os_allocator.allocation_limit = size;
+
+       MALI_SUCCESS;
+}
+
+/* Report the number of bytes currently handed out by the OS allocator
+ * (pooled-but-free pages are not counted). */
+u32 mali_mem_os_stat(void)
+{
+       return atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_os_alloc.h b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_os_alloc.h
new file mode 100644 (file)
index 0000000..8842c81
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_OS_ALLOC_H__
+#define __MALI_MEMORY_OS_ALLOC_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+
+#include "mali_memory_types.h"
+
+/* OS memory allocator */
+/** @brief Allocate memory from OS
+ *
+ * This function will create a descriptor, allocate pages and map these on the CPU and Mali.
+ *
+ * @param mali_addr Mali virtual address to use for Mali mapping
+ * @param size Size to allocate
+ * @param vma Pointer to vma for CPU mapping
+ * @param session Pointer to session doing the allocation
+ * @return Allocation descriptor on success, NULL on failure
+ */
+mali_mem_allocation *mali_mem_os_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session);
+
+/** @brief Release Mali OS memory
+ *
+ * The session memory_lock must be held when calling this function.
+ *
+ * @param descriptor Pointer to the descriptor to release
+ */
+void mali_mem_os_release(mali_mem_allocation *descriptor);
+
+/** @brief Get one MMU page-table page (from the pool or freshly DMA-allocated). */
+_mali_osk_errcode_t mali_mem_os_get_table_page(u32 *phys, mali_io_address *mapping);
+
+/** @brief Return an MMU page-table page to the pool (or free it if the pool is full). */
+void mali_mem_os_release_table_page(u32 phys, void *virt);
+
+/* Allocator lifecycle and accounting. */
+_mali_osk_errcode_t mali_mem_os_init(void);
+void mali_mem_os_term(void);
+u32 mali_mem_os_stat(void);
+
+#endif /* __MALI_MEMORY_OS_ALLOC_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_types.h b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_types.h
new file mode 100644 (file)
index 0000000..c560541
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_TYPES_H__
+#define __MALI_MEMORY_TYPES_H__
+
+#if defined(CONFIG_MALI400_UMP)
+#include "ump_kernel_interface.h"
+#endif
+
+typedef u32 mali_address_t;
+
+typedef enum mali_mem_type {
+       MALI_MEM_OS,
+       MALI_MEM_EXTERNAL,
+       MALI_MEM_DMA_BUF,
+       MALI_MEM_UMP,
+       MALI_MEM_BLOCK,
+} mali_mem_type;
+
+typedef struct mali_mem_os_mem {
+       struct list_head pages;
+       u32 count;
+} mali_mem_os_mem;
+
+typedef struct mali_mem_dma_buf {
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+       struct mali_dma_buf_attachment *attachment;
+#endif
+} mali_mem_dma_buf;
+
+typedef struct mali_mem_external {
+       dma_addr_t phys;
+       u32 size;
+} mali_mem_external;
+
+typedef struct mali_mem_ump {
+#if defined(CONFIG_MALI400_UMP)
+       ump_dd_handle handle;
+#endif
+} mali_mem_ump;
+
+typedef struct block_allocator_allocation {
+       /* The list will be released in reverse order */
+       struct block_info *last_allocated;
+       u32 mapping_length;
+       struct block_allocator *info;
+} block_allocator_allocation;
+
+typedef struct mali_mem_block_mem {
+       block_allocator_allocation mem;
+} mali_mem_block_mem;
+
+typedef struct mali_mem_virt_mali_mapping {
+       mali_address_t addr; /* Virtual Mali address */
+       u32 properties;      /* MMU Permissions + cache, must match MMU HW */
+} mali_mem_virt_mali_mapping;
+
+typedef struct mali_mem_virt_cpu_mapping {
+       void __user *addr;
+       u32 ref;
+} mali_mem_virt_cpu_mapping;
+
+#define MALI_MEM_ALLOCATION_VALID_MAGIC 0xdeda110c
+#define MALI_MEM_ALLOCATION_FREED_MAGIC 0x10101010
+
+typedef struct mali_mem_allocation {
+       MALI_DEBUG_CODE(u32 magic);
+       mali_mem_type type;                /**< Type of memory */
+       int id;                            /**< ID in the descriptor map for this allocation */
+
+       u32 size;                          /**< Size of the allocation */
+       u32 flags;                         /**< Flags for this allocation */
+
+       struct mali_session_data *session; /**< Pointer to session that owns the allocation */
+
+       /* Union selected by type. */
+       union {
+               mali_mem_os_mem os_mem;       /**< MALI_MEM_OS */
+               mali_mem_external ext_mem;    /**< MALI_MEM_EXTERNAL */
+               mali_mem_dma_buf dma_buf;     /**< MALI_MEM_DMA_BUF */
+               mali_mem_ump ump_mem;         /**< MALI_MEM_UMP */
+               mali_mem_block_mem block_mem; /**< MALI_MEM_BLOCK */
+       };
+
+       mali_mem_virt_cpu_mapping cpu_mapping; /**< CPU mapping */
+       mali_mem_virt_mali_mapping mali_mapping; /**< Mali mapping */
+} mali_mem_allocation;
+
+#define MALI_MEM_FLAG_MALI_GUARD_PAGE (1 << 0)
+#define MALI_MEM_FLAG_DONT_CPU_MAP    (1 << 1)
+
+#endif /* __MALI_MEMORY_TYPES__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_ump.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_memory_ump.c
new file mode 100644 (file)
index 0000000..a322ba0
--- /dev/null
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_linux.h"
+
+#include "mali_memory.h"
+
+#include "ump_kernel_interface.h"
+
+/**
+ * mali_ump_map - Map a UMP allocation into the session's Mali MMU.
+ *
+ * Queries the physical blocks backing descriptor->ump_mem.handle and writes
+ * matching page-table entries into the session's page directory, starting at
+ * descriptor->mali_mapping.addr. If the guard-page flag is set, one extra
+ * page re-mapping the first physical block is appended after the range.
+ *
+ * Returns 0 on success or a negative errno (-EINVAL/-ENOMEM/-EFAULT).
+ * NOTE(review): callers appear to hold session->memory_lock — confirm.
+ */
+static int mali_ump_map(struct mali_session_data *session, mali_mem_allocation *descriptor)
+{
+	ump_dd_handle ump_mem;
+	u32 nr_blocks;
+	u32 i;
+	ump_dd_physical_block *ump_blocks;
+	struct mali_page_directory *pagedir;
+	u32 offset = 0;
+	u32 prop;
+	_mali_osk_errcode_t err;
+
+	MALI_DEBUG_ASSERT_POINTER(session);
+	MALI_DEBUG_ASSERT_POINTER(descriptor);
+	MALI_DEBUG_ASSERT(MALI_MEM_UMP == descriptor->type);
+
+	ump_mem = descriptor->ump_mem.handle;
+	MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem);
+
+	nr_blocks = ump_dd_phys_block_count_get(ump_mem);
+	if (nr_blocks == 0) {
+		MALI_DEBUG_PRINT(1, ("No block count\n"));
+		return -EINVAL;
+	}
+
+	/* Temporary array holding the physical block list for the handle. */
+	ump_blocks = _mali_osk_malloc(sizeof(*ump_blocks)*nr_blocks);
+	if (NULL == ump_blocks) {
+		return -ENOMEM;
+	}
+
+	if (UMP_DD_INVALID == ump_dd_phys_blocks_get(ump_mem, ump_blocks, nr_blocks)) {
+		_mali_osk_free(ump_blocks);
+		return -EFAULT;
+	}
+
+	pagedir = session->page_directory;
+	prop = descriptor->mali_mapping.properties;
+
+	/* Reserve/prepare page tables for the whole range before writing entries. */
+	err = mali_mem_mali_map_prepare(descriptor);
+	if (_MALI_OSK_ERR_OK != err) {
+		MALI_DEBUG_PRINT(1, ("Mapping of UMP memory failed\n"));
+
+		_mali_osk_free(ump_blocks);
+		return -ENOMEM;
+	}
+
+	/* Map each physical block contiguously into the Mali virtual range. */
+	for(i = 0; i < nr_blocks; ++i) {
+		u32 virt = descriptor->mali_mapping.addr + offset;
+
+		MALI_DEBUG_PRINT(7, ("Mapping in 0x%08x size %d\n", ump_blocks[i].addr , ump_blocks[i].size));
+
+		mali_mmu_pagedir_update(pagedir, virt, ump_blocks[i].addr,
+					ump_blocks[i].size, prop);
+
+		offset += ump_blocks[i].size;
+	}
+
+	if (descriptor->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+		u32 virt = descriptor->mali_mapping.addr + offset;
+
+		/* Map in an extra virtual guard page at the end of the VMA.
+		 * It aliases the first physical block rather than new memory. */
+		MALI_DEBUG_PRINT(6, ("Mapping in extra guard page\n"));
+
+		mali_mmu_pagedir_update(pagedir, virt, ump_blocks[0].addr, _MALI_OSK_MALI_PAGE_SIZE, prop);
+
+		offset += _MALI_OSK_MALI_PAGE_SIZE;
+	}
+
+	_mali_osk_free(ump_blocks);
+
+	return 0;
+}
+
+/**
+ * mali_ump_unmap - Remove the Mali MMU mapping for a UMP allocation and
+ * drop the UMP reference taken at attach time.
+ *
+ * NOTE(review): callers appear to hold session->memory_lock — confirm.
+ */
+void mali_ump_unmap(struct mali_session_data *session, mali_mem_allocation *descriptor)
+{
+	ump_dd_handle ump_mem;
+
+	MALI_IGNORE(session);
+
+	ump_mem = descriptor->ump_mem.handle;
+	MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem);
+
+	/* Tear down the Mali page-table entries for this allocation. */
+	mali_mem_mali_map_free(descriptor);
+
+	/* Drop the reference taken by ump_dd_handle_create_from_secure_id(). */
+	ump_dd_reference_release(ump_mem);
+}
+
+/**
+ * _mali_ukk_attach_ump_mem - User-kernel entry: attach a UMP buffer (by
+ * secure id) at a caller-chosen Mali virtual address.
+ *
+ * On success args->cookie holds the descriptor-map id used later by
+ * _mali_ukk_release_ump_mem(). Every failure path releases the UMP
+ * reference and destroys the descriptor, leaving no partial state.
+ */
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem(_mali_uk_attach_ump_mem_s *args)
+{
+	ump_dd_handle ump_mem;
+	struct mali_session_data *session;
+	mali_mem_allocation *descriptor;
+	int md, ret;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	session = (struct mali_session_data *)args->ctx;
+	MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+
+	/* check arguments */
+	/* NULL might be a valid Mali address */
+	if (!args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+	/* size must be a multiple of the system page size */
+	if (args->size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+	MALI_DEBUG_PRINT(3,
+			 ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
+			  args->secure_id, args->mali_address, args->size));
+
+	/* Takes a UMP reference; must be released on every error path below. */
+	ump_mem = ump_dd_handle_create_from_secure_id((int)args->secure_id);
+
+	if (UMP_DD_HANDLE_INVALID == ump_mem) MALI_ERROR(_MALI_OSK_ERR_FAULT);
+
+	descriptor = mali_mem_descriptor_create(session, MALI_MEM_UMP);
+	if (NULL == descriptor) {
+		ump_dd_reference_release(ump_mem);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	descriptor->ump_mem.handle = ump_mem;
+	descriptor->mali_mapping.addr = args->mali_address;
+	descriptor->size = args->size;
+	descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
+	descriptor->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;
+
+	if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+		/* Use |= here: plain assignment would clobber the
+		 * MALI_MEM_FLAG_DONT_CPU_MAP bit set just above. */
+		descriptor->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
+	}
+
+	_mali_osk_mutex_wait(session->memory_lock);
+
+	ret = mali_ump_map(session, descriptor);
+	if (0 != ret) {
+		_mali_osk_mutex_signal(session->memory_lock);
+		ump_dd_reference_release(ump_mem);
+		mali_mem_descriptor_destroy(descriptor);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	_mali_osk_mutex_signal(session->memory_lock);
+
+	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md)) {
+		/* Undo the Mali MMU mapping created above as well, otherwise the
+		 * page-table entries would leak. mali_ump_unmap() also drops the
+		 * UMP reference, so it must not be released separately here. */
+		_mali_osk_mutex_wait(session->memory_lock);
+		mali_ump_unmap(session, descriptor);
+		_mali_osk_mutex_signal(session->memory_lock);
+		mali_mem_descriptor_destroy(descriptor);
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	args->cookie = md;
+
+	MALI_DEBUG_PRINT(5,("Returning from UMP attach\n"));
+
+	MALI_SUCCESS;
+}
+
+/* Release a UMP-backed allocation: unmap it from the owning session's
+ * Mali MMU and drop the UMP reference (done inside mali_ump_unmap). */
+void mali_mem_ump_release(mali_mem_allocation *descriptor)
+{
+	MALI_DEBUG_ASSERT(MALI_MEM_UMP == descriptor->type);
+
+	mali_ump_unmap(descriptor->session, descriptor);
+}
+
+/**
+ * _mali_ukk_release_ump_mem - User-kernel entry: detach a previously
+ * attached UMP buffer identified by the cookie returned from attach.
+ *
+ * Removes the descriptor from the session's descriptor map first, then
+ * unmaps and destroys it under the session memory lock.
+ */
+_mali_osk_errcode_t _mali_ukk_release_ump_mem(_mali_uk_release_ump_mem_s *args)
+{
+	mali_mem_allocation * descriptor;
+	struct mali_session_data *session;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	session = (struct mali_session_data *)args->ctx;
+	MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+
+	/* Probe first so an invalid cookie can be reported distinctly. */
+	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session->descriptor_mapping, args->cookie, (void**)&descriptor)) {
+		MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release ump memory\n", args->cookie));
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	/* Atomically remove the cookie; returns the descriptor if still present. */
+	descriptor = mali_descriptor_mapping_free(session->descriptor_mapping, args->cookie);
+
+	if (NULL != descriptor) {
+		_mali_osk_mutex_wait(session->memory_lock);
+		mali_mem_ump_release(descriptor);
+		_mali_osk_mutex_signal(session->memory_lock);
+
+		mali_mem_descriptor_destroy(descriptor);
+	}
+
+	MALI_SUCCESS;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_atomics.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_atomics.c
new file mode 100644 (file)
index 0000000..0cdbace
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_atomics.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <asm/atomic.h>
+#include "mali_kernel_common.h"
+
+/** Atomically decrement @atom. The cast assumes atom->u.val aliases an
+ *  atomic_t — NOTE(review): layout guaranteed by _mali_osk_atomic_t; confirm. */
+void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom )
+{
+	atomic_dec((atomic_t *)&atom->u.val);
+}
+
+/** Atomically decrement @atom and return the new value. */
+u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom )
+{
+	return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+/** Atomically increment @atom. */
+void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom )
+{
+	atomic_inc((atomic_t *)&atom->u.val);
+}
+
+/** Atomically increment @atom and return the new value. */
+u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom )
+{
+	return atomic_inc_return((atomic_t *)&atom->u.val);
+}
+
+/** Initialize @atom to @val. Returns _MALI_OSK_ERR_INVALID_ARGS on NULL atom. */
+_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val )
+{
+	MALI_CHECK_NON_NULL(atom, _MALI_OSK_ERR_INVALID_ARGS);
+	atomic_set((atomic_t *)&atom->u.val, val);
+	return _MALI_OSK_ERR_OK;
+}
+
+/** Return the current value of @atom. */
+u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom )
+{
+	return atomic_read((atomic_t *)&atom->u.val);
+}
+
+/** Tear down @atom. Nothing to release on Linux; kept for OSK API symmetry. */
+void _mali_osk_atomic_term( _mali_osk_atomic_t *atom )
+{
+	MALI_IGNORE(atom);
+}
+
+/** Atomically store @val in @atom and return the previous value. */
+u32 _mali_osk_atomic_xchg( _mali_osk_atomic_t *atom, u32 val )
+{
+	return atomic_xchg((atomic_t*)&atom->u.val, val);
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_irq.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_irq.c
new file mode 100644 (file)
index 0000000..3164607
--- /dev/null
@@ -0,0 +1,204 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_irq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/slab.h>        /* For memory allocation */
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* Per-core IRQ registration record; passed as dev_id to request_irq(). */
+typedef struct _mali_osk_irq_t_struct {
+	u32 irqnum;                      /* Linux IRQ number registered for this core */
+	void *data;                      /* Opaque context forwarded to the upper-half handler */
+	_mali_osk_irq_uhandler_t uhandler; /* Driver's upper-half (hard IRQ) callback */
+} mali_osk_irq_object_t;
+
+typedef irqreturn_t (*irq_handler_func_t)(int, void *, struct pt_regs *);
+static irqreturn_t irq_handler_upper_half (int port_name, void* dev_id ); /* , struct pt_regs *regs*/
+
+#if defined(DEBUG)
+#if 0
+
+struct test_interrupt_data {
+       _mali_osk_irq_ack_t ack_func;
+       void *probe_data;
+       mali_bool interrupt_received;
+       wait_queue_head_t wq;
+};
+
+/* Upper-half handler used only by the (currently compiled-out, #if 0)
+ * interrupt self-test: acks the probe interrupt and wakes the waiter. */
+static irqreturn_t test_interrupt_upper_half(int port_name, void *dev_id)
+{
+	irqreturn_t ret = IRQ_NONE;
+	struct test_interrupt_data *data = (struct test_interrupt_data *)dev_id;
+
+	if (_MALI_OSK_ERR_OK == data->ack_func(data->probe_data)) {
+		data->interrupt_received = MALI_TRUE;
+		wake_up(&data->wq);
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+/* Compiled-out (#if 0) self-test: installs a temporary handler, triggers an
+ * interrupt via trigger_func and waits (up to 100 jiffies) for it to arrive,
+ * verifying the configured IRQ line actually works.
+ * NOTE(review): init_waitqueue_head() is called AFTER request_irq(); an
+ * interrupt firing in that window would touch an uninitialized waitqueue —
+ * worth fixing if this test is ever re-enabled. */
+static _mali_osk_errcode_t test_interrupt(u32 irqnum,
+        _mali_osk_irq_trigger_t trigger_func,
+        _mali_osk_irq_ack_t ack_func,
+        void *probe_data,
+        const char *description)
+{
+	unsigned long irq_flags = 0;
+	struct test_interrupt_data data = {
+		.ack_func = ack_func,
+		.probe_data = probe_data,
+		.interrupt_received = MALI_FALSE,
+	};
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+	irq_flags |= IRQF_SHARED;
+#endif /* defined(CONFIG_MALI_SHARED_INTERRUPTS) */
+
+	if (0 != request_irq(irqnum, test_interrupt_upper_half, irq_flags, description, &data)) {
+		MALI_DEBUG_PRINT(2, ("Unable to install test IRQ handler for core '%s'\n", description));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	init_waitqueue_head(&data.wq);
+
+	trigger_func(probe_data);
+	wait_event_timeout(data.wq, data.interrupt_received, 100);
+
+	free_irq(irqnum, &data);
+
+	if (data.interrupt_received) {
+		MALI_DEBUG_PRINT(3, ("%s: Interrupt test OK\n", description));
+		return _MALI_OSK_ERR_OK;
+	} else {
+		MALI_PRINT_ERROR(("%s: Failed interrupt test on %u\n", description, irqnum));
+		return _MALI_OSK_ERR_FAULT;
+	}
+}
+#endif
+
+#endif /* defined(DEBUG) */
+
+/**
+ * _mali_osk_irq_init - Register an interrupt handler for one Mali core.
+ *
+ * If @irqnum is -1 and both @trigger_func and @ack_func are supplied, the
+ * IRQ line is auto-probed (up to 4 attempts) using probe_irq_on/off.
+ * Returns an opaque handle on success, NULL on allocation/probe/request
+ * failure. Free with _mali_osk_irq_term().
+ *
+ * NOTE(review): irqnum is u32 yet compared against -1; this relies on the
+ * usual arithmetic conversions wrapping -1 to 0xFFFFFFFF — works, but a
+ * signed type would be clearer.
+ */
+_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description )
+{
+	mali_osk_irq_object_t *irq_object;
+	unsigned long irq_flags = 0;
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+	irq_flags |= IRQF_SHARED;
+#endif /* defined(CONFIG_MALI_SHARED_INTERRUPTS) */
+
+	irq_object = kmalloc(sizeof(mali_osk_irq_object_t), GFP_KERNEL);
+	if (NULL == irq_object) {
+		return NULL;
+	}
+
+	if (-1 == irqnum) {
+		/* Probe for IRQ */
+		if ( (NULL != trigger_func) && (NULL != ack_func) ) {
+			unsigned long probe_count = 3;
+			_mali_osk_errcode_t err;
+			int irq;
+
+			MALI_DEBUG_PRINT(2, ("Probing for irq\n"));
+
+			do {
+				unsigned long mask;
+
+				/* Arm autodetection, fire a Mali interrupt, then see
+				 * which line went active. */
+				mask = probe_irq_on();
+				trigger_func(probe_data);
+
+				_mali_osk_time_ubusydelay(5);
+
+				irq = probe_irq_off(mask);
+				err = ack_func(probe_data);
+			} while (irq < 0 && (err == _MALI_OSK_ERR_OK) && probe_count--);
+
+			if (irq < 0 || (_MALI_OSK_ERR_OK != err)) irqnum = -1;
+			else irqnum = irq;
+		} else irqnum = -1; /* no probe functions, fault */
+
+		if (-1 != irqnum) {
+			/* found an irq */
+			MALI_DEBUG_PRINT(2, ("Found irq %d\n", irqnum));
+		} else {
+			MALI_DEBUG_PRINT(2, ("Probe for irq failed\n"));
+		}
+	}
+
+	irq_object->irqnum = irqnum;
+	irq_object->uhandler = uhandler;
+	irq_object->data = int_data;
+
+	if (-1 == irqnum) {
+		MALI_DEBUG_PRINT(2, ("No IRQ for core '%s' found during probe\n", description));
+		kfree(irq_object);
+		return NULL;
+	}
+
+#if defined(DEBUG)
+#if 0
+	/* Verify that the configured interrupt settings are working */
+	if (_MALI_OSK_ERR_OK != test_interrupt(irqnum, trigger_func, ack_func, probe_data, description)) {
+		MALI_DEBUG_PRINT(2, ("Test of IRQ handler for core '%s' failed\n", description));
+		kfree(irq_object);
+		return NULL;
+	}
+#endif
+#endif
+
+	if (0 != request_irq(irqnum, irq_handler_upper_half, irq_flags, description, irq_object)) {
+		MALI_DEBUG_PRINT(2, ("Unable to install IRQ handler for core '%s'\n", description));
+		kfree(irq_object);
+		return NULL;
+	}
+
+	return irq_object;
+}
+
+/** Unregister the handler installed by _mali_osk_irq_init() and free the
+ *  bookkeeping object. Must not be called from interrupt context
+ *  (free_irq may sleep). */
+void _mali_osk_irq_term( _mali_osk_irq_t *irq )
+{
+	mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
+	free_irq(irq_object->irqnum, irq_object);
+	kfree(irq_object);
+}
+
+
+/** This function is called directly in interrupt context from the OS just after
+ * the CPU gets the hw-irq from Mali, or other devices on the same IRQ-channel.
+ * One of these functions is registered for each Mali core. When an interrupt
+ * arrives this function will be called as many times as there are registered
+ * Mali cores. That means that we only check one Mali core per call, and the
+ * core we check for each turn is given by the \a dev_id variable.
+ * If we detect a pending interrupt on the given core, we mask the interrupt
+ * out by setting the core's IRQ_MASK register to zero.
+ * Then we schedule the mali_core_irq_handler_bottom_half to run as a high
+ * priority work queue job.
+ */
+static irqreturn_t irq_handler_upper_half (int port_name, void* dev_id ) /* , struct pt_regs *regs*/
+{
+	irqreturn_t ret = IRQ_NONE;
+	mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)dev_id;
+
+	/* The uhandler acks/masks the core; IRQ_HANDLED only when it was ours. */
+	if (_MALI_OSK_ERR_OK == irq_object->uhandler(irq_object->data)) {
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_locks.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_locks.c
new file mode 100644 (file)
index 0000000..623e304
--- /dev/null
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk_locks.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+
+#ifdef DEBUG
+#ifdef LOCK_ORDER_CHECKING
+static DEFINE_SPINLOCK(lock_tracking_lock);
+static mali_bool add_lock_to_log_and_check(struct _mali_osk_lock_debug_s *lock, uint32_t tid);
+static void remove_lock_from_log(struct _mali_osk_lock_debug_s *lock, uint32_t tid);
+static const char * const lock_order_to_string(_mali_osk_lock_order_t order);
+#endif /* LOCK_ORDER_CHECKING */
+
+/** Initialize the debug-tracking fields of a lock: remember its creation
+ *  flags and clear the owner. With LOCK_ORDER_CHECKING the lock's order and
+ *  list link are initialized too (otherwise @order is unused). */
+void _mali_osk_locks_debug_init(struct _mali_osk_lock_debug_s *checker, _mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+{
+	checker->orig_flags = flags;
+	checker->owner = 0;
+
+#ifdef LOCK_ORDER_CHECKING
+	checker->order = order;
+	checker->next = NULL;
+#endif
+}
+
+/** Record the current thread as owner of @checker. With LOCK_ORDER_CHECKING
+ *  also verify the lock-order invariant (unless the lock was created
+ *  _MALI_OSK_LOCKFLAG_UNORDERED) and dump a stack trace on violation. */
+void _mali_osk_locks_debug_add(struct _mali_osk_lock_debug_s *checker)
+{
+	checker->owner = _mali_osk_get_tid();
+
+#ifdef LOCK_ORDER_CHECKING
+	if (!(checker->orig_flags & _MALI_OSK_LOCKFLAG_UNORDERED)) {
+		if (!add_lock_to_log_and_check(checker, _mali_osk_get_tid())) {
+			printk(KERN_ERR "%d: ERROR lock %p taken while holding a lock of a higher order.\n",
+			       _mali_osk_get_tid(), checker);
+			dump_stack();
+		}
+	}
+#endif
+}
+
+/** Clear ownership of @checker on unlock; with LOCK_ORDER_CHECKING also
+ *  remove it from the per-system lock tracking list. */
+void _mali_osk_locks_debug_remove(struct _mali_osk_lock_debug_s *checker)
+{
+
+#ifdef LOCK_ORDER_CHECKING
+	if (!(checker->orig_flags & _MALI_OSK_LOCKFLAG_UNORDERED)) {
+		remove_lock_from_log(checker, _mali_osk_get_tid());
+	}
+#endif
+	checker->owner = 0;
+}
+
+
+#ifdef LOCK_ORDER_CHECKING
+/* Lock order checking
+ * -------------------
+ *
+ * To assure that lock ordering scheme defined by _mali_osk_lock_order_t is strictly adhered to, the
+ * following function will, together with a linked list and some extra members in _mali_osk_lock_debug_s,
+ * make sure that a lock that is taken has a higher order than the current highest-order lock a
+ * thread holds.
+ *
+ * This is done in the following manner:
+ * - A linked list keeps track of locks held by a thread.
+ * - A `next' pointer is added to each lock. This is used to chain the locks together.
+ * - When taking a lock, the `add_lock_to_log_and_check' makes sure that taking
+ *   the given lock is legal. It will follow the linked list  to find the last
+ *   lock taken by this thread. If the last lock's order was lower than the
+ *   lock that is to be taken, it appends the new lock to the list and returns
+ *   true, if not, it return false. This return value is assert()'ed on in
+ *   _mali_osk_lock_wait().
+ */
+
+static struct _mali_osk_lock_debug_s *lock_lookup_list;
+
+/* Print the global lock-tracking list for diagnostics. The assert bounds the
+ * walk at 100 nodes to catch list corruption (cycles). Caller must hold
+ * lock_tracking_lock. */
+static void dump_lock_tracking_list(void)
+{
+	struct _mali_osk_lock_debug_s *l;
+	u32 n = 1;
+
+	/* print list for debugging purposes */
+	l = lock_lookup_list;
+
+	while (NULL != l) {
+		printk(" [lock: %p, tid_owner: %d, order: %d] ->", l, l->owner, l->order);
+		l = l->next;
+		MALI_DEBUG_ASSERT(n++ < 100);
+	}
+	printk(" NULL\n");
+}
+
+/* Return the number of locks currently on the tracking list (bounded at 100
+ * by the corruption assert). Caller must hold lock_tracking_lock. */
+static int tracking_list_length(void)
+{
+	struct _mali_osk_lock_debug_s *l;
+	u32 n = 0;
+	l = lock_lookup_list;
+
+	while (NULL != l) {
+		l = l->next;
+		n++;
+		MALI_DEBUG_ASSERT(n < 100);
+	}
+	return n;
+}
+
+/**
+ * add_lock_to_log_and_check - Append @lock to the tracking list and verify
+ * that its order is strictly higher than the highest-order lock already held
+ * by thread @tid.
+ *
+ * Returns MALI_TRUE if taking the lock is legal, MALI_FALSE on an ordering
+ * violation (the list state is dumped for diagnosis either way).
+ */
+static mali_bool add_lock_to_log_and_check(struct _mali_osk_lock_debug_s *lock, uint32_t tid)
+{
+	mali_bool ret = MALI_FALSE;
+	_mali_osk_lock_order_t highest_order_for_tid = _MALI_OSK_LOCK_ORDER_FIRST;
+	struct _mali_osk_lock_debug_s *highest_order_lock = (struct _mali_osk_lock_debug_s *)0xbeefbabe;
+	struct _mali_osk_lock_debug_s *l;
+	unsigned long local_lock_flag;
+	u32 len;
+
+	spin_lock_irqsave(&lock_tracking_lock, local_lock_flag);
+	len = tracking_list_length();
+
+	l  = lock_lookup_list;
+	if (NULL == l) { /* This is the first lock taken by this thread -- record and return true */
+		lock_lookup_list = lock;
+		spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag);
+		return MALI_TRUE;
+	} else {
+		/* Traverse the locks taken and find the lock of the highest order.
+		 * Since several threads may hold locks, each lock's owner must be
+		 * checked so that locks not owned by this thread can be ignored. */
+		for(;;) {
+			MALI_DEBUG_ASSERT_POINTER( l );
+			if (tid == l->owner && l->order >= highest_order_for_tid) {
+				highest_order_for_tid = l->order;
+				highest_order_lock = l;
+			}
+
+			if (NULL != l->next) {
+				l = l->next;
+			} else {
+				break;
+			}
+		}
+
+		/* Append the new lock as the list tail. The second statement must
+		 * terminate the NEW node, not overwrite l->next with NULL (which
+		 * would silently drop the lock from the list and trip the length
+		 * sanity check below). */
+		l->next = lock;
+		lock->next = NULL;
+	}
+
+	/* We have now found the highest order lock currently held by this thread and can see if it is
+	 * legal to take the requested lock. */
+	ret = highest_order_for_tid < lock->order;
+
+	if (!ret) {
+		printk(KERN_ERR "Took lock of order %d (%s) while holding lock of order %d (%s)\n",
+		       lock->order, lock_order_to_string(lock->order),
+		       highest_order_for_tid, lock_order_to_string(highest_order_for_tid));
+		dump_lock_tracking_list();
+	}
+
+	if (len+1 != tracking_list_length()) {
+		printk(KERN_ERR "************ lock: %p\n", lock);
+		printk(KERN_ERR "************ before: %d *** after: %d ****\n", len, tracking_list_length());
+		dump_lock_tracking_list();
+		MALI_DEBUG_ASSERT_POINTER(NULL);
+	}
+
+	spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag);
+	return ret;
+}
+
+/**
+ * remove_lock_from_log - Unlink @lock from the tracking list when thread
+ * @tid releases it. Asserts (and dumps the list) if the lock is not found
+ * or if the list length does not shrink by exactly one.
+ */
+static void remove_lock_from_log(struct _mali_osk_lock_debug_s *lock, uint32_t tid)
+{
+	struct _mali_osk_lock_debug_s *curr;
+	struct _mali_osk_lock_debug_s *prev = NULL;
+	unsigned long local_lock_flag;
+	u32 len;
+	u32 n = 0;
+
+	spin_lock_irqsave(&lock_tracking_lock, local_lock_flag);
+	len = tracking_list_length();
+	curr = lock_lookup_list;
+
+	if (NULL == curr) {
+		printk(KERN_ERR "Error: Lock tracking list was empty on call to remove_lock_from_log\n");
+		dump_lock_tracking_list();
+	}
+
+	MALI_DEBUG_ASSERT_POINTER(curr);
+
+
+	/* Walk until curr is the lock being released; prev trails one node. */
+	while (lock != curr) {
+		prev = curr;
+
+		MALI_DEBUG_ASSERT_POINTER(curr);
+		curr = curr->next;
+		MALI_DEBUG_ASSERT(n++ < 100);
+	}
+
+	if (NULL == prev) {
+		/* Lock was at the head of the list. */
+		lock_lookup_list = curr->next;
+	} else {
+		MALI_DEBUG_ASSERT_POINTER(curr);
+		MALI_DEBUG_ASSERT_POINTER(prev);
+		prev->next = curr->next;
+	}
+
+	lock->next = NULL;
+
+	if (len-1 != tracking_list_length()) {
+		printk(KERN_ERR "************ lock: %p\n", lock);
+		printk(KERN_ERR "************ before: %d *** after: %d ****\n", len, tracking_list_length());
+		dump_lock_tracking_list();
+		MALI_DEBUG_ASSERT_POINTER(NULL);
+	}
+
+	spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag);
+}
+
+/* Translate a lock order to its symbolic name for diagnostic printouts;
+ * unknown orders map to the empty string. */
+static const char * const lock_order_to_string(_mali_osk_lock_order_t order)
+{
+	switch (order) {
+	case _MALI_OSK_LOCK_ORDER_SESSIONS:
+		return "_MALI_OSK_LOCK_ORDER_SESSIONS";
+	case _MALI_OSK_LOCK_ORDER_MEM_SESSION:
+		return "_MALI_OSK_LOCK_ORDER_MEM_SESSION";
+	case _MALI_OSK_LOCK_ORDER_MEM_INFO:
+		return "_MALI_OSK_LOCK_ORDER_MEM_INFO";
+	case _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE:
+		return "_MALI_OSK_LOCK_ORDER_MEM_PT_CACHE";
+	case _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP:
+		return "_MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP";
+	case _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL:
+		return "_MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL";
+	case _MALI_OSK_LOCK_ORDER_GROUP:
+		return "_MALI_OSK_LOCK_ORDER_GROUP";
+	case _MALI_OSK_LOCK_ORDER_SCHEDULER:
+		return "_MALI_OSK_LOCK_ORDER_SCHEDULER";
+	case _MALI_OSK_LOCK_ORDER_PM_CORE_STATE:
+		return "_MALI_OSK_LOCK_ORDER_PM_CORE_STATE";
+	case _MALI_OSK_LOCK_ORDER_L2_COMMAND:
+		return "_MALI_OSK_LOCK_ORDER_L2_COMMAND";
+	case _MALI_OSK_LOCK_ORDER_PROFILING:
+		return "_MALI_OSK_LOCK_ORDER_PROFILING";
+	case _MALI_OSK_LOCK_ORDER_L2_COUNTER:
+		return "_MALI_OSK_LOCK_ORDER_L2_COUNTER";
+	case _MALI_OSK_LOCK_ORDER_UTILIZATION:
+		return "_MALI_OSK_LOCK_ORDER_UTILIZATION";
+	case _MALI_OSK_LOCK_ORDER_PM_EXECUTE:
+		return "_MALI_OSK_LOCK_ORDER_PM_EXECUTE";
+	case _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS:
+		return "_MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS";
+	default:
+		return "";
+	}
+}
+#endif /* LOCK_ORDER_CHECKING */
+#endif /* DEBUG */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_locks.h b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_locks.h
new file mode 100644 (file)
index 0000000..289ed50
--- /dev/null
@@ -0,0 +1,326 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.h
+ * Defines OS abstraction of lock and mutex
+ */
+#ifndef _MALI_OSK_LOCKS_H
+#define _MALI_OSK_LOCKS_H
+
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+
+#include <linux/slab.h>
+
+#include "mali_osk_types.h"
+
+#ifdef __cplusplus /* fix: was "_cplusplus" (typo), so the extern "C" guard never took effect */
+extern "C" {
+#endif
+
+       /* When DEBUG is enabled, this struct will be used to track owner, mode and order checking */
+#ifdef DEBUG
+       struct _mali_osk_lock_debug_s {
+               u32 owner;
+               _mali_osk_lock_flags_t orig_flags;
+               _mali_osk_lock_order_t order;
+               struct _mali_osk_lock_debug_s *next;
+       };
+#endif
+
+       /* Abstraction of spinlock_t */
+       struct _mali_osk_spinlock_s {
+#ifdef DEBUG
+               struct _mali_osk_lock_debug_s checker;
+#endif
+               spinlock_t spinlock;
+       };
+
+       /* Abstraction of spinlock_t and a lock flag which is used to store the register state before locking */
+       struct _mali_osk_spinlock_irq_s {
+#ifdef DEBUG
+               struct _mali_osk_lock_debug_s checker;
+#endif
+
+               spinlock_t spinlock;
+               unsigned long flags;
+       };
+
+       /* Abstraction of rw_semaphore in OS */
+       struct _mali_osk_mutex_rw_s {
+#ifdef DEBUG
+               struct _mali_osk_lock_debug_s checker;
+               _mali_osk_lock_mode_t mode;
+#endif
+
+               struct rw_semaphore rw_sema;
+       };
+
+       /* Mutex and mutex_interruptible functions share the same osk mutex struct */
+       struct _mali_osk_mutex_s {
+#ifdef DEBUG
+               struct _mali_osk_lock_debug_s checker;
+#endif
+               struct mutex mutex;
+       };
+
+#ifdef DEBUG
+       /** @brief _mali_osk_locks_debug_init/add/remove() functions are declared when DEBUG is enabled and
+        * defined in file mali_osk_locks.c. When LOCK_ORDER_CHECKING is enabled, calling these functions when we
+        * init/lock/unlock a lock/mutex, we could track lock order of a given tid. */
+       void _mali_osk_locks_debug_init(struct _mali_osk_lock_debug_s *checker, _mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order);
+       void _mali_osk_locks_debug_add(struct _mali_osk_lock_debug_s *checker);
+       void _mali_osk_locks_debug_remove(struct _mali_osk_lock_debug_s *checker);
+
+       /** @brief This function can return a given lock's owner when DEBUG is enabled. */
+       static inline u32 _mali_osk_lock_get_owner(struct _mali_osk_lock_debug_s *lock)
+       {
+               return lock->owner;
+       }
+#else
+#define _mali_osk_locks_debug_init(x, y, z) do {} while (0)
+#define _mali_osk_locks_debug_add(x) do {} while (0)
+#define _mali_osk_locks_debug_remove(x) do {} while (0)
+#endif
+
+       /** @brief Before use _mali_osk_spin_lock, init function should be used to allocate memory and initial spinlock*/
+       static inline _mali_osk_spinlock_t *_mali_osk_spinlock_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+       {
+               _mali_osk_spinlock_t *lock = NULL;
+
+               lock = kmalloc(sizeof(_mali_osk_spinlock_t), GFP_KERNEL);
+               if (NULL == lock) {
+                       return NULL;
+               }
+               spin_lock_init(&lock->spinlock);
+               _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+               return lock;
+       }
+
+       /** @brief Lock a spinlock */
+       static inline void  _mali_osk_spinlock_lock(_mali_osk_spinlock_t *lock)
+       {
+               BUG_ON(NULL == lock);
+               spin_lock(&lock->spinlock);
+               _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+       }
+
+       /** @brief Unlock a spinlock */
+       static inline void _mali_osk_spinlock_unlock(_mali_osk_spinlock_t *lock)
+       {
+               BUG_ON(NULL == lock);
+               _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+               spin_unlock(&lock->spinlock);
+       }
+
+       /** @brief Free a memory block which the argument lock pointed to and its type must be
+        * _mali_osk_spinlock_t *. */
+       static inline void _mali_osk_spinlock_term(_mali_osk_spinlock_t *lock)
+       {
+               /* Parameter validation  */
+               BUG_ON(NULL == lock);
+
+               /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+               kfree(lock);
+       }
+
+       /** @brief Before _mali_osk_spinlock_irq_lock/unlock/term() is called, init function should be
+        * called to initial spinlock and flags in struct _mali_osk_spinlock_irq_t. */
+       static inline _mali_osk_spinlock_irq_t *_mali_osk_spinlock_irq_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+       {
+               _mali_osk_spinlock_irq_t *lock = NULL;
+               lock = kmalloc(sizeof(_mali_osk_spinlock_irq_t), GFP_KERNEL);
+
+               if (NULL == lock) {
+                       return NULL;
+               }
+
+               lock->flags = 0;
+               spin_lock_init(&lock->spinlock);
+               _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+               return lock;
+       }
+
+       /** @brief Lock spinlock and save the register's state */
+       static inline void _mali_osk_spinlock_irq_lock(_mali_osk_spinlock_irq_t *lock)
+       {
+               unsigned long tmp_flags;
+
+               BUG_ON(NULL == lock);
+               spin_lock_irqsave(&lock->spinlock, tmp_flags);
+               lock->flags = tmp_flags;
+               _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+       }
+
+       /** @brief Unlock spinlock with saved register's state */
+       static inline void _mali_osk_spinlock_irq_unlock(_mali_osk_spinlock_irq_t *lock)
+       {
+               BUG_ON(NULL == lock);
+               _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+               spin_unlock_irqrestore(&lock->spinlock, lock->flags);
+       }
+
+       /** @brief Destroy a given memory block which lock pointed to, and the lock type must be
+        * _mali_osk_spinlock_irq_t *. */
+       static inline void _mali_osk_spinlock_irq_term(_mali_osk_spinlock_irq_t *lock)
+       {
+               /* Parameter validation  */
+               BUG_ON(NULL == lock);
+
+               /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+               kfree(lock);
+       }
+
+       /** @brief Before _mali_osk_mutex_rw_wait/signal/term() is called, we should call
+        * _mali_osk_mutex_rw_init() to kmalloc a memory block and initial part of elements in it. */
+       static inline _mali_osk_mutex_rw_t *_mali_osk_mutex_rw_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+       {
+               _mali_osk_mutex_rw_t *lock = NULL;
+
+               lock = kmalloc(sizeof(_mali_osk_mutex_rw_t), GFP_KERNEL);
+
+               if (NULL == lock) {
+                       return NULL;
+               }
+
+               init_rwsem(&lock->rw_sema);
+               _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+               return lock;
+       }
+
+       /** @brief When call _mali_osk_mutex_rw_wait/signal() functions, the second argument mode
+        * should be assigned with value _MALI_OSK_LOCKMODE_RO or _MALI_OSK_LOCKMODE_RW */
+       static inline void _mali_osk_mutex_rw_wait(_mali_osk_mutex_rw_t *lock, _mali_osk_lock_mode_t mode)
+       {
+               BUG_ON(NULL == lock);
+               BUG_ON(!(_MALI_OSK_LOCKMODE_RO == mode || _MALI_OSK_LOCKMODE_RW == mode));
+
+               if (mode == _MALI_OSK_LOCKMODE_RO) {
+                       down_read(&lock->rw_sema);
+               } else {
+                       down_write(&lock->rw_sema);
+               }
+
+#ifdef DEBUG
+               if (mode == _MALI_OSK_LOCKMODE_RW) {
+                       lock->mode = mode;
+               } else { /* mode == _MALI_OSK_LOCKMODE_RO */
+                       lock->mode = mode;
+               }
+               _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+#endif
+       }
+
+       /** @brief Up lock->rw_sema with up_read/write() according to the argument mode's value. */
+       static inline void  _mali_osk_mutex_rw_signal(_mali_osk_mutex_rw_t *lock, _mali_osk_lock_mode_t mode)
+       {
+               BUG_ON(NULL == lock);
+               BUG_ON(!(_MALI_OSK_LOCKMODE_RO == mode || _MALI_OSK_LOCKMODE_RW == mode));
+#ifdef DEBUG
+               /* make sure the thread releasing the lock actually was the owner */
+               if (mode == _MALI_OSK_LOCKMODE_RW) {
+                       _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+                       /* This lock now has no owner */
+                       lock->checker.owner = 0;
+               }
+#endif
+
+               if (mode == _MALI_OSK_LOCKMODE_RO) {
+                       up_read(&lock->rw_sema);
+               } else {
+                       up_write(&lock->rw_sema);
+               }
+       }
+
+       /** @brief Free a given memory block which lock pointed to and its type must be
+        * _mali_osk_mutex_rw_t *. */
+       static inline void _mali_osk_mutex_rw_term(_mali_osk_mutex_rw_t *lock)
+       {
+               /* Parameter validation  */
+               BUG_ON(NULL == lock);
+
+               /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+               kfree(lock);
+       }
+
+       /** @brief Mutex & mutex_interruptible share the same init and term function, because they have the
+        * same osk mutex struct, and the difference between them is which locking function they use */
+       static inline _mali_osk_mutex_t *_mali_osk_mutex_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+       {
+               _mali_osk_mutex_t *lock = NULL;
+
+               lock = kmalloc(sizeof(_mali_osk_mutex_t), GFP_KERNEL);
+
+               if (NULL == lock) {
+                       return NULL;
+               }
+               mutex_init(&lock->mutex);
+
+               _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+               return lock;
+       }
+
+       /** @brief  Lock the lock->mutex with mutex_lock_interruptible function */
+       static inline _mali_osk_errcode_t _mali_osk_mutex_wait_interruptible(_mali_osk_mutex_t *lock)
+       {
+               _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+               BUG_ON(NULL == lock);
+
+               if (mutex_lock_interruptible(&lock->mutex)) {
+                       printk(KERN_WARNING "Mali: Can not lock mutex\n");
+                       err = _MALI_OSK_ERR_RESTARTSYSCALL;
+               }
+
+               _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+               return err;
+       }
+
+       /** @brief Unlock the lock->mutex which is locked with mutex_lock_interruptible() function. */
+       static inline void _mali_osk_mutex_signal_interruptible(_mali_osk_mutex_t *lock)
+       {
+               BUG_ON(NULL == lock);
+               _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+               mutex_unlock(&lock->mutex);
+       }
+
+       /** @brief Lock the lock->mutex just with the mutex_lock() function, which cannot be interrupted. */
+       static inline void _mali_osk_mutex_wait(_mali_osk_mutex_t *lock)
+       {
+               BUG_ON(NULL == lock);
+               mutex_lock(&lock->mutex);
+               _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+       }
+
+       /** @brief Unlock the lock->mutex which is locked with mutex_lock() function. */
+       static inline void _mali_osk_mutex_signal(_mali_osk_mutex_t *lock)
+       {
+               BUG_ON(NULL == lock);
+               _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+               mutex_unlock(&lock->mutex);
+       }
+
+       /** @brief Free the memory block that the lock argument points to. */
+       static inline void _mali_osk_mutex_term(_mali_osk_mutex_t *lock)
+       {
+               /* Parameter validation  */
+               BUG_ON(NULL == lock);
+
+               /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+               kfree(lock);
+       }
+
+#ifdef __cplusplus /* fix: was "_cplusplus" (typo); must mirror the opening extern "C" guard */
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_low_level_mem.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_low_level_mem.c
new file mode 100644 (file)
index 0000000..46e3a46
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_low_level_mem.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+
+void _mali_osk_mem_barrier( void )
+{
+       mb();
+}
+
+void _mali_osk_write_mem_barrier( void )
+{
+       wmb();
+}
+
+mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description )
+{
+       return (mali_io_address)ioremap_nocache(phys, size);
+}
+
+void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address virt )
+{
+       iounmap((void*)virt);
+}
+
+inline _mali_osk_errcode_t _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description )
+{
+#if MALI_LICENSE_IS_GPL
+       return _MALI_OSK_ERR_OK; /* GPL driver gets the mem region for the resources registered automatically */
+#else
+       return ((NULL == request_mem_region(phys, size, description)) ? _MALI_OSK_ERR_NOMEM : _MALI_OSK_ERR_OK);
+#endif
+}
+
+inline void _mali_osk_mem_unreqregion( u32 phys, u32 size ) /* 'inline' moved before the type (C convention; silences -Wold-style-declaration) */
+{
+#if !MALI_LICENSE_IS_GPL
+       release_mem_region(phys, size);
+#endif
+}
+
+inline void _mali_osk_mem_iowrite32_relaxed( volatile mali_io_address addr, u32 offset, u32 val ) /* raw (no barrier) little-endian register write */
+{
+       __raw_writel(cpu_to_le32(val),((u8*)addr) + offset);
+}
+
+inline u32 _mali_osk_mem_ioread32( volatile mali_io_address addr, u32 offset ) /* 'inline' moved before the type (C convention) */
+{
+       return ioread32(((u8*)addr) + offset);
+}
+
+inline void _mali_osk_mem_iowrite32( volatile mali_io_address addr, u32 offset, u32 val ) /* ordered register write (iowrite32 includes barriers) */
+{
+       iowrite32(val, ((u8*)addr) + offset);
+}
+
+void _mali_osk_cache_flushall( void )
+{
+       /** @note Cached memory is not currently supported in this implementation */
+}
+
+void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size )
+{
+       _mali_osk_write_mem_barrier();
+}
+
+u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size)
+{
+#define MALI_MEM_SAFE_COPY_BLOCK_SIZE 4096
+       u32 retval = 0;
+       void *temp_buf;
+
+       temp_buf = kmalloc(MALI_MEM_SAFE_COPY_BLOCK_SIZE, GFP_KERNEL);
+       if (NULL != temp_buf) {
+               u32 bytes_left_to_copy = size;
+               u32 i;
+               for (i = 0; i < size; i += MALI_MEM_SAFE_COPY_BLOCK_SIZE) {
+                       u32 size_to_copy;
+                       u32 size_copied;
+                       u32 bytes_left;
+
+                       if (bytes_left_to_copy > MALI_MEM_SAFE_COPY_BLOCK_SIZE) {
+                               size_to_copy = MALI_MEM_SAFE_COPY_BLOCK_SIZE;
+                       } else {
+                               size_to_copy = bytes_left_to_copy;
+                       }
+
+                       bytes_left = copy_from_user(temp_buf, ((char*)src) + i, size_to_copy);
+                       size_copied = size_to_copy - bytes_left;
+
+                       bytes_left = copy_to_user(((char*)dest) + i, temp_buf, size_copied);
+                       size_copied -= bytes_left;
+
+                       bytes_left_to_copy -= size_copied;
+                       retval += size_copied;
+
+                       if (size_copied != size_to_copy) {
+                               break; /* Early out, we were not able to copy this entire block */
+                       }
+               }
+
+               kfree(temp_buf);
+       }
+
+       return retval;
+}
+
+_mali_osk_errcode_t _mali_ukk_mem_write_safe(_mali_uk_mem_write_safe_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+
+       if (NULL == args->ctx) {
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       }
+
+       /* Return number of bytes actually copied */
+       args->size = _mali_osk_mem_write_safe(args->dest, args->src, args->size);
+       return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_mali.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_mali.c
new file mode 100644 (file)
index 0000000..79111ec
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.c
+ * Implementation of the OS abstraction layer which is specific for the Mali kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/mali/mali_utgard.h>
+
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h" /* MALI_xxx macros */
+#include "mali_osk.h"           /* kernel side OS functions */
+#include "mali_kernel_linux.h"
+
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res)
+{
+       int i;
+
+       if (NULL == mali_platform_device) {
+               /* Not connected to a device */
+               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       }
+
+       for (i = 0; i < mali_platform_device->num_resources; i++) {
+               if (IORESOURCE_MEM == resource_type(&(mali_platform_device->resource[i])) &&
+                   mali_platform_device->resource[i].start == addr) {
+                       if (NULL != res) {
+                               res->base = addr;
+                               res->description = mali_platform_device->resource[i].name;
+
+                               /* Any (optional) IRQ resource belonging to this resource will follow */
+                               if ((i + 1) < mali_platform_device->num_resources &&
+                                   IORESOURCE_IRQ == resource_type(&(mali_platform_device->resource[i+1]))) {
+                                       res->irq = mali_platform_device->resource[i+1].start;
+                               } else {
+                                       res->irq = -1;
+                               }
+                       }
+                       return _MALI_OSK_ERR_OK;
+               }
+       }
+
+       return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
+u32 _mali_osk_resource_base_address(void)
+{
+       u32 lowest_addr = 0xFFFFFFFF;
+       u32 ret = 0;
+
+       if (NULL != mali_platform_device) {
+               int i;
+               for (i = 0; i < mali_platform_device->num_resources; i++) {
+                       if (mali_platform_device->resource[i].flags & IORESOURCE_MEM &&
+                           mali_platform_device->resource[i].start < lowest_addr) {
+                               lowest_addr = mali_platform_device->resource[i].start;
+                               ret = lowest_addr;
+                       }
+               }
+       }
+
+       return ret;
+}
+
+_mali_osk_errcode_t _mali_osk_device_data_get(struct _mali_osk_device_data *data)
+{
+       MALI_DEBUG_ASSERT_POINTER(data);
+
+       if (NULL != mali_platform_device) {
+               struct mali_gpu_device_data* os_data = NULL;
+
+               os_data = (struct mali_gpu_device_data*)mali_platform_device->dev.platform_data;
+               if (NULL != os_data) {
+                       /* Copy data from OS dependant struct to Mali neutral struct (identical!) */
+                       data->dedicated_mem_start = os_data->dedicated_mem_start;
+                       data->dedicated_mem_size = os_data->dedicated_mem_size;
+                       data->shared_mem_size = os_data->shared_mem_size;
+                       data->fb_start = os_data->fb_start;
+                       data->fb_size = os_data->fb_size;
+                       data->max_job_runtime = os_data->max_job_runtime;
+                       data->utilization_interval = os_data->utilization_interval;
+                       data->utilization_callback = os_data->utilization_callback;
+                       data->pmu_switch_delay = os_data->pmu_switch_delay;
+                       data->set_freq_callback = os_data->set_freq_callback;
+
+                       memcpy(data->pmu_domain_config, os_data->pmu_domain_config, sizeof(os_data->pmu_domain_config));
+                       return _MALI_OSK_ERR_OK;
+               }
+       }
+
+       return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
+mali_bool _mali_osk_shared_interrupts(void)
+{
+       u32 irqs[128];
+       u32 i, j, irq, num_irqs_found = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+       MALI_DEBUG_ASSERT(128 >= mali_platform_device->num_resources);
+
+       for (i = 0; i < mali_platform_device->num_resources; i++) {
+               if (IORESOURCE_IRQ & mali_platform_device->resource[i].flags) {
+                       irq = mali_platform_device->resource[i].start;
+
+                       for (j = 0; j < num_irqs_found; ++j) {
+                               if (irq == irqs[j]) {
+                                       return MALI_TRUE;
+                               }
+                       }
+
+                       irqs[num_irqs_found++] = irq;
+               }
+       }
+
+       return MALI_FALSE;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_math.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_math.c
new file mode 100644 (file)
index 0000000..4e138bd
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_math.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/bitops.h>
+
+u32 _mali_osk_clz( u32 input )
+{
+       return 32-fls(input);
+}
+
+u32 _mali_osk_fls( u32 input )
+{
+       return fls(input);
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_memory.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_memory.c
new file mode 100644 (file)
index 0000000..5e57f97
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_memory.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+inline void *_mali_osk_calloc( u32 n, u32 size ) /* 'inline' moved before the type (C convention) */
+{
+       return kcalloc(n, size, GFP_KERNEL); /* zero-initialized n*size byte allocation */
+}
+
+inline void *_mali_osk_malloc( u32 size ) /* 'inline' moved before the type (C convention) */
+{
+       return kmalloc(size, GFP_KERNEL); /* uninitialized kernel heap allocation */
+}
+
+inline void _mali_osk_free( void *ptr ) /* 'inline' moved before the type (C convention) */
+{
+       kfree(ptr); /* kfree(NULL) is a no-op, so no guard needed */
+}
+
+inline void *_mali_osk_valloc( u32 size ) /* 'inline' moved before the type (C convention) */
+{
+       return vmalloc(size); /* virtually contiguous allocation for large buffers */
+}
+
+inline void _mali_osk_vfree( void *ptr ) /* 'inline' moved before the type (C convention) */
+{
+       vfree(ptr); /* counterpart of _mali_osk_valloc */
+}
+
+inline void *_mali_osk_memcpy( void *dst, const void *src, u32 len ) /* 'inline' moved before the type (C convention) */
+{
+       return memcpy(dst, src, len); /* regions must not overlap */
+}
+
+inline void *_mali_osk_memset( void *s, u32 c, u32 n ) /* 'inline' moved before the type (C convention) */
+{
+       return memset(s, c, n); /* fills n bytes with the low byte of c */
+}
+
+mali_bool _mali_osk_mem_check_allocated( u32 max_allocated )
+{
+       /* No need to prevent an out-of-memory dialogue appearing on Linux,
+        * so we always return MALI_TRUE.
+        */
+       return MALI_TRUE;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_misc.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_misc.c
new file mode 100644 (file)
index 0000000..49d1afe
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_misc.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+
+void _mali_osk_dbgmsg( const char *fmt, ... )
+{
+       va_list args;
+       va_start(args, fmt);
+       vprintk(fmt, args);
+       va_end(args);
+}
+
+u32 _mali_osk_snprintf( char *buf, u32 size, const char *fmt, ... )
+{
+       int res;
+       va_list args;
+       va_start(args, fmt);
+
+       res = vscnprintf(buf, (size_t)size, fmt, args);
+
+       va_end(args);
+       return res;
+}
+
+void _mali_osk_abort(void)
+{
+       /* make a simple fault by dereferencing a NULL pointer */
+       dump_stack();
+       *(int *)0 = 0;
+}
+
+void _mali_osk_break(void)
+{
+       _mali_osk_abort();
+}
+
+u32 _mali_osk_get_pid(void)
+{
+       /* Thread group ID is the process ID on Linux */
+       return (u32)current->tgid;
+}
+
+u32 _mali_osk_get_tid(void)
+{
+       /* pid is actually identifying the thread on Linux */
+       u32 tid = current->pid;
+
+       /* If the pid is 0 the core was idle.  Instead of returning 0 we return a special number
+        * identifying which core we are on. */
+       if (0 == tid) {
+               tid = -(1 + raw_smp_processor_id());
+       }
+
+       return tid;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_notification.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_notification.c
new file mode 100644 (file)
index 0000000..713ae36
--- /dev/null
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_notification.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/**
+ * Declaration of the notification queue object type
+ * Contains a linked list of notification pending delivery to user space.
+ * It also contains a wait queue of exclusive waiters blocked in the ioctl
+ * When a new notification is posted a single thread is resumed.
+ */
+struct _mali_osk_notification_queue_t_struct {
+       spinlock_t mutex; /**< Mutex protecting the list */
+       wait_queue_head_t receive_queue; /**< Threads waiting for new entries to the queue */
+       struct list_head head; /**< List of notifications waiting to be picked up */
+};
+
+typedef struct _mali_osk_notification_wrapper_t_struct {
+       struct list_head list;           /**< Internal linked list variable */
+       _mali_osk_notification_t data;   /**< Notification data */
+} _mali_osk_notification_wrapper_t;
+
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void )
+{
+       _mali_osk_notification_queue_t *        result;
+
+       result = (_mali_osk_notification_queue_t *)kmalloc(sizeof(_mali_osk_notification_queue_t), GFP_KERNEL);
+       if (NULL == result) return NULL;
+
+       spin_lock_init(&result->mutex);
+       init_waitqueue_head(&result->receive_queue);
+       INIT_LIST_HEAD(&result->head);
+
+       return result;
+}
+
+_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size )
+{
+       /* OPT Recycling of notification objects */
+       _mali_osk_notification_wrapper_t *notification;
+
+       notification = (_mali_osk_notification_wrapper_t *)kmalloc( sizeof(_mali_osk_notification_wrapper_t) + size,
+                      GFP_KERNEL | __GFP_HIGH | __GFP_REPEAT);
+       if (NULL == notification) {
+               MALI_DEBUG_PRINT(1, ("Failed to create a notification object\n"));
+               return NULL;
+       }
+
+       /* Init the list */
+       INIT_LIST_HEAD(&notification->list);
+
+       if (0 != size) {
+               notification->data.result_buffer = ((u8*)notification) + sizeof(_mali_osk_notification_wrapper_t);
+       } else {
+               notification->data.result_buffer = NULL;
+       }
+
+       /* set up the non-allocating fields */
+       notification->data.notification_type = type;
+       notification->data.result_buffer_size = size;
+
+       /* all ok */
+       return &(notification->data);
+}
+
+void _mali_osk_notification_delete( _mali_osk_notification_t *object )
+{
+       _mali_osk_notification_wrapper_t *notification;
+       MALI_DEBUG_ASSERT_POINTER( object );
+
+       notification = container_of( object, _mali_osk_notification_wrapper_t, data );
+
+       /* Free the container */
+       kfree(notification);
+}
+
+void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue )
+{
+       _mali_osk_notification_t *result;
+       MALI_DEBUG_ASSERT_POINTER( queue );
+
+       while (_MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(queue, &result)) {
+               _mali_osk_notification_delete( result );
+       }
+
+       /* not much to do, just free the memory */
+       kfree(queue);
+}
+void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object )
+{
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       unsigned long irq_flags;
+#endif
+
+       _mali_osk_notification_wrapper_t *notification;
+       MALI_DEBUG_ASSERT_POINTER( queue );
+       MALI_DEBUG_ASSERT_POINTER( object );
+
+       notification = container_of( object, _mali_osk_notification_wrapper_t, data );
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       spin_lock_irqsave(&queue->mutex, irq_flags);
+#else
+       spin_lock(&queue->mutex);
+#endif
+
+       list_add_tail(&notification->list, &queue->head);
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       spin_unlock_irqrestore(&queue->mutex, irq_flags);
+#else
+       spin_unlock(&queue->mutex);
+#endif
+
+       /* and wake up one possible exclusive waiter */
+       wake_up(&queue->receive_queue);
+}
+
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result )
+{
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       unsigned long irq_flags;
+#endif
+
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       _mali_osk_notification_wrapper_t *wrapper_object;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       spin_lock_irqsave(&queue->mutex, irq_flags);
+#else
+       spin_lock(&queue->mutex);
+#endif
+
+       if (!list_empty(&queue->head)) {
+               wrapper_object = list_entry(queue->head.next, _mali_osk_notification_wrapper_t, list);
+               *result = &(wrapper_object->data);
+               list_del_init(&wrapper_object->list);
+               ret = _MALI_OSK_ERR_OK;
+       }
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       spin_unlock_irqrestore(&queue->mutex, irq_flags);
+#else
+       spin_unlock(&queue->mutex);
+#endif
+
+       return ret;
+}
+
+_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result )
+{
+       /* check input */
+       MALI_DEBUG_ASSERT_POINTER( queue );
+       MALI_DEBUG_ASSERT_POINTER( result );
+
+       /* default result */
+       *result = NULL;
+
+       if (wait_event_interruptible(queue->receive_queue,
+                                    _MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(queue, result))) {
+               return _MALI_OSK_ERR_RESTARTSYSCALL;
+       }
+
+       return _MALI_OSK_ERR_OK; /* all ok */
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_pm.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_pm.c
new file mode 100644 (file)
index 0000000..59130a1
--- /dev/null
@@ -0,0 +1,109 @@
+/**
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_pm.c
+ * Implementation of the callback functions from common power management
+ */
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_linux.h"
+
+static _mali_osk_atomic_t mali_pm_ref_count;
+
+/* Initialise the PM reference counter (used only for debug accounting). */
+void _mali_osk_pm_dev_enable(void)
+{
+       _mali_osk_atomic_init(&mali_pm_ref_count, 0);
+}
+
+/* Tear down the PM reference counter. */
+void _mali_osk_pm_dev_disable(void)
+{
+       _mali_osk_atomic_term(&mali_pm_ref_count);
+}
+
+/* Can NOT run in atomic context */
+/**
+ * Take a runtime-PM reference on the Mali device, resuming it
+ * synchronously if needed (pm_runtime_get_sync() may sleep).
+ *
+ * @return _MALI_OSK_ERR_OK on success (always when CONFIG_PM_RUNTIME is
+ *         not set), _MALI_OSK_ERR_FAULT if the resume failed.
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_add(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+       int err;
+       MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+       err = pm_runtime_get_sync(&(mali_platform_device->dev));
+       /* pm_runtime_mark_last_busy() only exists from 2.6.37 onwards. */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+       pm_runtime_mark_last_busy(&(mali_platform_device->dev));
+#endif
+       if (0 > err) {
+               MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get_sync() returned error code %d\n", err));
+               return _MALI_OSK_ERR_FAULT;
+       }
+       _mali_osk_atomic_inc(&mali_pm_ref_count);
+       MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref taken (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
+#endif
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Can run in atomic context */
+/* Drop a reference taken with _mali_osk_pm_dev_ref_add(). On >= 2.6.37
+ * the autosuspend variant is used so the device only powers down after
+ * its autosuspend delay expires. */
+void _mali_osk_pm_dev_ref_dec(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+       MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+       _mali_osk_atomic_dec(&mali_pm_ref_count);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+       pm_runtime_mark_last_busy(&(mali_platform_device->dev));
+       pm_runtime_put_autosuspend(&(mali_platform_device->dev));
+#else
+       pm_runtime_put(&(mali_platform_device->dev));
+#endif
+       MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
+#endif
+}
+
+/* Can run in atomic context */
+/* Take a runtime-PM reference WITHOUT resuming the device
+ * (pm_runtime_get_noresume() never sleeps).
+ *
+ * @return MALI_TRUE if a power reference was already held
+ *         (mali_pm_ref_count > 0), i.e. the device can be assumed powered;
+ *         MALI_FALSE otherwise. Always MALI_TRUE without CONFIG_PM_RUNTIME.
+ */
+mali_bool _mali_osk_pm_dev_ref_add_no_power_on(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+       u32 ref;
+       MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+       pm_runtime_get_noresume(&(mali_platform_device->dev));
+       ref = _mali_osk_atomic_read(&mali_pm_ref_count);
+       MALI_DEBUG_PRINT(4, ("Mali OSK PM: No-power ref taken (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
+       return ref > 0 ? MALI_TRUE : MALI_FALSE;
+#else
+       return MALI_TRUE;
+#endif
+}
+
+/* Can run in atomic context */
+/* Release a reference taken with _mali_osk_pm_dev_ref_add_no_power_on().
+ * Deliberately does not touch mali_pm_ref_count (the add side did not
+ * increment it either). */
+void _mali_osk_pm_dev_ref_dec_no_power_on(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+       MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+       pm_runtime_put_autosuspend(&(mali_platform_device->dev));
+#else
+       pm_runtime_put(&(mali_platform_device->dev));
+#endif
+       MALI_DEBUG_PRINT(4, ("Mali OSK PM: No-power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
+#endif
+}
+
+/* Wait for any pending runtime-PM transitions of the Mali device to
+ * complete (wraps pm_runtime_barrier()). No-op without CONFIG_PM_RUNTIME. */
+void _mali_osk_pm_dev_barrier(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+       /* Every other PM helper in this file asserts the device pointer;
+        * do the same here so a missing device is caught before the deref. */
+       MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+       pm_runtime_barrier(&(mali_platform_device->dev));
+#endif
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_profiling.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_profiling.c
new file mode 100644 (file)
index 0000000..38c6fba
--- /dev/null
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/module.h>
+
+#include <mali_profiling_gator_api.h>
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_uk_types.h"
+#include "mali_osk_profiling.h"
+#include "mali_linux_trace.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_pp_scheduler.h"
+#include "mali_l2_cache.h"
+#include "mali_user_settings_db.h"
+
+/**
+ * Initialise OSK profiling.
+ *
+ * @param auto_start MALI_TRUE to enable software event reporting immediately.
+ * @return Always _MALI_OSK_ERR_OK.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start)
+{
+       if (MALI_TRUE == auto_start) {
+               mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/*
+ * The functions below are no-op stubs that keep the OSK profiling
+ * interface complete: in this build no internal event buffer is kept,
+ * the only live path is the gator tracepoint in
+ * _mali_osk_profiling_report_sw_counters().
+ */
+void _mali_osk_profiling_term(void)
+{
+       /* Nothing to do */
+}
+
+_mali_osk_errcode_t _mali_osk_profiling_start(u32 * limit)
+{
+       /* Nothing to do */
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_osk_profiling_stop(u32 *count)
+{
+       /* Nothing to do */
+       return _MALI_OSK_ERR_OK;
+}
+
+/* No buffered events in this build, so the count is always zero. */
+u32 _mali_osk_profiling_get_count(void)
+{
+       return 0;
+}
+
+_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5])
+{
+       /* Nothing to do */
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_osk_profiling_clear(void)
+{
+       /* Nothing to do */
+       return _MALI_OSK_ERR_OK;
+}
+
+mali_bool _mali_osk_profiling_is_recording(void)
+{
+       return MALI_FALSE;
+}
+
+mali_bool _mali_osk_profiling_have_recording(void)
+{
+       return MALI_FALSE;
+}
+
+/* Forward a block of software counters to the gator tracepoint, tagged
+ * with the calling process/thread. */
+void _mali_osk_profiling_report_sw_counters(u32 *counters)
+{
+       trace_mali_sw_counters(_mali_osk_get_pid(), _mali_osk_get_tid(), NULL, counters);
+}
+
+
+/*
+ * User-to-kernel (UKK) entry points: thin wrappers that unpack the
+ * ioctl argument structs and forward to the OSK profiling layer.
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args)
+{
+       return _mali_osk_profiling_start(&args->limit);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
+{
+       /* Always add process and thread identificator in the first two data elements for events from user space */
+       _mali_osk_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(), args->data[2], args->data[3], args->data[4]);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args)
+{
+       return _mali_osk_profiling_stop(&args->count);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args)
+{
+       return _mali_osk_profiling_get_event(args->index, &args->timestamp, &args->event_id, args->data);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args)
+{
+       return _mali_osk_profiling_clear();
+}
+
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args)
+{
+       _mali_osk_profiling_report_sw_counters(args->counters);
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Called by gator.ko to set HW counters
+ *
+ * @param counter_id The counter ID.
+ * @param event_id Event ID that the counter should count (HW counter value from TRM).
+ *
+ * @return 1 on success, 0 on failure.
+ */
+int _mali_profiling_set_event(u32 counter_id, s32 event_id)
+{
+       /* Vertex processor (GP): two counter sources on the single GP core. */
+       if (COUNTER_VP_0_C0 == counter_id) {
+               mali_gp_job_set_gp_counter_src0(event_id);
+       } else if (COUNTER_VP_0_C1 == counter_id) {
+               mali_gp_job_set_gp_counter_src1(event_id);
+       } else if (COUNTER_FP_0_C0 <= counter_id && COUNTER_FP_7_C1 >= counter_id) {
+               /*
+                * Two compatibility notes for this function:
+                *
+                * 1) Previously the DDK allowed per core counters.
+                *
+                *    This did not make much sense on Mali-450 with the "virtual PP core" concept,
+                *    so this option was removed, and only the same pair of HW counters was allowed on all cores,
+                *    beginning with r3p2 release.
+                *
+                *    Starting with r4p0, it is now possible to set different HW counters for the different sub jobs.
+                *    This should be almost the same, since sub job 0 is designed to run on core 0,
+                *    sub job 1 on core 1, and so on.
+                *
+                *    The scheduling of PP sub jobs is not predictable, and this often led to situations where core 0 ran 2
+                *    sub jobs, while for instance core 1 ran zero. Having the counters set per sub job would thus increase
+                *    the predictability of the returned data (as you would be guaranteed data for all the selected HW counters).
+                *
+                *    PS: Core scaling needs to be disabled in order to use this reliably (goes for both solutions).
+                *
+                *    The framework/#defines with Gator still indicates that the counter is for a particular core,
+                *    but this is internally used as a sub job ID instead (no translation needed).
+                *
+                *  2) Global/default vs per sub job counters
+                *
+                *     Releases before r3p2 had only per PP core counters.
+                *     r3p2 releases had only one set of default/global counters which applied to all PP cores
+                *     Starting with r4p0, we have both a set of default/global counters,
+                *     and individual counters per sub job (equal to per core).
+                *
+                *     To keep compatibility with Gator/DS-5/streamline, the following scheme is used:
+                *
+                *     r3p2 release; only counters set for core 0 is handled,
+                *     this is applied as the default/global set of counters, and will thus affect all cores.
+                *
+                *     r4p0 release; counters set for core 0 is applied as both the global/default set of counters,
+                *     and counters for sub job 0.
+                *     Counters set for core 1-7 is only applied for the corresponding sub job.
+                *
+                *     This should allow the DS-5/Streamline GUI to have a simple mode where it only allows setting the
+                *     values for core 0, and thus this will be applied to all PP sub jobs/cores.
+                *     Advanced mode will also be supported, where individual pairs of HW counters can be selected.
+                *
+                *     The GUI will (until it is updated) still refer to cores instead of sub jobs, but this is probably
+                *     something we can live with!
+                *
+                *     Mali-450 note: Each job is not divided into a deterministic number of sub jobs, as the HW DLBU
+                *     automatically distributes the load between whatever number of cores is available at this particular time.
+                *     A normal PP job on Mali-450 is thus considered a single (virtual) job, and it will thus only be possible
+                *     to use a single pair of HW counters (even if the job ran on multiple PP cores).
+                *     In other words, only the global/default pair of PP HW counters will be used for normal Mali-450 jobs.
+                */
+               /* FP counter IDs come in (src0, src1) pairs per sub job. */
+               u32 sub_job = (counter_id - COUNTER_FP_0_C0) >> 1;
+               u32 counter_src = (counter_id - COUNTER_FP_0_C0) & 1;
+               if (0 == counter_src) {
+                       mali_pp_job_set_pp_counter_sub_job_src0(sub_job, event_id);
+                       /* Sub job 0 also drives the global/default counter (see note 2). */
+                       if (0 == sub_job) {
+                               mali_pp_job_set_pp_counter_global_src0(event_id);
+                       }
+               } else {
+                       mali_pp_job_set_pp_counter_sub_job_src1(sub_job, event_id);
+                       if (0 == sub_job) {
+                               mali_pp_job_set_pp_counter_global_src1(event_id);
+                       }
+               }
+       } else if (COUNTER_L2_0_C0 <= counter_id && COUNTER_L2_2_C1 >= counter_id) {
+               /* L2 cache counters: a (src0, src1) pair per L2 core instance. */
+               u32 core_id = (counter_id - COUNTER_L2_0_C0) >> 1;
+               struct mali_l2_cache_core* l2_cache_core = mali_l2_cache_core_get_glob_l2_core(core_id);
+
+               if (NULL != l2_cache_core) {
+                       u32 counter_src = (counter_id - COUNTER_L2_0_C0) & 1;
+                       if (0 == counter_src) {
+                               mali_l2_cache_core_set_counter_src0(l2_cache_core, event_id);
+                       } else {
+                               mali_l2_cache_core_set_counter_src1(l2_cache_core, event_id);
+                       }
+               }
+       } else {
+               return 0; /* Failure, unknown event */
+       }
+
+       return 1; /* success */
+}
+
+/**
+ * Called by gator.ko to retrieve the L2 cache counter values for all L2 cache cores.
+ * The L2 cache counters are unique in that they are polled by gator, rather than being
+ * transmitted via the tracepoint mechanism.
+ *
+ * @param values Pointer to a _mali_profiling_l2_counter_values structure where
+ *               the counter sources and values will be output
+ * @return 0 if all went well; otherwise, return the mask with the bits set for the powered off cores
+ */
+u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values)
+{
+       struct mali_l2_cache_core *l2_cache;
+       u32 l2_cores_num = mali_l2_cache_core_get_glob_num_l2_cores();
+       u32 i;
+       u32 ret = 0;
+
+       /* NOTE(review): values->cores[] is assumed to hold at least 3 entries
+        * — confirm against _mali_profiling_l2_counter_values. */
+       MALI_DEBUG_ASSERT(l2_cores_num <= 3);
+
+       for (i = 0; i < l2_cores_num; i++) {
+               l2_cache = mali_l2_cache_core_get_glob_l2_core(i);
+
+               if (NULL == l2_cache) {
+                       continue;
+               }
+
+               if (MALI_TRUE == mali_l2_cache_lock_power_state(l2_cache)) {
+                       /* It is now safe to access the L2 cache core in order to retrieve the counters */
+                       mali_l2_cache_core_get_counter_values(l2_cache,
+                                                             &values->cores[i].source0,
+                                                             &values->cores[i].value0,
+                                                             &values->cores[i].source1,
+                                                             &values->cores[i].value1);
+               } else {
+                       /* The core was not available, set the right bit in the mask. */
+                       ret |= (1 << i);
+               }
+               /* Unlock is unconditional: lock_power_state is presumed to take
+                * the state lock even when it reports the core unpowered —
+                * TODO(review) confirm against mali_l2_cache.c. */
+               mali_l2_cache_unlock_power_state(l2_cache);
+       }
+
+       return ret;
+}
+
+/**
+ * Called by gator to control the production of profiling information at
+ * runtime. Each known action maps onto one user setting; unknown actions
+ * are ignored.
+ */
+void _mali_profiling_control(u32 action, u32 value)
+{
+       if (FBDUMP_CONTROL_ENABLE == action) {
+               mali_set_user_setting(_MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED, (value == 0 ? MALI_FALSE : MALI_TRUE));
+       } else if (FBDUMP_CONTROL_RATE == action) {
+               mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES, value);
+       } else if (SW_COUNTER_ENABLE == action) {
+               mali_set_user_setting(_MALI_UK_USER_SETTING_SW_COUNTER_ENABLED, value);
+       } else if (FBDUMP_CONTROL_RESIZE_FACTOR == action) {
+               mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR, value);
+       }
+       /* Unimplemented actions are silently ignored, as before. */
+}
+
+/**
+ * Called by gator to get mali api version.
+ */
+u32 _mali_profiling_get_api_version(void)
+{
+       return MALI_PROFILING_API_VERSION;
+}
+
+/**
+* Called by gator to get the data about Mali instance in use:
+* product id, version, number of cores
+*/
+void _mali_profiling_get_mali_version(struct _mali_profiling_mali_version *values)
+{
+       values->mali_product_id = (u32)mali_kernel_core_get_product_id();
+       values->mali_version_major = mali_kernel_core_get_gpu_major_version();
+       values->mali_version_minor = mali_kernel_core_get_gpu_minor_version();
+       values->num_of_l2_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+       values->num_of_fp_cores = mali_pp_scheduler_get_num_cores_total();
+       /* The driver always reports a single vertex processor. */
+       values->num_of_vp_cores = 1;
+}
+
+EXPORT_SYMBOL(_mali_profiling_set_event);
+EXPORT_SYMBOL(_mali_profiling_get_l2_counters);
+EXPORT_SYMBOL(_mali_profiling_control);
+EXPORT_SYMBOL(_mali_profiling_get_api_version);
+EXPORT_SYMBOL(_mali_profiling_get_mali_version);
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_specific.h b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_specific.h
new file mode 100644 (file)
index 0000000..db87b07
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_specific.h
+ * Defines per-OS Kernel level specifics, such as unusual workarounds for
+ * certain OSs.
+ */
+
+#ifndef __MALI_OSK_SPECIFIC_H__
+#define __MALI_OSK_SPECIFIC_H__
+
+#include <asm/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/dmapool.h>
+#include <linux/gfp.h>
+#include <linux/hardirq.h>
+
+#include "mali_osk_types.h"
+#include "mali_kernel_linux.h"
+
+#define MALI_STATIC_INLINE static inline
+#define MALI_NON_STATIC_INLINE inline
+
+typedef struct dma_pool * mali_dma_pool;
+
+
+/* Create a DMA pool for fixed-size, GPU-coherent allocations of @size
+ * bytes, tied to the Mali platform device. */
+MALI_STATIC_INLINE mali_dma_pool mali_dma_pool_create(u32 size, u32 alignment, u32 boundary)
+{
+       return dma_pool_create("mali-dma", &mali_platform_device->dev, size, alignment, boundary);
+}
+
+/* Destroy a pool; all entries must have been freed back to it first. */
+MALI_STATIC_INLINE void mali_dma_pool_destroy(mali_dma_pool pool)
+{
+       dma_pool_destroy(pool);
+}
+
+/* Allocate one entry from @pool; the bus address is written to @phys_addr.
+ * NOTE(review): dma_pool_alloc() takes a dma_addr_t* — passing a u32*
+ * assumes 32-bit DMA addresses on this platform; confirm for the SoC. */
+MALI_STATIC_INLINE mali_io_address mali_dma_pool_alloc(mali_dma_pool pool, u32 *phys_addr)
+{
+       return dma_pool_alloc(pool, GFP_KERNEL, phys_addr);
+}
+
+/* Return an entry (virtual + bus address pair) to @pool. */
+MALI_STATIC_INLINE void mali_dma_pool_free(mali_dma_pool pool, void* virt_addr, u32 phys_addr)
+{
+       dma_pool_free(pool, virt_addr, phys_addr);
+}
+
+
+#if MALI_ENABLE_CPU_CYCLES
+/* Reads out the clock cycle performance counter of the current cpu.
+   It is useful for cost-free (2 cycle) measuring of the time spent
+   in a code path. Sample before and after, the diff number of cycles.
+   When the CPU is idle it will not increase this clock counter.
+   It means that the counter is accurate if only spin-locks are used,
+   but mutexes may lead to too low values since the cpu might "idle"
+   waiting for the mutex to become available.
+   The clock source is configured on the CPU during mali module load,
+   but will not give useful output after a CPU has been power cycled.
+   It is therefore important to configure the system to not turn of
+   the cpu cores when using this functionallity.*/
+/* Read the ARM cycle counter (CCNT) of the current CPU via CP15;
+ * see the caveats about idle and power cycling in the note above. */
+static inline unsigned int mali_get_cpu_cyclecount(void)
+{
+       unsigned int value;
+       /* Reading the CCNT Register - CPU clock counter */
+       asm volatile ("MRC p15, 0, %0, c9, c13, 0\t\n": "=r"(value));
+       return value;
+}
+
+void mali_init_cpu_time_counters(int reset, int enable_divide_by_64);
+#endif
+
+
+/* Copy @n bytes from user space into @to.
+ * @return Number of bytes that could NOT be copied (0 on full success),
+ *         following copy_from_user() semantics. */
+MALI_STATIC_INLINE u32 _mali_osk_copy_from_user(void *to, void *from, u32 n)
+{
+       return (u32)copy_from_user(to, from, (unsigned long)n);
+}
+
+/* MALI_TRUE when executing in atomic (non-sleepable) context. */
+MALI_STATIC_INLINE mali_bool _mali_osk_in_atomic(void)
+{
+       return in_atomic();
+}
+
+#define _mali_osk_put_user(x, ptr) put_user(x, ptr)
+
+#endif /* __MALI_OSK_SPECIFIC_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_time.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_time.c
new file mode 100644 (file)
index 0000000..6022239
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_time.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <asm/delay.h>
+
+/* Wrap-safe tick comparison: non-zero if @ticka is after @tickb. */
+int    _mali_osk_time_after( u32 ticka, u32 tickb )
+{
+       return time_after((unsigned long)ticka, (unsigned long)tickb);
+}
+
+/* Convert milliseconds to scheduler ticks (jiffies). */
+u32    _mali_osk_time_mstoticks( u32 ms )
+{
+       return msecs_to_jiffies(ms);
+}
+
+/* Convert scheduler ticks (jiffies) to milliseconds. */
+u32    _mali_osk_time_tickstoms( u32 ticks )
+{
+       return jiffies_to_msecs(ticks);
+}
+
+/* Current tick count (truncated to 32 bits on 64-bit kernels). */
+u32    _mali_osk_time_tickcount( void )
+{
+       return jiffies;
+}
+
+/* Busy-wait for @usecs microseconds; does not sleep. */
+void _mali_osk_time_ubusydelay( u32 usecs )
+{
+       udelay(usecs);
+}
+
+/* Wall-clock time in nanoseconds (getnstimeofday — not monotonic). */
+u64 _mali_osk_time_get_ns( void )
+{
+       struct timespec tsval;
+       getnstimeofday(&tsval);
+       return (u64)timespec_to_ns(&tsval);
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_timers.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_timers.c
new file mode 100644 (file)
index 0000000..421113f
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_timers.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* An OSK timer is a thin wrapper around a kernel timer_list. */
+struct _mali_osk_timer_t_struct {
+       struct timer_list timer;
+};
+
+typedef void (*timer_timeout_function_t)(unsigned long);
+
+/* Allocate and initialise a timer (not armed; callback not yet set).
+ * Returns NULL on allocation failure. */
+_mali_osk_timer_t *_mali_osk_timer_init(void)
+{
+       _mali_osk_timer_t *t = (_mali_osk_timer_t*)kmalloc(sizeof(_mali_osk_timer_t), GFP_KERNEL);
+       if (NULL != t) init_timer(&t->timer);
+       return t;
+}
+
+/* Arm @tim to fire once, @ticks_to_expire jiffies from now. */
+void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire )
+{
+       MALI_DEBUG_ASSERT_POINTER(tim);
+       tim->timer.expires = jiffies + ticks_to_expire;
+       add_timer(&(tim->timer));
+}
+
+/* (Re)arm @tim, replacing any pending expiry. */
+void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 ticks_to_expire)
+{
+       MALI_DEBUG_ASSERT_POINTER(tim);
+       mod_timer(&(tim->timer), jiffies + ticks_to_expire);
+}
+
+/* Cancel @tim and wait for a running callback to finish
+ * (del_timer_sync — may not be called from the callback itself). */
+void _mali_osk_timer_del( _mali_osk_timer_t *tim )
+{
+       MALI_DEBUG_ASSERT_POINTER(tim);
+       del_timer_sync(&(tim->timer));
+}
+
+/* Cancel @tim without waiting for a running callback. */
+void _mali_osk_timer_del_async( _mali_osk_timer_t *tim )
+{
+       MALI_DEBUG_ASSERT_POINTER(tim);
+       del_timer(&(tim->timer));
+}
+
+/* MALI_TRUE if @tim is armed and has not yet fired. */
+mali_bool _mali_osk_timer_pending( _mali_osk_timer_t *tim )
+{
+       MALI_DEBUG_ASSERT_POINTER(tim);
+       return 1 == timer_pending(&(tim->timer));
+}
+
+/* Set the expiry callback and its argument; must be done before arming. */
+void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data )
+{
+       MALI_DEBUG_ASSERT_POINTER(tim);
+       tim->timer.data = (unsigned long)data;
+       tim->timer.function = (timer_timeout_function_t)callback;
+}
+
+/* Free @tim; the caller is expected to have cancelled it first. */
+void _mali_osk_timer_term( _mali_osk_timer_t *tim )
+{
+       MALI_DEBUG_ASSERT_POINTER(tim);
+       kfree(tim);
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_wait_queue.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_wait_queue.c
new file mode 100644 (file)
index 0000000..f44dadb
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_wait_queue.c
+ * Implemenation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* An OSK wait queue wraps a kernel wait_queue_head_t. */
+struct _mali_osk_wait_queue_t_struct {
+       wait_queue_head_t wait_queue;
+};
+
+/* Allocate and initialise a wait queue; NULL on allocation failure. */
+_mali_osk_wait_queue_t* _mali_osk_wait_queue_init( void )
+{
+       _mali_osk_wait_queue_t* ret = NULL;
+
+       ret = kmalloc(sizeof(_mali_osk_wait_queue_t), GFP_KERNEL);
+
+       if (NULL == ret) {
+               return ret;
+       }
+
+       init_waitqueue_head(&ret->wait_queue);
+       MALI_DEBUG_ASSERT(!waitqueue_active(&ret->wait_queue));
+
+       return ret;
+}
+
+/* Sleep (uninterruptibly) until condition(data) evaluates true. */
+void _mali_osk_wait_queue_wait_event( _mali_osk_wait_queue_t *queue, mali_bool (*condition)(void *), void *data )
+{
+       MALI_DEBUG_ASSERT_POINTER( queue );
+       MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
+       wait_event(queue->wait_queue, condition(data));
+}
+
+/* As above, but give up after @timeout ms. The timeout/condition outcome
+ * is deliberately not reported to the caller. */
+void _mali_osk_wait_queue_wait_event_timeout( _mali_osk_wait_queue_t *queue, mali_bool (*condition)(void *), void *data, u32 timeout )
+{
+       MALI_DEBUG_ASSERT_POINTER( queue );
+       MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
+       wait_event_timeout(queue->wait_queue, condition(data), _mali_osk_time_mstoticks(timeout));
+}
+
+/* Wake every waiter currently sleeping on @queue. */
+void _mali_osk_wait_queue_wake_up( _mali_osk_wait_queue_t *queue )
+{
+       MALI_DEBUG_ASSERT_POINTER( queue );
+
+       /* if queue is empty, don't attempt to wake up its elements */
+       if (!waitqueue_active(&queue->wait_queue)) return;
+
+       MALI_DEBUG_PRINT(6, ("Waking up elements in wait queue %p ....\n", queue));
+
+       wake_up_all(&queue->wait_queue);
+
+       MALI_DEBUG_PRINT(6, ("... elements in wait queue %p woken up\n", queue));
+}
+
+/* Free @queue; Linux wait queues need no explicit teardown. */
+void _mali_osk_wait_queue_term( _mali_osk_wait_queue_t *queue )
+{
+       /* Parameter validation  */
+       MALI_DEBUG_ASSERT_POINTER( queue );
+
+       /* Linux requires no explicit termination of wait queues */
+       kfree(queue);
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_wq.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_osk_wq.c
new file mode 100644 (file)
index 0000000..0784949
--- /dev/null
@@ -0,0 +1,231 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_wq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/slab.h>        /* For memory allocation */
+#include <linux/workqueue.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "mali_kernel_linux.h"
+
+typedef struct _mali_osk_wq_work_s {
+       _mali_osk_wq_work_handler_t handler;
+       void *data;
+       mali_bool high_pri;
+       struct work_struct work_handle;
+} mali_osk_wq_work_object_t;
+
+typedef struct _mali_osk_wq_delayed_work_s {
+       _mali_osk_wq_work_handler_t handler;
+       void *data;
+       struct delayed_work work;
+} mali_osk_wq_delayed_work_object_t;
+
+#if MALI_LICENSE_IS_GPL
+struct workqueue_struct *mali_wq_normal = NULL;
+struct workqueue_struct *mali_wq_high = NULL;
+#endif
+
+static void _mali_osk_wq_work_func(struct work_struct *work);
+
+/**
+ * Create the driver's two work queues: a normal one and a high-priority
+ * one. On kernels older than 2.6.36 the WQ_* flags do not exist, so the
+ * legacy create_workqueue() API is used instead.
+ *
+ * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_FAULT if either
+ *         queue could not be created (neither queue is left allocated).
+ *         Always succeeds for non-GPL builds, which fall back to the
+ *         shared kernel work queue.
+ */
+_mali_osk_errcode_t _mali_osk_wq_init(void)
+{
+#if MALI_LICENSE_IS_GPL
+       MALI_DEBUG_ASSERT(NULL == mali_wq_normal);
+       MALI_DEBUG_ASSERT(NULL == mali_wq_high);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
+       mali_wq_normal = alloc_workqueue("mali", WQ_UNBOUND, 0);
+       mali_wq_high = alloc_workqueue("mali_high_pri", WQ_HIGHPRI, 0);
+#else
+       mali_wq_normal = create_workqueue("mali");
+       mali_wq_high = create_workqueue("mali_high_pri");
+#endif
+       if (NULL == mali_wq_normal || NULL == mali_wq_high) {
+               MALI_PRINT_ERROR(("Unable to create Mali workqueues\n"));
+
+               /* Roll back whichever queue did get created. */
+               if (mali_wq_normal) destroy_workqueue(mali_wq_normal);
+               if (mali_wq_high)   destroy_workqueue(mali_wq_high);
+
+               mali_wq_normal = NULL;
+               mali_wq_high   = NULL;
+
+               return _MALI_OSK_ERR_FAULT;
+       }
+#endif /* MALI_LICENSE_IS_GPL */
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Wait until all work queued so far (both queues) has completed. */
+void _mali_osk_wq_flush(void)
+{
+#if MALI_LICENSE_IS_GPL
+       flush_workqueue(mali_wq_high);
+       flush_workqueue(mali_wq_normal);
+#else
+       flush_scheduled_work();
+#endif
+}
+
+/* Flush and destroy both work queues (inverse of _mali_osk_wq_init). */
+void _mali_osk_wq_term(void)
+{
+#if MALI_LICENSE_IS_GPL
+       MALI_DEBUG_ASSERT(NULL != mali_wq_normal);
+       MALI_DEBUG_ASSERT(NULL != mali_wq_high);
+
+       flush_workqueue(mali_wq_normal);
+       destroy_workqueue(mali_wq_normal);
+
+       flush_workqueue(mali_wq_high);
+       destroy_workqueue(mali_wq_high);
+
+       mali_wq_normal = NULL;
+       mali_wq_high   = NULL;
+#else
+       flush_scheduled_work();
+#endif
+}
+
+/* Allocate a work item that will run @handler(@data) on the normal
+ * queue. Returns NULL on allocation failure. */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work( _mali_osk_wq_work_handler_t handler, void *data )
+{
+       mali_osk_wq_work_object_t *work = kmalloc(sizeof(mali_osk_wq_work_object_t), GFP_KERNEL);
+
+       if (NULL == work) return NULL;
+
+       work->handler = handler;
+       work->data = data;
+       work->high_pri = MALI_FALSE;
+
+       INIT_WORK( &work->work_handle, _mali_osk_wq_work_func);
+
+       return work;
+}
+
+/* As above, but flagged high priority: the worker thread is reniced
+ * before the handler runs (see _mali_osk_wq_work_func). */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri( _mali_osk_wq_work_handler_t handler, void *data )
+{
+       mali_osk_wq_work_object_t *work = kmalloc(sizeof(mali_osk_wq_work_object_t), GFP_KERNEL);
+
+       if (NULL == work) return NULL;
+
+       work->handler = handler;
+       work->data = data;
+       work->high_pri = MALI_TRUE;
+
+       INIT_WORK( &work->work_handle, _mali_osk_wq_work_func );
+
+       return work;
+}
+
+/* Free a work item after flushing BOTH queues, guaranteeing the item is
+ * no longer executing. Heavy: flushes all queued work, not just this one. */
+void _mali_osk_wq_delete_work( _mali_osk_wq_work_t *work )
+{
+       mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+       _mali_osk_wq_flush();
+       kfree(work_object);
+}
+
+/* Free a work item without flushing; caller must ensure it is not queued. */
+void _mali_osk_wq_delete_work_nonflush( _mali_osk_wq_work_t *work )
+{
+       mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+       kfree(work_object);
+}
+
+/* Queue @work on the normal Mali work queue (shared kernel queue for
+ * non-GPL builds). */
+void _mali_osk_wq_schedule_work( _mali_osk_wq_work_t *work )
+{
+       mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+#if MALI_LICENSE_IS_GPL
+       queue_work(mali_wq_normal, &work_object->work_handle);
+#else
+       schedule_work(&work_object->work_handle);
+#endif
+}
+
+/* Queue @work on the high-priority Mali work queue. */
+void _mali_osk_wq_schedule_work_high_pri( _mali_osk_wq_work_t *work )
+{
+       mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+#if MALI_LICENSE_IS_GPL
+       queue_work(mali_wq_high, &work_object->work_handle);
+#else
+       schedule_work(&work_object->work_handle);
+#endif
+}
+
+/* Trampoline executed by the kernel work queue: recovers the wrapper
+ * object and invokes the stored handler. */
+static void _mali_osk_wq_work_func( struct work_struct *work )
+{
+       mali_osk_wq_work_object_t *work_object;
+
+       work_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_wq_work_object_t, work_handle);
+
+       /* High-priority work bumps the worker thread to nice -19 so it gets
+        * ahead of ordinary dynamic-priority tasks. Note this is a nice
+        * level within SCHED_NORMAL, not a real-time priority. */
+       if (MALI_TRUE == work_object->high_pri) {
+               set_user_nice(current, -19);
+       }
+
+       work_object->handler(work_object->data);
+}
+
+/* Trampoline for delayed work items; no priority handling here. */
+static void _mali_osk_wq_delayed_work_func( struct work_struct *work )
+{
+       mali_osk_wq_delayed_work_object_t *work_object;
+
+       work_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_wq_delayed_work_object_t, work.work);
+       work_object->handler(work_object->data);
+}
+
+/* Allocate a delayed work item running @handler(@data); NULL on failure.
+ * NOTE(review): returns the internal object type rather than the opaque
+ * _mali_osk_wq_delayed_work_t handle the other helpers use — callers
+ * appear to treat both as opaque pointers; confirm against the header. */
+mali_osk_wq_delayed_work_object_t *_mali_osk_wq_delayed_create_work( _mali_osk_wq_work_handler_t handler, void *data)
+{
+       mali_osk_wq_delayed_work_object_t *work = kmalloc(sizeof(mali_osk_wq_delayed_work_object_t), GFP_KERNEL);
+
+       if (NULL == work) return NULL;
+
+       work->handler = handler;
+       work->data = data;
+
+       INIT_DELAYED_WORK(&work->work, _mali_osk_wq_delayed_work_func);
+
+       return work;
+}
+
+/* Free a delayed work item; caller must ensure it is not queued. */
+void _mali_osk_wq_delayed_delete_work_nonflush( _mali_osk_wq_delayed_work_t *work )
+{
+       mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+       kfree(work_object);
+}
+
+/* Cancel a pending delayed work item without waiting for a running one. */
+void _mali_osk_wq_delayed_cancel_work_async( _mali_osk_wq_delayed_work_t *work )
+{
+       mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+       cancel_delayed_work(&work_object->work);
+}
+
+/* Cancel a delayed work item, waiting for a running handler to finish. */
+void _mali_osk_wq_delayed_cancel_work_sync( _mali_osk_wq_delayed_work_t *work )
+{
+       mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+       cancel_delayed_work_sync(&work_object->work);
+}
+
+/* Queue @work on the normal Mali queue after @delay ticks. */
+void _mali_osk_wq_delayed_schedule_work( _mali_osk_wq_delayed_work_t *work, u32 delay )
+{
+       mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+
+#if MALI_LICENSE_IS_GPL
+       queue_delayed_work(mali_wq_normal, &work_object->work, delay);
+#else
+       schedule_delayed_work(&work_object->work, delay);
+#endif
+
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_pmu_power_up_down.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_pmu_power_up_down.c
new file mode 100644 (file)
index 0000000..c1b1c01
--- /dev/null
@@ -0,0 +1,71 @@
+/**
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu_power_up_down.c
+ */
+
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_pmu.h"
+#include "mali_pp_scheduler.h"
+#include "linux/mali/mali_utgard.h"
+
+/* Mali PMU power up/down APIs */
+
+/**
+ * Power up all Mali PMU domains.
+ *
+ * @return 0 on success, -ENXIO when no PMU core is registered,
+ *         -EFAULT when the power-up operation fails.
+ */
+int mali_pmu_powerup(void)
+{
+       struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+       MALI_DEBUG_PRINT(5, ("Mali PMU: Power up\n"));
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+
+       if (NULL == pmu) {
+               return -ENXIO;
+       }
+
+       return (_MALI_OSK_ERR_OK == mali_pmu_power_up_all(pmu)) ? 0 : -EFAULT;
+}
+
+EXPORT_SYMBOL(mali_pmu_powerup);
+
+/**
+ * Power down all Mali PMU domains.
+ *
+ * @return 0 on success, -ENXIO when no PMU core is registered,
+ *         -EFAULT when the power-down operation fails.
+ */
+int mali_pmu_powerdown(void)
+{
+       struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+       MALI_DEBUG_PRINT(5, ("Mali PMU: Power down\n"));
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+
+       if (NULL == pmu) {
+               return -ENXIO;
+       }
+
+       return (_MALI_OSK_ERR_OK == mali_pmu_power_down_all(pmu)) ? 0 : -EFAULT;
+}
+
+EXPORT_SYMBOL(mali_pmu_powerdown);
+
+/** Request a new number of active PP cores (performance scaling hook for
+ * external governors); delegates to the PP scheduler. */
+int mali_perf_set_num_pp_cores(unsigned int num_cores)
+{
+       return mali_pp_scheduler_set_perf_level(num_cores, MALI_FALSE);
+}
+
+EXPORT_SYMBOL(mali_perf_set_num_pp_cores);
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_profiling_events.h b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_profiling_events.h
new file mode 100644 (file)
index 0000000..2639a40
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_EVENTS_H__
+#define __MALI_PROFILING_EVENTS_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_profiling_events.h>
+
+#endif /* __MALI_PROFILING_EVENTS_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_profiling_gator_api.h b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_profiling_gator_api.h
new file mode 100644 (file)
index 0000000..c111cfd
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_GATOR_API_H__
+#define __MALI_PROFILING_GATOR_API_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_profiling_gator_api.h>
+
+#endif /* __MALI_PROFILING_GATOR_API_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_profiling_internal.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_profiling_internal.c
new file mode 100644 (file)
index 0000000..63e2e32
--- /dev/null
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_timestamp.h"
+#include "mali_osk_profiling.h"
+#include "mali_user_settings_db.h"
+#include "mali_profiling_internal.h"
+
+/* One recorded profiling event. */
+typedef struct mali_profiling_entry {
+       u64 timestamp;
+       u32 event_id;
+       u32 data[5];
+} mali_profiling_entry;
+
+/* Profiling state machine: IDLE -> RUNNING -> RETURN (events readable) -> IDLE. */
+typedef enum mali_profiling_state {
+       MALI_PROFILING_STATE_UNINITIALIZED,
+       MALI_PROFILING_STATE_IDLE,
+       MALI_PROFILING_STATE_RUNNING,
+       MALI_PROFILING_STATE_RETURN,
+} mali_profiling_state;
+
+static _mali_osk_mutex_t *lock = NULL;  /* guards prof_state and buffer transitions */
+static mali_profiling_state prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+static mali_profiling_entry* profile_entries = NULL;  /* ring buffer of events */
+static _mali_osk_atomic_t profile_insert_index;  /* monotonic count of events added */
+static u32 profile_mask = 0;  /* buffer size minus one (size is a power of two) */
+
+static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
+
+/* Tracepoint probe: forwards mali_timeline_event firings into the ring buffer. */
+void probe_mali_timeline_event(void *data, TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1, unsigned
+                               int d2, unsigned int d3, unsigned int d4))
+{
+       add_event(event_id, d0, d1, d2, d3, d4);
+}
+
+/**
+ * Initialise internal profiling: create the state mutex and, when auto_start
+ * is set, immediately begin a capture with the maximum buffer size.
+ *
+ * @param auto_start MALI_TRUE to start recording right away.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT otherwise.
+ * NOTE(review): if auto-start fails, the mutex stays allocated; it is freed
+ * by _mali_internal_profiling_term().
+ */
+_mali_osk_errcode_t _mali_internal_profiling_init(mali_bool auto_start)
+{
+       profile_entries = NULL;
+       profile_mask = 0;
+       _mali_osk_atomic_init(&profile_insert_index, 0);
+
+       lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PROFILING);
+       if (NULL == lock) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       prof_state = MALI_PROFILING_STATE_IDLE;
+
+       if (MALI_TRUE == auto_start) {
+               u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* Use maximum buffer size */
+
+               mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
+               if (_MALI_OSK_ERR_OK != _mali_internal_profiling_start(&limit)) {
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/** Tear down internal profiling: stop any active capture, free the event
+ * buffer and the state mutex, and return to the UNINITIALIZED state. */
+void _mali_internal_profiling_term(void)
+{
+       u32 count;
+
+       /* Ensure profiling is stopped */
+       _mali_internal_profiling_stop(&count);
+
+       prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+
+       if (NULL != profile_entries) {
+               _mali_osk_vfree(profile_entries);
+               profile_entries = NULL;
+       }
+
+       if (NULL != lock) {
+               _mali_osk_mutex_term(lock);
+               lock = NULL;
+       }
+}
+
+/**
+ * Start capturing profiling events.
+ *
+ * Allocates the event ring buffer (requested size clamped to
+ * MALI_PROFILING_MAX_BUFFER_ENTRIES and rounded down to a power of two),
+ * resets the timestamp base and attaches the mali_timeline_event probe.
+ *
+ * @param limit In: requested buffer size. Out: actual size used.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_BUSY if already running,
+ *         _MALI_OSK_ERR_INVALID_ARGS if not idle, _MALI_OSK_ERR_NOMEM on
+ *         allocation failure.
+ */
+_mali_osk_errcode_t _mali_internal_profiling_start(u32 * limit)
+{
+       _mali_osk_errcode_t ret;
+       mali_profiling_entry *new_profile_entries;
+
+       _mali_osk_mutex_wait(lock);
+
+       if (MALI_PROFILING_STATE_RUNNING == prof_state) {
+               _mali_osk_mutex_signal(lock);
+               return _MALI_OSK_ERR_BUSY;
+       }
+
+       if (MALI_PROFILING_STATE_IDLE != prof_state) {
+               _mali_osk_mutex_signal(lock);
+               return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+       }
+
+       /* Clamp BEFORE allocating; previously the unclamped request size was
+        * passed to _mali_osk_valloc, allowing an oversized allocation. */
+       if (MALI_PROFILING_MAX_BUFFER_ENTRIES < *limit) {
+               *limit = MALI_PROFILING_MAX_BUFFER_ENTRIES;
+       }
+
+       new_profile_entries = _mali_osk_valloc(*limit * sizeof(mali_profiling_entry));
+
+       if (NULL == new_profile_entries) {
+               /* Fix: the mutex was previously never released on this path,
+                * deadlocking every later profiling call. */
+               _mali_osk_mutex_signal(lock);
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       /* Round the usable size down to a power of two and build the index mask. */
+       profile_mask = 1;
+       while (profile_mask <= *limit) {
+               profile_mask <<= 1;
+       }
+       profile_mask >>= 1;
+
+       *limit = profile_mask;
+
+       profile_mask--; /* turns the power of two into a mask of one less */
+
+       profile_entries = new_profile_entries;
+
+       ret = _mali_timestamp_reset();
+
+       if (_MALI_OSK_ERR_OK == ret) {
+               prof_state = MALI_PROFILING_STATE_RUNNING;
+               /* Only attach the tracepoint probe when the capture actually
+                * started (it was previously registered even on failure). */
+               register_trace_mali_timeline_event(probe_mali_timeline_event, NULL);
+       } else {
+               _mali_osk_vfree(profile_entries);
+               profile_entries = NULL;
+       }
+
+       _mali_osk_mutex_signal(lock);
+       return ret;
+}
+
+/* Store one event at the next ring-buffer slot; the atomic insert index
+ * wraps via profile_mask. NOTE(review): runs in tracepoint context without
+ * the mutex — relies on the buffer staying allocated while RUNNING. */
+static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
+{
+       u32 cur_index = (_mali_osk_atomic_inc_return(&profile_insert_index) - 1) & profile_mask;
+
+       profile_entries[cur_index].timestamp = _mali_timestamp_get();
+       profile_entries[cur_index].event_id = event_id;
+       profile_entries[cur_index].data[0] = data0;
+       profile_entries[cur_index].data[1] = data1;
+       profile_entries[cur_index].data[2] = data2;
+       profile_entries[cur_index].data[3] = data3;
+       profile_entries[cur_index].data[4] = data4;
+
+       /* If event is "leave API function", add current memory usage to the event
+        * as data point 4.  This is used in timeline profiling to indicate how
+        * much memory was used when leaving a function. */
+       if (event_id == (MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC)) {
+               profile_entries[cur_index].data[4] = _mali_ukk_report_memory_usage();
+       }
+}
+
+/**
+ * Stop an active capture and enter the RETURN state so events can be read.
+ * Detaches the tracepoint probe and waits for in-flight probes to finish
+ * before reporting the final count.
+ *
+ * @param count Out: number of events available (capped at buffer capacity).
+ * @return _MALI_OSK_ERR_OK, or _MALI_OSK_ERR_INVALID_ARGS if not running.
+ */
+_mali_osk_errcode_t _mali_internal_profiling_stop(u32 * count)
+{
+       _mali_osk_mutex_wait(lock);
+
+       if (MALI_PROFILING_STATE_RUNNING != prof_state) {
+               _mali_osk_mutex_signal(lock);
+               return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+       }
+
+       /* go into return state (user to retreive events), no more events will be added after this */
+       prof_state = MALI_PROFILING_STATE_RETURN;
+
+       unregister_trace_mali_timeline_event(probe_mali_timeline_event, NULL);
+
+       _mali_osk_mutex_signal(lock);
+
+       /* Wait for probes still executing on other CPUs before reading state. */
+       tracepoint_synchronize_unregister();
+
+       *count = _mali_osk_atomic_read(&profile_insert_index);
+       if (*count > profile_mask) *count = profile_mask;
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/** Number of recorded events available for retrieval; 0 unless a finished
+ * capture is in the RETURN state. Capped at the ring-buffer capacity. */
+u32 _mali_internal_profiling_get_count(void)
+{
+       u32 count = 0;
+
+       _mali_osk_mutex_wait(lock);
+       if (MALI_PROFILING_STATE_RETURN == prof_state) {
+               count = _mali_osk_atomic_read(&profile_insert_index);
+               if (count > profile_mask) {
+                       count = profile_mask;
+               }
+       }
+       _mali_osk_mutex_signal(lock);
+
+       return count;
+}
+
+/**
+ * Copy one recorded event out of the ring buffer.
+ *
+ * @param index     Logical event index (0 = oldest available event).
+ * @param timestamp Out: event timestamp.
+ * @param event_id  Out: event id.
+ * @param data      Out: the five data words of the event.
+ * @return _MALI_OSK_ERR_OK, _MALI_OSK_ERR_INVALID_ARGS if not in RETURN
+ *         state, _MALI_OSK_ERR_FAULT if index is out of range.
+ * NOTE(review): raw_index is read before the mutex is taken — appears benign
+ * in RETURN state (no more inserts), but worth confirming.
+ */
+_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5])
+{
+       u32 raw_index = _mali_osk_atomic_read(&profile_insert_index);
+
+       _mali_osk_mutex_wait(lock);
+
+       if (index < profile_mask) {
+               /* If the buffer has wrapped, translate the logical index into a
+                * physical slot relative to the current insert position. */
+               if ((raw_index & ~profile_mask) != 0) {
+                       index += raw_index;
+                       index &= profile_mask;
+               }
+
+               if (prof_state != MALI_PROFILING_STATE_RETURN) {
+                       _mali_osk_mutex_signal(lock);
+                       return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+               }
+
+               if(index >= raw_index) {
+                       _mali_osk_mutex_signal(lock);
+                       return _MALI_OSK_ERR_FAULT;
+               }
+
+               *timestamp = profile_entries[index].timestamp;
+               *event_id = profile_entries[index].event_id;
+               data[0] = profile_entries[index].data[0];
+               data[1] = profile_entries[index].data[1];
+               data[2] = profile_entries[index].data[2];
+               data[3] = profile_entries[index].data[3];
+               data[4] = profile_entries[index].data[4];
+       } else {
+               _mali_osk_mutex_signal(lock);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       _mali_osk_mutex_signal(lock);
+       return _MALI_OSK_ERR_OK;
+}
+
+/** Discard a finished recording: free the event buffer, reset counters and
+ * return to IDLE so a new capture can be started.
+ * @return _MALI_OSK_ERR_OK, or _MALI_OSK_ERR_INVALID_ARGS if not in RETURN state. */
+_mali_osk_errcode_t _mali_internal_profiling_clear(void)
+{
+       _mali_osk_mutex_wait(lock);
+
+       if (MALI_PROFILING_STATE_RETURN != prof_state) {
+               _mali_osk_mutex_signal(lock);
+               return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+       }
+
+       prof_state = MALI_PROFILING_STATE_IDLE;
+       profile_mask = 0;
+       _mali_osk_atomic_init(&profile_insert_index, 0);
+
+       if (NULL != profile_entries) {
+               _mali_osk_vfree(profile_entries);
+               profile_entries = NULL;
+       }
+
+       _mali_osk_mutex_signal(lock);
+       return _MALI_OSK_ERR_OK;
+}
+
+/** MALI_TRUE while profiling events are being captured. */
+mali_bool _mali_internal_profiling_is_recording(void)
+{
+       return (MALI_PROFILING_STATE_RUNNING == prof_state) ? MALI_TRUE : MALI_FALSE;
+}
+
+/** MALI_TRUE when a finished recording is waiting to be retrieved. */
+mali_bool _mali_internal_profiling_have_recording(void)
+{
+       return (MALI_PROFILING_STATE_RETURN == prof_state) ? MALI_TRUE : MALI_FALSE;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_profiling_internal.h b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_profiling_internal.h
new file mode 100644 (file)
index 0000000..826ab09
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_INTERNAL_H__
+#define __MALI_PROFILING_INTERNAL_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_osk.h"
+
+/* Fix: init/stop/start were declared 'int' while their definitions in
+ * mali_profiling_internal.c return _mali_osk_errcode_t — a declaration/
+ * definition mismatch. Declarations now match the definitions. */
+_mali_osk_errcode_t _mali_internal_profiling_init(mali_bool auto_start);
+void _mali_internal_profiling_term(void);
+
+mali_bool _mali_internal_profiling_is_recording(void);
+mali_bool _mali_internal_profiling_have_recording(void);
+_mali_osk_errcode_t _mali_internal_profiling_clear(void);
+_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5]);
+u32 _mali_internal_profiling_get_count(void);
+_mali_osk_errcode_t _mali_internal_profiling_stop(u32 * count);
+_mali_osk_errcode_t _mali_internal_profiling_start(u32 * limit);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PROFILING_INTERNAL_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_sync.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_sync.c
new file mode 100644 (file)
index 0000000..34a155d
--- /dev/null
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_sync.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_timeline.h"
+
+#include <linux/file.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+
+/* Mali sync point: a Linux sync_pt plus a reference to the Mali sync flag
+ * whose signaling it reports. */
+struct mali_sync_pt {
+       struct sync_pt         sync_pt;
+       struct mali_sync_flag *flag;
+};
+
+/**
+ * The sync flag is used to connect sync fences to the Mali Timeline system.  Sync fences can be
+ * created from a sync flag, and when the flag is signaled, the sync fences will also be signaled.
+ */
+struct mali_sync_flag {
+       struct sync_timeline *sync_tl;  /**< Sync timeline this flag is connected to. */
+       u32                   point;    /**< Point on timeline. */
+       int                   status;   /**< 0 if unsignaled, 1 if signaled without error or negative if signaled with error. */
+       struct kref           refcount; /**< Reference count. */
+};
+
+/* Map an embedded sync_pt back to its containing mali_sync_pt. */
+MALI_STATIC_INLINE struct mali_sync_pt *to_mali_sync_pt(struct sync_pt *pt)
+{
+       return container_of(pt, struct mali_sync_pt, sync_pt);
+}
+
+/* sync_timeline_ops.dup: duplicate a sync point on the same timeline.
+ * The copy takes its own reference on the shared flag. Returns NULL on
+ * allocation failure. */
+static struct sync_pt *timeline_dup(struct sync_pt *pt)
+{
+       struct mali_sync_pt *mpt, *new_mpt;
+       struct sync_pt *new_pt;
+
+       MALI_DEBUG_ASSERT_POINTER(pt);
+       mpt = to_mali_sync_pt(pt);
+
+       new_pt = sync_pt_create(pt->parent, sizeof(struct mali_sync_pt));
+       if (NULL == new_pt) return NULL;
+
+       new_mpt = to_mali_sync_pt(new_pt);
+
+       mali_sync_flag_get(mpt->flag);
+       new_mpt->flag = mpt->flag;
+
+       return new_pt;
+}
+
+/* sync_timeline_ops.has_signaled: report the status stored on the point's
+ * flag (0 unsignaled, 1 signaled OK, negative errno on error). */
+static int timeline_has_signaled(struct sync_pt *pt)
+{
+       struct mali_sync_pt *mpt;
+
+       MALI_DEBUG_ASSERT_POINTER(pt);
+
+       mpt = to_mali_sync_pt(pt);
+       MALI_DEBUG_ASSERT_POINTER(mpt->flag);
+
+       return mpt->flag->status;
+}
+
+/* sync_timeline_ops.compare: order two points by timeline position.
+ * Returns -1/0/1. Uses the smaller of the two unsigned wraparound distances
+ * so ordering stays correct when the u32 point counter wraps. */
+static int timeline_compare(struct sync_pt *pta, struct sync_pt *ptb)
+{
+       struct mali_sync_pt *mpta;
+       struct mali_sync_pt *mptb;
+       u32 a, b;
+
+       MALI_DEBUG_ASSERT_POINTER(pta);
+       MALI_DEBUG_ASSERT_POINTER(ptb);
+       mpta = to_mali_sync_pt(pta);
+       mptb = to_mali_sync_pt(ptb);
+
+       MALI_DEBUG_ASSERT_POINTER(mpta->flag);
+       MALI_DEBUG_ASSERT_POINTER(mptb->flag);
+
+       a = mpta->flag->point;
+       b = mptb->flag->point;
+
+       if (a == b) return 0;
+
+       /* (b - a) < (a - b) in unsigned arithmetic means a is "before" b. */
+       return ((b - a) < (a - b) ? -1 : 1);
+}
+
+/* sync_timeline_ops.free_pt: drop the point's reference on its flag. */
+static void timeline_free_pt(struct sync_pt *pt)
+{
+       struct mali_sync_pt *mpt;
+
+       MALI_DEBUG_ASSERT_POINTER(pt);
+       mpt = to_mali_sync_pt(pt);
+
+       mali_sync_flag_put(mpt->flag);
+}
+
+/* sync_timeline_ops.release_obj: release the module reference taken when the
+ * timeline was created (see mali_sync_timeline_create). */
+static void timeline_release(struct sync_timeline *sync_timeline)
+{
+       module_put(THIS_MODULE);
+}
+
+/* sync_timeline_ops.print_pt: debugfs dump — print the point's position. */
+static void timeline_print_pt(struct seq_file *s, struct sync_pt *sync_pt)
+{
+       struct mali_sync_pt *mpt;
+
+       MALI_DEBUG_ASSERT_POINTER(s);
+       MALI_DEBUG_ASSERT_POINTER(sync_pt);
+
+       mpt = to_mali_sync_pt(sync_pt);
+       MALI_DEBUG_ASSERT_POINTER(mpt->flag);
+
+       seq_printf(s, "%u", mpt->flag->point);
+}
+
+/* Callback table handed to the Android sync framework; also used by
+ * mali_sync_timeline_is_ours() to identify Mali-owned timelines. */
+static struct sync_timeline_ops mali_timeline_ops = {
+       .driver_name    = "Mali",
+       .dup            = timeline_dup,
+       .has_signaled   = timeline_has_signaled,
+       .compare        = timeline_compare,
+       .free_pt        = timeline_free_pt,
+       .release_obj    = timeline_release,
+       .print_pt       = timeline_print_pt,
+};
+
+/* Create a Mali-owned sync timeline; returns NULL on failure. Pins the module
+ * so the ops table above stays resident; released in timeline_release(). */
+struct sync_timeline *mali_sync_timeline_create(const char *name)
+{
+       struct sync_timeline *sync_tl;
+
+       sync_tl = sync_timeline_create(&mali_timeline_ops, sizeof(struct sync_timeline), name);
+       if (NULL == sync_tl) return NULL;
+
+       /* Grab a reference on the module to ensure the callbacks are present
+        * as long some timeline exists. The reference is released when the
+        * timeline is freed.
+        * Since this function is called from a ioctl on an open file we know
+        * we already have a reference, so using __module_get is safe. */
+       __module_get(THIS_MODULE);
+
+       return sync_tl;
+}
+
+/* Identify Mali timelines by their ops table pointer. */
+mali_bool mali_sync_timeline_is_ours(struct sync_timeline *sync_tl)
+{
+       MALI_DEBUG_ASSERT_POINTER(sync_tl);
+       return (sync_tl->ops == &mali_timeline_ops) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Install the fence on a new file descriptor. On fd allocation failure the
+ * fence reference is dropped and -1 is returned (caller must not reuse it). */
+s32 mali_sync_fence_fd_alloc(struct sync_fence *sync_fence)
+{
+       s32 fd = -1;
+
+       fd = get_unused_fd();
+       if (fd < 0) {
+               sync_fence_put(sync_fence);
+               return -1;
+       }
+       sync_fence_install(sync_fence, fd);
+
+       return fd;
+}
+
+/**
+ * Merge two sync fences into one. Both input fences are released regardless
+ * of the merge outcome. Returns the merged fence, or NULL on failure.
+ */
+struct sync_fence *mali_sync_fence_merge(struct sync_fence *sync_fence1, struct sync_fence *sync_fence2)
+{
+       struct sync_fence *sync_fence;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_fence1);
+       MALI_DEBUG_ASSERT_POINTER(sync_fence2); /* fix: previously asserted sync_fence1 twice */
+
+       sync_fence = sync_fence_merge("mali_merge_fence", sync_fence1, sync_fence2);
+       sync_fence_put(sync_fence1);
+       sync_fence_put(sync_fence2);
+
+       return sync_fence;
+}
+
+/* Create a fence on the given timeline that is signaled immediately.
+ * Uses a temporary flag (point 0) that is signaled and released before
+ * returning. Returns NULL on allocation failure. */
+struct sync_fence *mali_sync_timeline_create_signaled_fence(struct sync_timeline *sync_tl)
+{
+       struct mali_sync_flag *flag;
+       struct sync_fence *sync_fence;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_tl);
+
+       flag = mali_sync_flag_create(sync_tl, 0);
+       if (NULL == flag) return NULL;
+
+       sync_fence = mali_sync_flag_create_fence(flag);
+
+       mali_sync_flag_signal(flag, 0);
+       mali_sync_flag_put(flag);
+
+       return sync_fence;
+}
+
+/* Allocate an unsignaled sync flag at 'point' on the given timeline with an
+ * initial refcount of one. Returns NULL if sync_tl is NULL or on OOM. */
+struct mali_sync_flag *mali_sync_flag_create(struct sync_timeline *sync_tl, mali_timeline_point point)
+{
+       struct mali_sync_flag *flag;
+
+       if (NULL == sync_tl) return NULL;
+
+       flag = _mali_osk_calloc(1, sizeof(*flag));
+       if (NULL == flag) return NULL;
+
+       flag->sync_tl = sync_tl;
+       flag->point = point;
+
+       flag->status = 0;
+       kref_init(&flag->refcount);
+
+       return flag;
+}
+
+/* Take an additional reference on the flag. */
+void mali_sync_flag_get(struct mali_sync_flag *flag)
+{
+       MALI_DEBUG_ASSERT_POINTER(flag);
+       kref_get(&flag->refcount);
+}
+
+/**
+ * Free sync flag.
+ *
+ * @param ref kref object embedded in sync flag that should be freed.
+ */
+static void mali_sync_flag_free(struct kref *ref)
+{
+       struct mali_sync_flag *flag;
+
+       MALI_DEBUG_ASSERT_POINTER(ref);
+       flag = container_of(ref, struct mali_sync_flag, refcount);
+
+       _mali_osk_free(flag);
+}
+
+/* Drop a reference; the last put frees the flag via mali_sync_flag_free. */
+void mali_sync_flag_put(struct mali_sync_flag *flag)
+{
+       MALI_DEBUG_ASSERT_POINTER(flag);
+       kref_put(&flag->refcount, mali_sync_flag_free);
+}
+
+/* Signal the flag exactly once (asserts it is still unsignaled): record the
+ * status, publish it with a write barrier, then wake the timeline so all
+ * fences built from this flag signal. */
+void mali_sync_flag_signal(struct mali_sync_flag *flag, int error)
+{
+       MALI_DEBUG_ASSERT_POINTER(flag);
+
+       MALI_DEBUG_ASSERT(0 == flag->status);
+       flag->status = (0 > error) ? error : 1;
+
+       /* Make the status visible before the framework re-evaluates points. */
+       _mali_osk_write_mem_barrier();
+
+       sync_timeline_signal(flag->sync_tl);
+}
+
+/**
+ * Create a sync point attached to given sync flag.
+ *
+ * @note Sync points must be triggered in *exactly* the same order as they are created.
+ *
+ * @param flag Sync flag.
+ * @return New sync point if successful, NULL if not.
+ */
+static struct sync_pt *mali_sync_flag_create_pt(struct mali_sync_flag *flag)
+{
+       struct sync_pt *pt;
+       struct mali_sync_pt *mpt;
+
+       MALI_DEBUG_ASSERT_POINTER(flag);
+       MALI_DEBUG_ASSERT_POINTER(flag->sync_tl);
+
+       pt = sync_pt_create(flag->sync_tl, sizeof(struct mali_sync_pt));
+       if (NULL == pt) return NULL;
+
+       /* The point holds its own reference on the flag, dropped in timeline_free_pt. */
+       mali_sync_flag_get(flag);
+
+       mpt = to_mali_sync_pt(pt);
+       mpt->flag = flag;
+
+       return pt;
+}
+
+/* Build a fence containing a single point attached to the flag. On fence
+ * creation failure the point (and its flag reference) is freed. Returns
+ * NULL on failure. */
+struct sync_fence *mali_sync_flag_create_fence(struct mali_sync_flag *flag)
+{
+       struct sync_pt    *sync_pt;
+       struct sync_fence *sync_fence;
+
+       MALI_DEBUG_ASSERT_POINTER(flag);
+       MALI_DEBUG_ASSERT_POINTER(flag->sync_tl);
+
+       sync_pt = mali_sync_flag_create_pt(flag);
+       if (NULL == sync_pt) return NULL;
+
+       sync_fence = sync_fence_create("mali_flag_fence", sync_pt);
+       if (NULL == sync_fence) {
+               sync_pt_free(sync_pt);
+               return NULL;
+       }
+
+       return sync_fence;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_sync.h b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_sync.h
new file mode 100644 (file)
index 0000000..c5afcea
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_sync.h
+ *
+ * Mali interface for Linux sync objects.
+ */
+
+#ifndef _MALI_SYNC_H_
+#define _MALI_SYNC_H_
+
+#if defined(CONFIG_SYNC)
+
+#include <linux/seq_file.h>
+#include <linux/sync.h>
+
+#include "mali_osk.h"
+
+struct mali_sync_flag;
+
+/**
+ * Create a sync timeline.
+ *
+ * @param name Name of the sync timeline.
+ * @return The new sync timeline if successful, NULL if not.
+ */
+struct sync_timeline *mali_sync_timeline_create(const char *name);
+
+/**
+ * Check if sync timeline belongs to Mali.
+ *
+ * @param sync_tl Sync timeline to check.
+ * @return MALI_TRUE if sync timeline belongs to Mali, MALI_FALSE if not.
+ */
+mali_bool mali_sync_timeline_is_ours(struct sync_timeline *sync_tl);
+
+/**
+ * Creates a file descriptor representing the sync fence.  Will release sync fence if allocation of
+ * file descriptor fails.
+ *
+ * @param sync_fence Sync fence.
+ * @return File descriptor representing sync fence if successful, or -1 if not.
+ */
+s32 mali_sync_fence_fd_alloc(struct sync_fence *sync_fence);
+
+/**
+ * Merges two sync fences.  Both input sync fences will be released.
+ *
+ * @param sync_fence1 First sync fence.
+ * @param sync_fence2 Second sync fence.
+ * @return New sync fence that is the result of the merger if successful, or NULL if not.
+ */
+struct sync_fence *mali_sync_fence_merge(struct sync_fence *sync_fence1, struct sync_fence *sync_fence2);
+
+/**
+ * Create a sync fence that is already signaled.
+ *
+ * @param sync_tl Sync timeline.
+ * @return New signaled sync fence if successful, NULL if not.
+ */
+struct sync_fence *mali_sync_timeline_create_signaled_fence(struct sync_timeline *sync_tl);
+
+/**
+ * Create a sync flag.
+ *
+ * @param sync_tl Sync timeline.
+ * @param point Point on Mali timeline.
+ * @return New sync flag if successful, NULL if not.
+ */
+struct mali_sync_flag *mali_sync_flag_create(struct sync_timeline *sync_tl, u32 point);
+
+/**
+ * Grab sync flag reference.
+ *
+ * @param flag Sync flag.
+ */
+void mali_sync_flag_get(struct mali_sync_flag *flag);
+
+/**
+ * Release sync flag reference.  If this was the last reference, the sync flag will be freed.
+ *
+ * @param flag Sync flag.
+ */
+void mali_sync_flag_put(struct mali_sync_flag *flag);
+
+/**
+ * Signal sync flag.  All sync fences created from this flag will be signaled.
+ *
+ * @param flag Sync flag to signal.
+ * @param error Negative error code, or 0 if no error.
+ */
+void mali_sync_flag_signal(struct mali_sync_flag *flag, int error);
+
+/**
+ * Create a sync fence attached to given sync flag.
+ *
+ * @param flag Sync flag.
+ * @return New sync fence if successful, NULL if not.
+ */
+struct sync_fence *mali_sync_flag_create_fence(struct mali_sync_flag *flag);
+
+#endif /* defined(CONFIG_SYNC) */
+
+#endif /* _MALI_SYNC_H_ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_uk_types.h b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_uk_types.h
new file mode 100644 (file)
index 0000000..fbe902a
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_UK_TYPES_H__
+#define __MALI_UK_TYPES_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_uk_types.h>
+
+#endif /* __MALI_UK_TYPES_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_core.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_core.c
new file mode 100644 (file)
index 0000000..a7db905
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <linux/slab.h>     /* memort allocation functions */
+#include <asm/uaccess.h>    /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+/** ioctl wrapper: exchange UK API versions with user space.
+ * Reads the requested version, queries the kernel side, and writes back the
+ * kernel version plus the compatibility result. Returns 0 or -errno. */
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs)
+{
+       _mali_uk_get_api_version_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
+
+       kargs.ctx = session_data;
+       err = _mali_ukk_get_api_version(&kargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+       if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;
+
+       return 0;
+}
+
+/** ioctl wrapper: block until a notification is available and copy it to
+ * user space. On shutdown-in-progress only the type field is written back;
+ * otherwise the full struct is copied (with ctx scrubbed). */
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs)
+{
+       _mali_uk_wait_for_notification_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       kargs.ctx = session_data;
+       err = _mali_ukk_wait_for_notification(&kargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       if(_MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS != kargs.type) {
+               kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+               if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_wait_for_notification_s))) return -EFAULT;
+       } else {
+               if (0 != put_user(kargs.type, &uargs->type)) return -EFAULT;
+       }
+
+       return 0;
+}
+
+/** ioctl wrapper: read a notification type from user space and post it to
+ * the session's notification queue. Returns 0 or -errno. */
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs)
+{
+       _mali_uk_post_notification_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       if (0 != get_user(kargs.type, &uargs->type)) {
+               return -EFAULT;
+       }
+
+       kargs.ctx = session_data;
+       err = _mali_ukk_post_notification(&kargs);
+
+       return (_MALI_OSK_ERR_OK == err) ? 0 : map_errcode(err);
+}
+
+/** ioctl wrapper: copy the full user-settings table to user space
+ * (ctx scrubbed first so no kernel pointer leaks). Returns 0 or -errno. */
+int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs)
+{
+       _mali_uk_get_user_settings_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       kargs.ctx = session_data;
+       err = _mali_ukk_get_user_settings(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+       if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_user_settings_s))) return -EFAULT;
+
+       return 0;
+}
+
+/** ioctl wrapper: request high scheduling priority for this session.
+ * Nothing is copied back to user space. Returns map_errcode(err) directly —
+ * presumably map_errcode maps _MALI_OSK_ERR_OK to 0 (verify). */
+int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_uk_request_high_priority_s __user *uargs)
+{
+       _mali_uk_request_high_priority_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       kargs.ctx = session_data;
+       err = _mali_ukk_request_high_priority(&kargs);
+
+       kargs.ctx = NULL;
+
+       return map_errcode(err);
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_gp.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_gp.c
new file mode 100644 (file)
index 0000000..d35f680
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <asm/uaccess.h>    /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+/* Start a GP (geometry processor) job for this session.  uargs is
+ * handed through untouched; _mali_ukk_gp_start_job performs its own
+ * user-space copy of the job descriptor. */
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs)
+{
+       _mali_osk_errcode_t err;
+
+       /* If the job was started successfully, 0 is returned.  If there was an error, but the job
+        * was started, we return -ENOENT.  For anything else returned, the job was not started. */
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       err = _mali_ukk_gp_start_job(session_data, uargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       return 0;
+}
+
+/* Report the GP core hardware version to user space.  Only the
+ * 'version' field is written back. */
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs)
+{
+       _mali_uk_get_gp_core_version_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       kargs.ctx = session_data;
+       err =  _mali_ukk_get_gp_core_version(&kargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       /* no known transactions to roll-back */
+
+       if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+       return 0;
+}
+
+/* Forward user space's response to a GP job suspend event.  The whole
+ * struct is copied in; the (possibly updated) cookie is written back. */
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs)
+{
+       _mali_uk_gp_suspend_response_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_suspend_response_s))) return -EFAULT;
+
+       kargs.ctx = session_data;
+       err = _mali_ukk_gp_suspend_response(&kargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       if (0 != put_user(kargs.cookie, &uargs->cookie)) return -EFAULT;
+
+       /* no known transactions to roll-back */
+       return 0;
+}
+
+/* Report the number of GP cores to user space.  Only the
+ * 'number_of_cores' field is written back. */
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs)
+{
+       _mali_uk_get_gp_number_of_cores_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       kargs.ctx = session_data;
+       err = _mali_ukk_get_gp_number_of_cores(&kargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       /* no known transactions to roll-back */
+
+       if (0 != put_user(kargs.number_of_cores, &uargs->number_of_cores)) return -EFAULT;
+
+       return 0;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_mem.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_mem.c
new file mode 100644 (file)
index 0000000..1723b10
--- /dev/null
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <asm/uaccess.h>    /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+/* Copy data between two user-space ranges via the kernel ("write safe"):
+ * src, dest and size all come from user space and are range-checked here
+ * before _mali_ukk_mem_write_safe does the transfer.  The number of bytes
+ * actually written is reported back through uargs->size. */
+int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user * uargs)
+{
+       _mali_uk_mem_write_safe_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_mem_write_safe_s))) {
+               return -EFAULT;
+       }
+
+       kargs.ctx = session_data;
+
+       /* Check if we can access the buffers */
+       if (!access_ok(VERIFY_WRITE, kargs.dest, kargs.size)
+           || !access_ok(VERIFY_READ, kargs.src, kargs.size)) {
+               return -EINVAL;
+       }
+
+       /* Check if size wraps.  NOTE(review): the <= comparison also rejects
+        * size == 0 (size + dest == dest) — confirm a zero-length write is
+        * intended to fail with -EINVAL rather than succeed trivially. */
+       if ((kargs.size + kargs.dest) <= kargs.dest
+           || (kargs.size + kargs.src) <= kargs.src) {
+               return -EINVAL;
+       }
+
+       err = _mali_ukk_mem_write_safe(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       if (0 != put_user(kargs.size, &uargs->size)) {
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+/* Map externally allocated memory into the session's Mali address space.
+ * On success a cookie identifying the mapping is written back; if that
+ * put_user() fails, the freshly created mapping is rolled back so no
+ * orphaned mapping is left behind. */
+int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user * argument)
+{
+       _mali_uk_map_external_mem_s uk_args;
+       _mali_osk_errcode_t err_code;
+
+       /* validate input */
+       /* the session_data pointer was validated by caller */
+       MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+       /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+       if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_map_external_mem_s)) ) {
+               return -EFAULT;
+       }
+
+       uk_args.ctx = session_data;
+       err_code = _mali_ukk_map_external_mem( &uk_args );
+
+       if (0 != put_user(uk_args.cookie, &argument->cookie)) {
+               if (_MALI_OSK_ERR_OK == err_code) {
+                       /* Rollback */
+                       _mali_uk_unmap_external_mem_s uk_args_unmap;
+
+                       uk_args_unmap.ctx = session_data;
+                       uk_args_unmap.cookie = uk_args.cookie;
+                       err_code = _mali_ukk_unmap_external_mem( &uk_args_unmap );
+                       if (_MALI_OSK_ERR_OK != err_code) {
+                               MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_unmap_external_mem, as a result of failing put_user(), failed\n"));
+                       }
+               }
+               return -EFAULT;
+       }
+
+       /* Return the error that _mali_ukk_map_external_mem produced */
+       return map_errcode(err_code);
+}
+
+/* Unmap a previously mapped external memory region, identified by the
+ * cookie inside the copied-in struct.  Nothing is written back. */
+int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user * argument)
+{
+       _mali_uk_unmap_external_mem_s uk_args;
+       _mali_osk_errcode_t err_code;
+
+       /* validate input */
+       /* the session_data pointer was validated by caller */
+       MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+       /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+       if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_unmap_external_mem_s)) ) {
+               return -EFAULT;
+       }
+
+       uk_args.ctx = session_data;
+       err_code = _mali_ukk_unmap_external_mem( &uk_args );
+
+       /* Return the error that _mali_ukk_unmap_external_mem produced */
+       return map_errcode(err_code);
+}
+
+#if defined(CONFIG_MALI400_UMP)
+/* Release a UMP memory attachment, identified by the cookie inside the
+ * copied-in struct.  Nothing is written back. */
+int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user * argument)
+{
+       _mali_uk_release_ump_mem_s uk_args;
+       _mali_osk_errcode_t err_code;
+
+       /* validate input */
+       /* the session_data pointer was validated by caller */
+       MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+       /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+       if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_release_ump_mem_s)) ) {
+               return -EFAULT;
+       }
+
+       uk_args.ctx = session_data;
+       err_code = _mali_ukk_release_ump_mem( &uk_args );
+
+       /* Return the error that _mali_ukk_release_ump_mem produced */
+       return map_errcode(err_code);
+}
+
+/* Attach a UMP memory handle to the session.  On success a cookie is
+ * written back; if that put_user() fails, the attachment is rolled back
+ * via _mali_ukk_release_ump_mem so no orphaned attachment remains. */
+int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user * argument)
+{
+       _mali_uk_attach_ump_mem_s uk_args;
+       _mali_osk_errcode_t err_code;
+
+       /* validate input */
+       /* the session_data pointer was validated by caller */
+       MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+       /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+       if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_attach_ump_mem_s)) ) {
+               return -EFAULT;
+       }
+
+       uk_args.ctx = session_data;
+       err_code = _mali_ukk_attach_ump_mem( &uk_args );
+
+       if (0 != put_user(uk_args.cookie, &argument->cookie)) {
+               if (_MALI_OSK_ERR_OK == err_code) {
+                       /* Rollback */
+                       _mali_uk_release_ump_mem_s uk_args_unmap;
+
+                       uk_args_unmap.ctx = session_data;
+                       uk_args_unmap.cookie = uk_args.cookie;
+                       err_code = _mali_ukk_release_ump_mem( &uk_args_unmap );
+                       if (_MALI_OSK_ERR_OK != err_code) {
+                               MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_attach_mem, as a result of failing put_user(), failed\n"));
+                       }
+               }
+               return -EFAULT;
+       }
+
+       /* Return the error that _mali_ukk_attach_ump_mem produced */
+       return map_errcode(err_code);
+}
+#endif /* CONFIG_MALI400_UMP */
+
+/* Report the buffer size user space must allocate before requesting an
+ * MMU page table dump.  Only the 'size' field is written back. */
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user * uargs)
+{
+       _mali_uk_query_mmu_page_table_dump_size_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       kargs.ctx = session_data;
+
+       err = _mali_ukk_query_mmu_page_table_dump_size(&kargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       if (0 != put_user(kargs.size, &uargs->size)) return -EFAULT;
+
+       return 0;
+}
+
+/* Dump the session's MMU page table into a user-supplied buffer.
+ *
+ * The user provides a buffer pointer and its size; the dump is built in a
+ * temporary kernel buffer and copied out.  The register_writes and
+ * page_table_dump pointers returned to user space are rebased from the
+ * kernel buffer onto the user buffer.
+ *
+ * Returns 0 on success, -EFAULT on a user-access failure, -ENOMEM if the
+ * temporary buffer cannot be allocated, or a mapped OSK error code.
+ */
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs)
+{
+       _mali_uk_dump_mmu_page_table_s kargs;
+       _mali_osk_errcode_t err;
+       void *buffer;
+       int rc = -EFAULT;
+
+       /* validate input */
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       /* the session_data pointer was validated by caller */
+
+       kargs.buffer = NULL;
+
+       /* get location of user buffer */
+       if (0 != get_user(buffer, &uargs->buffer)) goto err_exit;
+       /* get size of mmu page table info buffer from user space */
+       if ( 0 != get_user(kargs.size, &uargs->size) ) goto err_exit;
+       /* verify we can access the whole of the user buffer */
+       if (!access_ok(VERIFY_WRITE, buffer, kargs.size)) goto err_exit;
+
+       /* allocate temporary buffer (kernel side) to store mmu page table info */
+       MALI_CHECK(kargs.size > 0, -ENOMEM);
+       kargs.buffer = _mali_osk_valloc(kargs.size);
+       if (NULL == kargs.buffer) {
+               rc = -ENOMEM;
+               goto err_exit;
+       }
+
+       kargs.ctx = session_data;
+       err = _mali_ukk_dump_mmu_page_table(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               rc = map_errcode(err);
+               goto err_exit;
+       }
+
+       /* copy mmu page table info back to user space and update pointers.
+        * Use the 'buffer' value fetched with get_user() above: dereferencing
+        * uargs->buffer directly would read user memory from kernel context
+        * without a uaccess primitive and would re-fetch a value user space
+        * could have changed after the access_ok() check (TOCTOU). */
+       if (0 != copy_to_user(buffer, kargs.buffer, kargs.size) ) goto err_exit;
+       if (0 != put_user((kargs.register_writes - (u32 *)kargs.buffer) + (u32 *)buffer, &uargs->register_writes)) goto err_exit;
+       if (0 != put_user((kargs.page_table_dump - (u32 *)kargs.buffer) + (u32 *)buffer, &uargs->page_table_dump)) goto err_exit;
+       if (0 != put_user(kargs.register_writes_size, &uargs->register_writes_size)) goto err_exit;
+       if (0 != put_user(kargs.page_table_dump_size, &uargs->page_table_dump_size)) goto err_exit;
+       rc = 0;
+
+err_exit:
+       if (kargs.buffer) _mali_osk_vfree(kargs.buffer);
+       return rc;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_pp.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_pp.c
new file mode 100644 (file)
index 0000000..88c27f4
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <asm/uaccess.h>    /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+/* Start a PP (pixel processor) job for this session.  uargs is handed
+ * through untouched; _mali_ukk_pp_start_job performs its own user-space
+ * copy of the job descriptor. */
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs)
+{
+       _mali_osk_errcode_t err;
+
+       /* If the job was started successfully, 0 is returned.  If there was an error, but the job
+        * was started, we return -ENOENT.  For anything else returned, the job was not started. */
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       err = _mali_ukk_pp_start_job(session_data, uargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       return 0;
+}
+
+/* Start a combined PP+GP job pair for this session.  uargs is handed
+ * through untouched to the core start routine. */
+int pp_and_gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_and_gp_start_job_s __user *uargs)
+{
+       _mali_osk_errcode_t err;
+
+       /* If the jobs were started successfully, 0 is returned.  If there was an error, but the
+        * jobs were started, we return -ENOENT.  For anything else returned, the jobs were not
+        * started. */
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       err = _mali_ukk_pp_and_gp_start_job(session_data, uargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       return 0;
+}
+
+/* Report the number of PP cores to user space.  The whole struct is
+ * copied back (after scrubbing the ctx pointer), unlike the GP variant
+ * which writes only a single field. */
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs)
+{
+       _mali_uk_get_pp_number_of_cores_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       kargs.ctx = session_data;
+
+       err = _mali_ukk_get_pp_number_of_cores(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+       if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_pp_number_of_cores_s))) {
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+/* Report the PP core hardware version to user space.  Only the
+ * 'version' field is written back. */
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs)
+{
+       _mali_uk_get_pp_core_version_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       kargs.ctx = session_data;
+       err = _mali_ukk_get_pp_core_version(&kargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+       return 0;
+}
+
+/* Disable write-back units for a queued PP job.  The core call returns
+ * void, so this wrapper can only fail on argument validation or the
+ * user-space copy; otherwise it always returns 0. */
+int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_disable_wb_s __user *uargs)
+{
+       _mali_uk_pp_disable_wb_s kargs;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_disable_wb_s))) return -EFAULT;
+
+       kargs.ctx = session_data;
+       _mali_ukk_pp_job_disable_wb(&kargs);
+
+       return 0;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_profiling.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_profiling.c
new file mode 100644 (file)
index 0000000..6f0854f
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <asm/uaccess.h>    /* user space access */
+#include <linux/slab.h>
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+/* Start event profiling for this session.  The requested parameters are
+ * copied in; the actual event limit granted is written back through
+ * uargs->limit. */
+int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs)
+{
+       _mali_uk_profiling_start_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_start_s))) {
+               return -EFAULT;
+       }
+
+       kargs.ctx = session_data;
+       err = _mali_ukk_profiling_start(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       if (0 != put_user(kargs.limit, &uargs->limit)) {
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+/* Record a single user-space supplied profiling event.  The whole event
+ * struct is copied in; nothing is written back. */
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs)
+{
+       _mali_uk_profiling_add_event_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_add_event_s))) {
+               return -EFAULT;
+       }
+
+       kargs.ctx = session_data;
+       err = _mali_ukk_profiling_add_event(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       return 0;
+}
+
+/* Stop event profiling for this session.  No fields are read from
+ * uargs; the number of events captured is written back via
+ * uargs->count. */
+int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs)
+{
+       _mali_uk_profiling_stop_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       kargs.ctx = session_data;
+       err = _mali_ukk_profiling_stop(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       if (0 != put_user(kargs.count, &uargs->count)) {
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+/* Fetch a recorded profiling event by index.  Only 'index' is read from
+ * user space; the filled-in event struct is copied back whole (after
+ * scrubbing the ctx pointer). */
+int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs)
+{
+       _mali_uk_profiling_get_event_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       if (0 != get_user(kargs.index, &uargs->index)) {
+               return -EFAULT;
+       }
+
+       kargs.ctx = session_data;
+
+       err = _mali_ukk_profiling_get_event(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+       if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_get_event_s))) {
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+/* Discard all recorded profiling events for this session.  No fields
+ * are read from or written back to user space. */
+int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs)
+{
+       _mali_uk_profiling_clear_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       kargs.ctx = session_data;
+       err = _mali_ukk_profiling_clear(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       return 0;
+}
+
+/* Report a batch of user-space software counters.  The counter array is
+ * copied into a bounded temporary kernel buffer before being handed to
+ * the core reporting code; the buffer is always freed before return. */
+int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs)
+{
+       _mali_uk_sw_counters_report_s kargs;
+       _mali_osk_errcode_t err;
+       u32 *counter_buffer;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_sw_counters_report_s))) {
+               return -EFAULT;
+       }
+
+       /* make sure that kargs.num_counters is [at least somewhat] sane:
+        * bounds the kmalloc below so user space cannot force a huge
+        * allocation */
+       if (kargs.num_counters > 10000) {
+               MALI_DEBUG_PRINT(1, ("User space attempted to allocate too many counters.\n"));
+               return -EINVAL;
+       }
+
+       counter_buffer = (u32*)kmalloc(sizeof(u32) * kargs.num_counters, GFP_KERNEL);
+       if (NULL == counter_buffer) {
+               return -ENOMEM;
+       }
+
+       /* kargs.counters still points into user space here; replace it with
+        * the kernel copy before calling into the core code */
+       if (0 != copy_from_user(counter_buffer, kargs.counters, sizeof(u32) * kargs.num_counters)) {
+               kfree(counter_buffer);
+               return -EFAULT;
+       }
+
+       kargs.ctx = session_data;
+       kargs.counters = counter_buffer;
+
+       err = _mali_ukk_sw_counters_report(&kargs);
+
+       kfree(counter_buffer);
+
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       return 0;
+}
+
+
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_soft_job.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_soft_job.c
new file mode 100644 (file)
index 0000000..7f97be0
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/fs.h>       /* file system operations */
+#include <asm/uaccess.h>    /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+#include "mali_soft_job.h"
+#include "mali_timeline.h"
+
+/* Create and start a soft (CPU-side) job on the session's soft job
+ * system.  Reads type, user_job, job_id_ptr and the fence from user
+ * space; writes the new job id (through job_id_ptr) and the timeline
+ * point (through uargs->point) back. */
+int soft_job_start_wrapper(struct mali_session_data *session, _mali_uk_soft_job_start_s __user *uargs)
+{
+       u32 type, user_job, point;
+       _mali_uk_fence_t uk_fence;
+       struct mali_timeline_fence fence;
+       struct mali_soft_job *job = NULL;
+       u32 __user *job_id_ptr = NULL;
+
+       /* If the job was started successfully, 0 is returned.  If there was an error, but the job
+        * was started, we return -ENOENT.  For anything else returned, the job was not started. */
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session, -EINVAL);
+
+       MALI_DEBUG_ASSERT_POINTER(session->soft_job_system);
+
+       if (0 != get_user(type, &uargs->type))                 return -EFAULT;
+       if (0 != get_user(user_job, &uargs->user_job))         return -EFAULT;
+       if (0 != get_user(job_id_ptr, &uargs->job_id_ptr))     return -EFAULT;
+
+       if (0 != copy_from_user(&uk_fence, &uargs->fence, sizeof(_mali_uk_fence_t))) return -EFAULT;
+       mali_timeline_fence_copy_uk_fence(&fence, &uk_fence);
+
+       /* reject unknown job types before allocating anything */
+       if (MALI_SOFT_JOB_TYPE_USER_SIGNALED < type) {
+               MALI_DEBUG_PRINT_ERROR(("Invalid soft job type specified\n"));
+               return -EINVAL;
+       }
+
+       /* Create soft job. */
+       job = mali_soft_job_create(session->soft_job_system, (enum mali_soft_job_type)type, user_job);
+       if (unlikely(NULL == job)) {
+               return map_errcode(_MALI_OSK_ERR_NOMEM);
+       }
+
+       /* Write job id back to user space.  Done before starting the job so
+        * a failure here can still be rolled back by destroying the job. */
+       if (0 != put_user(job->id, job_id_ptr)) {
+               MALI_PRINT_ERROR(("Mali Soft Job: failed to put job id"));
+               mali_soft_job_destroy(job);
+               return map_errcode(_MALI_OSK_ERR_NOMEM);
+       }
+
+       /* Start soft job. */
+       point = mali_soft_job_start(job, &fence);
+
+       if (0 != put_user(point, &uargs->point)) {
+               /* Let user space know that something failed after the job was started. */
+               return -ENOENT;
+       }
+
+       return 0;
+}
+
+/* Signal completion of a user-signaled soft job, identified by the
+ * job_id read from user space. */
+int soft_job_signal_wrapper(struct mali_session_data *session, _mali_uk_soft_job_signal_s __user *uargs)
+{
+       u32 job_id;
+       _mali_osk_errcode_t err;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (0 != get_user(job_id, &uargs->job_id)) return -EFAULT;
+
+       err = mali_soft_job_system_signal_job(session->soft_job_system, job_id);
+
+       return map_errcode(err);
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_timeline.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_timeline.c
new file mode 100644 (file)
index 0000000..0efd1d8
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/fs.h>       /* file system operations */
+#include <asm/uaccess.h>    /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+#include "mali_timeline.h"
+#include "mali_timeline_fence_wait.h"
+#include "mali_timeline_sync_fence.h"
+
+/* Return the latest point on one of the session's timelines.  The
+ * timeline id is read from user space and range-checked against
+ * MALI_UK_TIMELINE_MAX; the point is written back via uargs->point. */
+int timeline_get_latest_point_wrapper(struct mali_session_data *session, _mali_uk_timeline_get_latest_point_s __user *uargs)
+{
+       u32 val;
+       mali_timeline_id timeline;
+       mali_timeline_point point;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (0 != get_user(val, &uargs->timeline)) return -EFAULT;
+
+       if (MALI_UK_TIMELINE_MAX <= val) {
+               return -EINVAL;
+       }
+
+       timeline = (mali_timeline_id)val;
+
+       point = mali_timeline_system_get_latest_point(session->timeline_system, timeline);
+
+       if (0 != put_user(point, &uargs->point)) return -EFAULT;
+
+       return 0;
+}
+
+/* Block until the given timeline fence is signaled or the timeout
+ * expires.  Writes 1 to uargs->status if the fence was signaled, 0 if
+ * the wait timed out. */
+int timeline_wait_wrapper(struct mali_session_data *session, _mali_uk_timeline_wait_s __user *uargs)
+{
+       u32 timeout, status;
+       mali_bool ret;
+       _mali_uk_fence_t uk_fence;
+       struct mali_timeline_fence fence;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (0 != copy_from_user(&uk_fence, &uargs->fence, sizeof(_mali_uk_fence_t))) return -EFAULT;
+       if (0 != get_user(timeout, &uargs->timeout)) return -EFAULT;
+
+       mali_timeline_fence_copy_uk_fence(&fence, &uk_fence);
+
+       ret = mali_timeline_fence_wait(session->timeline_system, &fence, timeout);
+       status = (MALI_TRUE == ret ? 1 : 0);
+
+       if (0 != put_user(status, &uargs->status)) return -EFAULT;
+
+       return 0;
+}
+
+/* Create a sync_fence file descriptor for the given timeline fence and
+ * return it via uargs->sync_fd.  Without CONFIG_SYNC support the fd is
+ * always -1. */
+int timeline_create_sync_fence_wrapper(struct mali_session_data *session, _mali_uk_timeline_create_sync_fence_s __user *uargs)
+{
+       s32 sync_fd = -1;
+       _mali_uk_fence_t uk_fence;
+       struct mali_timeline_fence fence;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (0 != copy_from_user(&uk_fence, &uargs->fence, sizeof(_mali_uk_fence_t))) return -EFAULT;
+       mali_timeline_fence_copy_uk_fence(&fence, &uk_fence);
+
+#if defined(CONFIG_SYNC)
+       sync_fd = mali_timeline_sync_fence_create(session->timeline_system, &fence);
+#else
+       sync_fd = -1;
+#endif /* defined(CONFIG_SYNC) */
+
+       if (0 != put_user(sync_fd, &uargs->sync_fd)) return -EFAULT;
+
+       return 0;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_vsync.c b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_vsync.c
new file mode 100644 (file)
index 0000000..313963c
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <asm/uaccess.h>    /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+
+/* Report a vsync event from user space to the driver.  The whole event
+ * struct is copied in; nothing is written back. */
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs)
+{
+       _mali_uk_vsync_event_report_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_vsync_event_report_s))) {
+               return -EFAULT;
+       }
+
+       kargs.ctx = session_data;
+       err = _mali_ukk_vsync_event_report(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       return 0;
+}
+
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_wrappers.h b/drivers/gpu/arm/mali400/r4p0_rel0/linux/mali_ukk_wrappers.h
new file mode 100644 (file)
index 0000000..210b9fc
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk_wrappers.h
+ * Defines the wrapper functions for each user-kernel function
+ */
+
+#ifndef __MALI_UKK_WRAPPERS_H__
+#define __MALI_UKK_WRAPPERS_H__
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs);
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs);
+int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs);
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs);
+int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_uk_request_high_priority_s __user *uargs);
+
+int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user * uargs);
+int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user * argument);
+int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user * argument);
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user * uargs);
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs);
+
+int timeline_get_latest_point_wrapper(struct mali_session_data *session, _mali_uk_timeline_get_latest_point_s __user *uargs);
+int timeline_wait_wrapper(struct mali_session_data *session, _mali_uk_timeline_wait_s __user *uargs);
+int timeline_create_sync_fence_wrapper(struct mali_session_data *session, _mali_uk_timeline_create_sync_fence_s __user *uargs);
+int soft_job_start_wrapper(struct mali_session_data *session, _mali_uk_soft_job_start_s __user *uargs);
+int soft_job_signal_wrapper(struct mali_session_data *session, _mali_uk_soft_job_signal_s __user *uargs);
+
+#if defined(CONFIG_MALI400_UMP)
+int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user * argument);
+int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user * argument);
+#endif
+
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs);
+int pp_and_gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_and_gp_start_job_s __user *uargs);
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs);
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs);
+int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_disable_wb_s __user *uargs);
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs);
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs);
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs);
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs);
+
+int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs);
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs);
+int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs);
+int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs);
+int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs);
+int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs);
+
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs);
+
+
+int map_errcode( _mali_osk_errcode_t err );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_WRAPPERS_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos3250/exynos3.c b/drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos3250/exynos3.c
new file mode 100644 (file)
index 0000000..957094d
--- /dev/null
@@ -0,0 +1,86 @@
+/* drivers/gpu/mali400/mali/platform/exynos3250/exynos3.c
+ *
+ * Copyright 2011 by S.LSI. Samsung Electronics Inc.
+ * San#24, Nongseo-Dong, Giheung-Gu, Yongin, Korea
+ *
+ * Samsung SoC Mali400 DVFS driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**
+ * @file exynos3.c
+ * Platform specific Mali driver functions for the exynos 3XXX based platforms
+ */
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+
+#include <linux/irq.h>
+#include <plat/devs.h>
+
+#include "exynos3_pmm.h"
+
+#define MALI_GP_IRQ            EXYNOS3_IRQ_GP_3D
+#define MALI_PP0_IRQ           EXYNOS3_IRQ_PP0_3D
+#define MALI_PP1_IRQ           EXYNOS3_IRQ_PP1_3D
+#define MALI_GP_MMU_IRQ                EXYNOS3_IRQ_GPMMU_3D
+#define MALI_PP0_MMU_IRQ       EXYNOS3_IRQ_PPMMU0_3D
+#define MALI_PP1_MMU_IRQ       EXYNOS3_IRQ_PPMMU1_3D
+
+static struct resource mali_gpu_resources[] = {
+       MALI_GPU_RESOURCES_MALI400_MP2(0x13000000,
+                                               MALI_GP_IRQ, MALI_GP_MMU_IRQ,
+                                               MALI_PP0_IRQ, MALI_PP0_MMU_IRQ,
+                                               MALI_PP1_IRQ, MALI_PP1_MMU_IRQ)
+};
+
+static struct mali_gpu_device_data mali_gpu_data = {
+       .shared_mem_size = 256 * 1024 * 1024, /* 256MB */
+       .fb_start = 0x40000000,
+       .fb_size = 0xb1000000,
+       .utilization_interval = 100, /* 100ms */
+       .utilization_callback = mali_exynos_update_dvfs,
+};
+
+int mali_platform_device_register(void)
+{
+       int err;
+
+       MALI_DEBUG_PRINT(4, ("mali_platform_device_register() called\n"));
+
+       /* Connect resources to the device */
+       err = platform_device_add_resources(&exynos4_device_g3d,
+                                               mali_gpu_resources,
+                                               sizeof(mali_gpu_resources) /
+                                               sizeof(mali_gpu_resources[0]));
+       if (0 == err) {
+               err = platform_device_add_data(&exynos4_device_g3d,
+                                               &mali_gpu_data,
+                                               sizeof(mali_gpu_data));
+               if (0 == err) {
+                       mali_platform_init(&(exynos4_device_g3d.dev));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+                       pm_runtime_set_autosuspend_delay(&(exynos4_device_g3d.dev), 50);
+                       pm_runtime_use_autosuspend(&(exynos4_device_g3d.dev));
+#endif
+                       pm_runtime_enable(&(exynos4_device_g3d.dev));
+                       return 0;
+               }
+
+       }
+       return err;
+}
+
+void mali_platform_device_unregister(void)
+{
+       MALI_DEBUG_PRINT(4, ("mali_platform_device_unregister() called\n"));
+
+       mali_platform_deinit(&(exynos4_device_g3d.dev));
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos3250/exynos3_pmm.c b/drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos3250/exynos3_pmm.c
new file mode 100644 (file)
index 0000000..2e896dc
--- /dev/null
@@ -0,0 +1,253 @@
+/* drivers/gpu/mali400/mali/platform/exynos3250/exynos3_pmm.c
+ *
+ * Copyright 2011 by S.LSI. Samsung Electronics Inc.
+ * San#24, Nongseo-Dong, Giheung-Gu, Yongin, Korea
+ *
+ * Samsung SoC Mali400 DVFS driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**
+ * @file exynos3_pmm.c
+ * Platform specific Mali driver functions for the exynos 3XXX based platforms
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "exynos3_pmm.h"
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <mach/regs-pmu.h>
+
+#ifdef CONFIG_MALI400_PROFILING
+#include "mali_osk_profiling.h"
+#endif
+
+#define MPLLCLK_NAME   "sclk_mpll_pre_div"
+#define MOUT0_NAME     "mout_g3d0"
+#define GPUCLK_NAME      "sclk_g3d"
+
+struct mali_exynos_dvfs_step {
+       unsigned int    rate;
+       unsigned int    downthreshold;
+       unsigned int    upthreshold;
+};
+
+struct mali_exynos_drvdata {
+       struct device                           *dev;
+
+       const struct mali_exynos_dvfs_step      *steps;
+       unsigned int                            nr_steps;
+
+       struct clk                              *mpll;
+       struct clk                              *parent;
+       struct clk                              *sclk;
+
+       mali_power_mode                         power_mode;
+       unsigned int                            dvfs_step;
+       unsigned int                            load;
+
+       struct work_struct                      dvfs_work;
+       struct workqueue_struct                 *dvfs_workqueue;
+};
+
+static struct mali_exynos_drvdata *mali;
+
+#define MALI_DVFS_STEP(freq, down, up) \
+       {freq * 1000000, (256 * down) / 100, (256 * up) / 100}
+
+static const struct mali_exynos_dvfs_step mali_exynos_dvfs_step_tbl[] = {
+       MALI_DVFS_STEP(134, 0, 100),
+       MALI_DVFS_STEP(134, 0, 100)
+};
+
+/* PegaW1 */
+int mali_gpu_clk;
+unsigned int mali_dvfs_utilization;
+
+/* export GPU frequency as a read-only parameter so that it can be read in /sys */
+module_param(mali_gpu_clk, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_gpu_clk, "Mali Current Clock");
+
+#ifdef CONFIG_MALI400_PROFILING
+static inline void _mali_osk_profiling_add_gpufreq_event(int rate)
+{
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                MALI_PROFILING_EVENT_CHANNEL_GPU |
+                MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                rate, 0, 0, 0, 0);
+}
+#else
+static inline void _mali_osk_profiling_add_gpufreq_event(int rate)
+{
+}
+#endif
+
+static void mali_exynos_set_dvfs_step(struct mali_exynos_drvdata *mali,
+                                                       unsigned int step)
+{
+       const struct mali_exynos_dvfs_step *next = &mali->steps[step];
+
+       if (mali->dvfs_step == step)
+               return;
+
+       clk_set_rate(mali->sclk, next->rate);
+
+       mali_gpu_clk = (int)(clk_get_rate(mali->sclk) / 1000000);
+
+       _mali_osk_profiling_add_gpufreq_event(mali_gpu_clk);
+
+       mali->dvfs_step = step;
+}
+
+static void mali_exynos_dvfs_work(struct work_struct *work)
+{
+       struct mali_exynos_drvdata *mali = container_of(work,
+                                       struct mali_exynos_drvdata, dvfs_work);
+       unsigned int step = mali->dvfs_step;
+       const struct mali_exynos_dvfs_step *cur = &mali->steps[step];
+
+       if (mali->load > cur->upthreshold)
+               ++step;
+       else if (mali->load < cur->downthreshold)
+               --step;
+
+       BUG_ON(step >= mali->nr_steps);
+
+       if (step != mali->dvfs_step)
+               mali_exynos_set_dvfs_step(mali, step);
+}
+
+void mali_exynos_update_dvfs(struct mali_gpu_utilization_data *data)
+{
+       mali->load = data->utilization_gpu;
+       mali_dvfs_utilization = data->utilization_gpu;
+
+       queue_work(mali->dvfs_workqueue, &mali->dvfs_work);
+}
+
+_mali_osk_errcode_t mali_platform_power_mode_change(struct device *dev,
+                                               mali_power_mode power_mode)
+{
+       if (mali->power_mode == power_mode)
+               MALI_SUCCESS;
+       /* to avoid multiple clk_disable() call */
+       else if ((mali->power_mode > MALI_POWER_MODE_ON) &&
+                                       (power_mode > MALI_POWER_MODE_ON)) {
+               mali->power_mode = power_mode;
+               MALI_SUCCESS;
+       }
+
+       switch (power_mode) {
+       case MALI_POWER_MODE_ON:
+               mali_exynos_set_dvfs_step(mali, 1);
+               clk_enable(mali->sclk);
+               break;
+       case MALI_POWER_MODE_DEEP_SLEEP:
+       case MALI_POWER_MODE_LIGHT_SLEEP:
+               clk_disable(mali->sclk);
+               mali_exynos_set_dvfs_step(mali, 0);
+               mali_gpu_clk = 0;
+               break;
+       }
+
+       mali->power_mode = power_mode;
+
+       MALI_SUCCESS;
+}
+
+static mali_bool mali_clk_get(struct mali_exynos_drvdata *mali)
+{
+       mali->mpll = clk_get(NULL, MPLLCLK_NAME);
+       if (IS_ERR(mali->mpll)) {
+               MALI_PRINT_ERROR(("failed to get sclk_mpll_pre_div clock"));
+               return MALI_FALSE;
+       }
+
+       mali->parent = clk_get(NULL, MOUT0_NAME);
+       if (IS_ERR(mali->parent)) {
+               MALI_PRINT_ERROR(("failed to get parent_clock"));
+               return MALI_FALSE;
+       }
+
+       mali->sclk = clk_get(NULL, GPUCLK_NAME);
+       if (IS_ERR(mali->sclk)) {
+               MALI_PRINT_ERROR(("failed to get sclk_clock"));
+               return MALI_FALSE;
+       }
+
+       return MALI_TRUE;
+}
+
+static void mali_clk_put(struct mali_exynos_drvdata *mali,
+                                               mali_bool binc_mali_clock)
+{
+       if (mali->parent) {
+               clk_put(mali->parent);
+               mali->parent = NULL;
+       }
+
+       if (mali->mpll) {
+               clk_put(mali->mpll);
+               mali->mpll = NULL;
+       }
+
+       if (binc_mali_clock && mali->sclk) {
+               clk_put(mali->sclk);
+               mali->sclk = NULL;
+       }
+}
+
+_mali_osk_errcode_t mali_platform_init(struct device *dev)
+{
+       mali = kzalloc(sizeof(*mali), GFP_KERNEL);
+       if (WARN_ON(!mali))
+               MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+       mali->steps = mali_exynos_dvfs_step_tbl;
+       mali->nr_steps = ARRAY_SIZE(mali_exynos_dvfs_step_tbl);
+
+       if (!mali_clk_get(mali)) {
+               MALI_PRINT_ERROR(("Failed to get Mali clocks"));
+               goto err_clk_put;
+       }
+
+       clk_set_parent(mali->parent, mali->mpll);
+       clk_set_parent(mali->sclk, mali->parent);
+
+       mali->dvfs_workqueue = create_singlethread_workqueue("mali_dvfs");
+       if (WARN_ON(!mali->dvfs_workqueue)) {
+               MALI_PRINT_ERROR(("failed to create workqueue"));
+               goto err_clk_put;
+       }
+
+       mali->power_mode = MALI_POWER_MODE_DEEP_SLEEP;
+
+       INIT_WORK(&mali->dvfs_work, mali_exynos_dvfs_work);
+
+       mali_exynos_set_dvfs_step(mali, 1);
+
+       mali_clk_put(mali, MALI_FALSE);
+
+       MALI_SUCCESS;
+
+err_clk_put:
+       mali_clk_put(mali, MALI_TRUE);
+       kfree(mali);
+       MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+_mali_osk_errcode_t mali_platform_deinit(struct device *dev)
+{
+       mali_clk_put(mali, MALI_TRUE);
+
+       kfree(mali);
+
+       MALI_SUCCESS;
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos3250/exynos3_pmm.h b/drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos3250/exynos3_pmm.h
new file mode 100644 (file)
index 0000000..99032cc
--- /dev/null
@@ -0,0 +1,82 @@
+/* drivers/gpu/mali400/mali/platform/exynos3250/exynos3_pmm.h
+ *
+ * Copyright 2011 by S.LSI. Samsung Electronics Inc.
+ * San#24, Nongseo-Dong, Giheung-Gu, Yongin, Korea
+ *
+ * Samsung SoC Mali400 DVFS driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**
+ * @file exynos3_pmm.h
+ * Platform specific Mali driver functions for the exynos 3XXX based platforms
+ */
+
+#ifndef __EXYNOS4_PMM_H__
+#define __EXYNOS4_PMM_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_utilization.h"
+#include <linux/platform_device.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @brief description of power change reasons
+ */
+typedef enum mali_power_mode_tag
+{
+       MALI_POWER_MODE_ON,
+       MALI_POWER_MODE_LIGHT_SLEEP,
+       MALI_POWER_MODE_DEEP_SLEEP,
+} mali_power_mode;
+
+/** @brief Platform specific setup and initialisation of MALI
+ *
+ * This is called from the entrypoint of the driver to initialize the platform
+ *
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_init(struct device *dev);
+
+/** @brief Platform specific deinitialisation of MALI
+ *
+ * This is called on the exit of the driver to terminate the platform
+ *
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_deinit(struct device *dev);
+
+/** @brief Platform specific powerdown sequence of MALI
+ *
+ * Call as part of platform init if there is no PMM support, else the
+ * PMM will call it.
+ * There are three power modes defined:
+ *  1) MALI_POWER_MODE_ON
+ *  2) MALI_POWER_MODE_LIGHT_SLEEP
+ *  3) MALI_POWER_MODE_DEEP_SLEEP
+ * MALI power management module transitions to MALI_POWER_MODE_LIGHT_SLEEP mode when MALI is idle
+ * for idle timer (software timer defined in mali_pmm_policy_jobcontrol.h) duration, MALI transitions
+ * to MALI_POWER_MODE_LIGHT_SLEEP mode during timeout if there are no more jobs queued.
+ * MALI power management module transitions to MALI_POWER_MODE_DEEP_SLEEP mode when OS does system power
+ * off.
+ * Customer has to add power down code when MALI transitions to MALI_POWER_MODE_LIGHT_SLEEP or MALI_POWER_MODE_DEEP_SLEEP
+ * mode.
+ * MALI_POWER_MODE_ON mode is entered when the MALI is to powered up. Some customers want to control voltage regulators during
+ * the whole system powers on/off. Customer can track in this function whether the MALI is powered up from
+ * MALI_POWER_MODE_LIGHT_SLEEP or MALI_POWER_MODE_DEEP_SLEEP mode and manage the voltage regulators as well.
+ * @param power_mode defines the power modes
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_power_mode_change(struct device *dev, mali_power_mode power_mode);
+
+void mali_exynos_update_dvfs(struct mali_gpu_utilization_data *data);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos3470/exynos4.c b/drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos3470/exynos4.c
new file mode 100644 (file)
index 0000000..caa5909
--- /dev/null
@@ -0,0 +1,86 @@
+/* drivers/gpu/mali400/mali/platform/exynos3470/exynos4.c
+ *
+ * Copyright 2011 by S.LSI. Samsung Electronics Inc.
+ * San#24, Nongseo-Dong, Giheung-Gu, Yongin, Korea
+ *
+ * Samsung SoC Mali400 DVFS driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**
+ * @file exynos4.c
+ * Platform specific Mali driver functions for the exynos 4XXX based platforms
+ */
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+
+#include <plat/devs.h>
+#include "mali_kernel_common.h"
+#include "exynos4_pmm.h"
+
+#define MALI_GP_IRQ       EXYNOS4_IRQ_GP_3D
+#define MALI_PP0_IRQ      EXYNOS4_IRQ_PP0_3D
+#define MALI_PP1_IRQ      EXYNOS4_IRQ_PP1_3D
+#define MALI_PP2_IRQ      EXYNOS4_IRQ_PP2_3D
+#define MALI_PP3_IRQ      EXYNOS4_IRQ_PP3_3D
+#define MALI_GP_MMU_IRQ   EXYNOS4_IRQ_GPMMU_3D
+#define MALI_PP0_MMU_IRQ  EXYNOS4_IRQ_PPMMU0_3D
+#define MALI_PP1_MMU_IRQ  EXYNOS4_IRQ_PPMMU1_3D
+#define MALI_PP2_MMU_IRQ  EXYNOS4_IRQ_PPMMU2_3D
+#define MALI_PP3_MMU_IRQ  EXYNOS4_IRQ_PPMMU3_3D
+
+static struct resource mali_gpu_resources[] =
+{
+       MALI_GPU_RESOURCES_MALI400_MP4(0x13000000,
+                                      MALI_GP_IRQ, MALI_GP_MMU_IRQ,
+                                      MALI_PP0_IRQ, MALI_PP0_MMU_IRQ,
+                                      MALI_PP1_IRQ, MALI_PP1_MMU_IRQ,
+                                      MALI_PP2_IRQ, MALI_PP2_MMU_IRQ,
+                                      MALI_PP3_IRQ, MALI_PP3_MMU_IRQ)
+};
+
+static struct mali_gpu_device_data mali_gpu_data =
+{
+       .shared_mem_size = 256 * 1024 * 1024, /* 256MB */
+       .fb_start = 0x40000000,
+       .fb_size = 0xb1000000,
+       .utilization_interval = 100, /* 100ms */
+       .utilization_callback = mali_gpu_utilization_handler,
+};
+
+int mali_platform_device_register(void)
+{
+       int err;
+
+       MALI_DEBUG_PRINT(4, ("mali_platform_device_register() called\n"));
+
+       /* Connect resources to the device */
+       err = platform_device_add_resources(&exynos4_device_g3d, mali_gpu_resources, sizeof(mali_gpu_resources) / sizeof(mali_gpu_resources[0]));
+       if (0 == err)
+       {
+               err = platform_device_add_data(&exynos4_device_g3d, &mali_gpu_data, sizeof(mali_gpu_data));
+               if (0 == err)
+               {
+                       mali_platform_init(&(exynos4_device_g3d.dev));
+#ifdef CONFIG_PM_RUNTIME
+                       pm_runtime_set_autosuspend_delay(&(exynos4_device_g3d.dev), 1000);
+                       pm_runtime_use_autosuspend(&(exynos4_device_g3d.dev));
+                       pm_runtime_enable(&(exynos4_device_g3d.dev));
+#endif
+                       return 0;
+               }
+
+       }
+       return err;
+}
+
+void mali_platform_device_unregister(void)
+{
+       MALI_DEBUG_PRINT(4, ("mali_platform_device_unregister() called\n"));
+       mali_platform_deinit(&(exynos4_device_g3d.dev));
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos3470/exynos4_pmm.c b/drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos3470/exynos4_pmm.c
new file mode 100644 (file)
index 0000000..d5af6a2
--- /dev/null
@@ -0,0 +1,1045 @@
+/* drivers/gpu/mali400/mali/platform/exynos3470/exynos4_pmm.c
+ *
+ * Copyright 2011 by S.LSI. Samsung Electronics Inc.
+ * San#24, Nongseo-Dong, Giheung-Gu, Yongin, Korea
+ *
+ * Samsung SoC Mali400 DVFS driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**
+ * @file exynos4_pmm.c
+ * Platform specific Mali driver functions for the exynos 4XXX based platforms
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "exynos4_pmm.h"
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+/* lock/unlock CPU freq by Mali */
+#include <linux/types.h>
+#include <mach/cpufreq.h>
+#include <mach/regs-clock.h>
+#include <mach/asv-exynos.h>
+#ifdef CONFIG_CPU_FREQ
+#define EXYNOS4_ASV_ENABLED
+#endif
+
+/* Some defines changed names in later Odroid-A kernels. Make sure it works for both. */
+#ifndef S5P_G3D_CONFIGURATION
+#define S5P_G3D_CONFIGURATION EXYNOS4_G3D_CONFIGURATION
+#endif
+#ifndef S5P_G3D_STATUS
+#define S5P_G3D_STATUS (EXYNOS4_G3D_CONFIGURATION + 0x4)
+#endif
+#ifndef S5P_INT_LOCAL_PWR_EN
+#define S5P_INT_LOCAL_PWR_EN EXYNOS_INT_LOCAL_PWR_EN
+#endif
+
+#include <asm/io.h>
+#include <mach/regs-pmu.h>
+#include <linux/workqueue.h>
+
+#define MALI_DVFS_STEPS 4
+#define MALI_DVFS_WATING 10 /* msec */
+#define MALI_DVFS_DEFAULT_STEP 1
+
+#define MALI_DVFS_CLK_DEBUG 0
+#define CPUFREQ_LOCK_DURING_440 1
+
+static int bMaliDvfsRun = 0;
+
+typedef struct mali_dvfs_tableTag{
+       unsigned int clock;
+       unsigned int freq;
+       unsigned int vol;
+       unsigned int downthreshold;
+       unsigned int upthreshold;
+}mali_dvfs_table;
+
+typedef struct mali_dvfs_statusTag{
+       unsigned int currentStep;
+       mali_dvfs_table * pCurrentDvfs;
+
+} mali_dvfs_status_t;
+
+/*dvfs status*/
+mali_dvfs_status_t maliDvfsStatus;
+int mali_dvfs_control;
+static _mali_osk_atomic_t dvfslock_status;
+
+typedef struct mali_runtime_resumeTag{
+               int clk;
+               int vol;
+               unsigned int step;
+}mali_runtime_resume_table;
+
+mali_runtime_resume_table mali_runtime_resume = {266, 850000, 1};
+
+/*dvfs table updated on 130520*/
+mali_dvfs_table mali_dvfs[MALI_DVFS_STEPS + 1]={
+       /*step 0*/{160, 1000000, 850000,   0,  70},
+       /*step 1*/{266, 1000000, 850000,  62,  90},
+       /*step 2*/{340, 1000000, 875000,  85,  90},
+       /*step 3*/{440, 1000000, 925000,  85, 100},
+       /*step 4*/{450, 1000000, 925000, 100, 100} };
+
+/* Exynos3470 */
+int mali_gpu_clk = 266;
+int mali_gpu_vol = 850000;
+unsigned int mali_vpll_clk = 900;
+char *mali_freq_table = "440 340 266 160";
+#define EXTXTALCLK_NAME  "ext_xtal"
+#define VPLLSRCCLK_NAME  "vpll_src"
+#define FOUTVPLLCLK_NAME "fout_vpll"
+#define MOUTEPLLCLK_NAME "mout_epll"
+#define SCLVPLLCLK_NAME  "sclk_vpll"
+#define GPUMOUT1CLK_NAME "mout_g3d1"
+#define GPUMOUT0CLK_NAME "mout_g3d0"
+#define GPUCLK_NAME      "sclk_g3d"
+#define CLK_DIV_STAT_G3D 0x1003C62C
+#define CLK_DESC         "clk-divider-status"
+#define ISP_LOCK_STEP 4
+
+static struct clk *ext_xtal_clock = NULL;
+static struct clk *vpll_src_clock = NULL;
+static struct clk *fout_vpll_clock = NULL;
+static struct clk *mout_epll_clock = NULL;
+static struct clk *sclk_vpll_clock = NULL;
+static struct clk *mali_parent_clock = NULL;
+static struct clk *mali_clock = NULL;
+
+static unsigned int GPU_MHZ    = 1000000;
+static unsigned int const GPU_ASV_VOLT = 1000;
+static int nPowermode;
+static atomic_t clk_active;
+
+mali_io_address clk_register_map = 0;
+
+/* export GPU frequency as a read-only parameter so that it can be read in /sys */
+module_param(mali_gpu_clk, int, S_IRUSR | S_IRGRP | S_IROTH);
+module_param(mali_gpu_vol, int, S_IRUSR | S_IRGRP | S_IROTH);
+module_param(mali_freq_table, charp, S_IRUSR | S_IRGRP | S_IROTH);
+#ifdef CONFIG_MALI_DVFS
+module_param(mali_dvfs_control, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_dvfs_control, "Mali Current DVFS");
+DEVICE_ATTR(time_in_state, S_IRUGO|S_IWUSR, show_time_in_state, set_time_in_state);
+MODULE_PARM_DESC(time_in_state, "Time-in-state of Mali DVFS");
+#endif
+MODULE_PARM_DESC(mali_gpu_clk, "Mali Current Clock");
+MODULE_PARM_DESC(mali_gpu_vol, "Mali Current Voltage");
+MODULE_PARM_DESC(mali_freq_table, "Mali frequency table");
+
+#ifdef CONFIG_REGULATOR
+struct regulator *g3d_regulator = NULL;
+#endif
+atomic_t mali_cpufreq_lock;
+
+/* DVFS */
+#ifdef CONFIG_MALI_DVFS
+static unsigned int mali_dvfs_utilization = 255;
+static void update_time_in_state(int level);
+u64 mali_dvfs_time[MALI_DVFS_STEPS];
+#endif
+
+static void mali_dvfs_work_handler(struct work_struct *w);
+static struct workqueue_struct *mali_dvfs_wq = 0;
+_mali_osk_mutex_t *mali_dvfs_lock;
+_mali_osk_mutex_t *mali_isp_lock;
+int mali_runtime_resumed = -1;
+static DECLARE_WORK(mali_dvfs_work, mali_dvfs_work_handler);
+
+int cpufreq_lock_by_mali(unsigned int freq)
+{
+#ifdef CONFIG_EXYNOS4_CPUFREQ
+       unsigned int level;
+
+       if (atomic_read(&mali_cpufreq_lock) == 0) {
+               if (exynos_cpufreq_get_level(freq * 1000, &level)) {
+                       printk(KERN_ERR "Mali: failed to get cpufreq level for %dMHz", freq);
+                       return -EINVAL;
+               }
+
+               if (exynos_cpufreq_lock(DVFS_LOCK_ID_G3D, level)) {
+                       printk(KERN_ERR "Mali: failed to cpufreq lock for L%d", level);
+                       return -EINVAL;
+               }
+
+               atomic_set(&mali_cpufreq_lock, 1);
+               printk(KERN_DEBUG "Mali: cpufreq locked on <%d>%dMHz\n", level, freq);
+       }
+#endif
+       return 0;
+}
+
+void cpufreq_unlock_by_mali(void)
+{
+#ifdef CONFIG_EXYNOS4_CPUFREQ
+       if (atomic_read(&mali_cpufreq_lock) == 1) {
+               exynos_cpufreq_lock_free(DVFS_LOCK_ID_G3D);
+               atomic_set(&mali_cpufreq_lock, 0);
+               printk(KERN_DEBUG "Mali: cpufreq locked off\n");
+       }
+#endif
+}
+
+#ifdef CONFIG_REGULATOR
+void mali_regulator_disable(void)
+{
+       if(IS_ERR_OR_NULL(g3d_regulator))
+       {
+               MALI_DEBUG_PRINT(1, ("error on mali_regulator_disable : g3d_regulator is null\n"));
+               return;
+       }
+       regulator_disable(g3d_regulator);
+}
+
+void mali_regulator_enable(void)
+{
+       if(IS_ERR_OR_NULL(g3d_regulator))
+       {
+               MALI_DEBUG_PRINT(1, ("error on mali_regulator_enable : g3d_regulator is null\n"));
+               return;
+       }
+       regulator_enable(g3d_regulator);
+}
+
+void mali_regulator_set_voltage(int min_uV, int max_uV)
+{
+       _mali_osk_mutex_wait(mali_dvfs_lock);
+       if(IS_ERR_OR_NULL(g3d_regulator))
+       {
+               MALI_DEBUG_PRINT(1, ("error on mali_regulator_set_voltage : g3d_regulator is null\n"));
+               _mali_osk_mutex_signal(mali_dvfs_lock);
+               return;
+       }
+       MALI_DEBUG_PRINT(1, ("= regulator_set_voltage: %d, %d \n", min_uV, max_uV));
+       regulator_set_voltage(g3d_regulator, min_uV, max_uV);
+       mali_gpu_vol = regulator_get_voltage(g3d_regulator);
+       MALI_DEBUG_PRINT(1, ("Mali voltage: %d\n", mali_gpu_vol));
+       _mali_osk_mutex_signal(mali_dvfs_lock);
+}
+#endif
+
+#ifdef CONFIG_MALI_DVFS
+static unsigned int get_mali_dvfs_status(void)
+{
+       return maliDvfsStatus.currentStep;
+}
+#endif
+
+/* Look up one clock by name and cache it in *slot.  Idempotent: does
+ * nothing if the slot is already populated.
+ * Fix: on failure the slot is reset to NULL.  Previously the ERR_PTR
+ * value was left in the global, so later NULL-checks treated it as a
+ * valid clock and mali_clk_put() would clk_put() an error pointer.
+ */
+static mali_bool mali_clk_get_one(struct clk **slot, const char *clk_name, const char *label)
+{
+	if (*slot != NULL)
+		return MALI_TRUE;
+
+	*slot = clk_get(NULL, clk_name);
+	if (IS_ERR(*slot)) {
+		MALI_PRINT(("MALI Error : failed to get source %s\n", label));
+		*slot = NULL;	/* don't leave an ERR_PTR behind */
+		return MALI_FALSE;
+	}
+	return MALI_TRUE;
+}
+
+/* Acquire every clock in the G3D tree (source crystal, VPLL chain, mux,
+ * leaf clock).  Returns MALI_FALSE as soon as any lookup fails; clocks
+ * obtained so far stay cached for a later retry or mali_clk_put().
+ */
+mali_bool mali_clk_get(void)
+{
+	if (!mali_clk_get_one(&ext_xtal_clock, EXTXTALCLK_NAME, "ext_xtal_clock"))
+		return MALI_FALSE;
+
+	if (!mali_clk_get_one(&vpll_src_clock, VPLLSRCCLK_NAME, "vpll_src_clock"))
+		return MALI_FALSE;
+
+	if (!mali_clk_get_one(&fout_vpll_clock, FOUTVPLLCLK_NAME, "fout_vpll_clock"))
+		return MALI_FALSE;
+
+	if (!mali_clk_get_one(&mout_epll_clock, MOUTEPLLCLK_NAME, "mout_epll_clock"))
+		return MALI_FALSE;
+
+	if (!mali_clk_get_one(&sclk_vpll_clock, SCLVPLLCLK_NAME, "sclk_vpll_clock"))
+		return MALI_FALSE;
+
+	if (!mali_clk_get_one(&mali_parent_clock, GPUMOUT1CLK_NAME, "mali parent clock"))
+		return MALI_FALSE;
+
+	/* mali clock get always. */
+	if (!mali_clk_get_one(&mali_clock, GPUCLK_NAME, "mali clock"))
+		return MALI_FALSE;
+
+	return MALI_TRUE;
+}
+
+/* Drop the clock references cached by mali_clk_get().  The fout_vpll and
+ * core mali clocks are released only when binc_mali_clock is MALI_TRUE
+ * (full teardown); otherwise they stay held across DVFS transitions.
+ */
+void mali_clk_put(mali_bool binc_mali_clock)
+{
+	if (mali_parent_clock) {
+		clk_put(mali_parent_clock);
+		mali_parent_clock = NULL;
+	}
+
+	if (sclk_vpll_clock) {
+		clk_put(sclk_vpll_clock);
+		sclk_vpll_clock = NULL;
+	}
+
+	if (binc_mali_clock && fout_vpll_clock) {
+		clk_put(fout_vpll_clock);
+		fout_vpll_clock = NULL;
+	}
+
+	if (mout_epll_clock) {
+		clk_put(mout_epll_clock);
+		mout_epll_clock = NULL;
+	}
+
+	if (vpll_src_clock) {
+		clk_put(vpll_src_clock);
+		vpll_src_clock = NULL;
+	}
+
+	if (ext_xtal_clock) {
+		clk_put(ext_xtal_clock);
+		ext_xtal_clock = NULL;
+	}
+
+	if (binc_mali_clock && mali_clock) {
+		clk_put(mali_clock);
+		mali_clock = NULL;
+	}
+}
+
+/* Reprogram the Mali clock tree to run at `clk` MHz (`mhz` is the tick
+ * multiplier, i.e. GPU_MHZ).  Sequence: park the GPU mux on EPLL, retune
+ * FOUT_VPLL, route back through VPLL, then set the leaf clock rate.
+ * Takes the DVFS lock; returns silently if clocks cannot be obtained or
+ * enabled.
+ */
+void mali_clk_set_rate(unsigned int clk, unsigned int mhz)
+{
+	int err;
+	unsigned int read_val;
+	unsigned long rate = (unsigned long)clk * (unsigned long)mhz;
+
+	_mali_osk_mutex_wait(mali_dvfs_lock);
+
+	MALI_DEBUG_PRINT(3, ("Mali platform: Setting frequency to %d mhz\n", clk));
+
+	if (mali_clk_get() == MALI_FALSE) {
+		_mali_osk_mutex_signal(mali_dvfs_lock);
+		return;
+	}
+
+	/* Park the GPU mux on EPLL while VPLL is being retuned. */
+	clk_set_parent(mali_parent_clock, mout_epll_clock);
+
+	do {
+		cpu_relax();
+		read_val = __raw_readl(EXYNOS4_CLKMUX_STAT_G3D0);
+	} while (((read_val >> 4) & 0x7) != 0x1);
+
+	MALI_DEBUG_PRINT(3, ("Mali platform: set to EPLL EXYNOS4_CLKMUX_STAT_G3D0 : 0x%08x\n", __raw_readl(EXYNOS4_CLKMUX_STAT_G3D0)));
+
+	err = clk_set_parent(sclk_vpll_clock, ext_xtal_clock);
+
+	if (err)
+		MALI_PRINT_ERROR(("sclk_vpll set parent to ext_xtal failed\n"));
+
+	MALI_DEBUG_PRINT(3, ("Mali platform: set_parent_vpll : %8.x \n", (__raw_readl(EXYNOS4_CLKSRC_TOP0) >> 8) & 0x1));
+
+	clk_set_rate(fout_vpll_clock, (unsigned int)clk * GPU_MHZ);
+	clk_set_parent(vpll_src_clock, ext_xtal_clock);
+
+	err = clk_set_parent(sclk_vpll_clock, fout_vpll_clock);
+
+	if (err)
+		MALI_PRINT_ERROR(("sclk_vpll set parent to fout_vpll failed\n"));
+
+	MALI_DEBUG_PRINT(3, ("Mali platform: set_parent_vpll : %8.x \n", (__raw_readl(EXYNOS4_CLKSRC_TOP0) >> 8) & 0x1));
+
+	clk_set_parent(mali_parent_clock, sclk_vpll_clock);
+
+	do {
+		cpu_relax();
+		read_val = __raw_readl(EXYNOS4_CLKMUX_STAT_G3D0);
+	} while (((read_val >> 4) & 0x7) != 0x2);
+
+	MALI_DEBUG_PRINT(3, ("SET to VPLL EXYNOS4_CLKMUX_STAT_G3D0 : 0x%08x\n", __raw_readl(EXYNOS4_CLKMUX_STAT_G3D0)));
+
+	clk_set_parent(mali_clock, mali_parent_clock);
+
+	if (atomic_read(&clk_active) == 0) {
+		if (clk_enable(mali_clock) < 0) {
+			_mali_osk_mutex_signal(mali_dvfs_lock);
+			return;
+		}
+		atomic_set(&clk_active, 1);
+	}
+
+	err = clk_set_rate(mali_clock, rate);
+
+	/* clk_set_rate() returns 0 or a negative errno; the previous
+	 * `err > 0` test could never report a failure. */
+	if (err < 0)
+		MALI_PRINT_ERROR(("Failed to set Mali clock: %d\n", err));
+
+	rate = clk_get_rate(mali_clock);
+
+	/* rate is unsigned long: cast so the value matches %d. */
+	MALI_DEBUG_PRINT(1, ("Mali frequency %d\n", (int)(rate / mhz)));
+	GPU_MHZ = mhz;
+
+	mali_gpu_clk = (int)(rate / mhz);
+	mali_clk_put(MALI_FALSE);
+
+	_mali_osk_mutex_signal(mali_dvfs_lock);
+}
+
+#ifdef CONFIG_MALI_DVFS
+/* Record the current DVFS step, wrapped into [0, MALI_DVFS_STEPS), under
+ * the DVFS lock.  Always reports success.
+ */
+mali_bool set_mali_dvfs_current_step(unsigned int step)
+{
+	unsigned int wrapped_step = step % MALI_DVFS_STEPS;
+
+	_mali_osk_mutex_wait(mali_dvfs_lock);
+	maliDvfsStatus.currentStep = wrapped_step;
+	_mali_osk_mutex_signal(mali_dvfs_lock);
+
+	return MALI_TRUE;
+}
+
+/* Apply DVFS operating point `step`: reprogram the G3D regulator voltage
+ * and the Mali clock.  On a frequency increase (`boostup`) the voltage is
+ * raised before the clock; on a decrease the clock is lowered first, so
+ * the supply always covers the active frequency.
+ * While the ISP holds the DVFS lock the request is skipped and reported
+ * as success; a request for ISP_LOCK_STEP itself falls back to the
+ * runtime-resume step.  Returns MALI_TRUE.
+ */
+static mali_bool set_mali_dvfs_status(u32 step,mali_bool boostup)
+{
+#if MALI_DVFS_CLK_DEBUG
+	unsigned int *pRegMaliClkDiv;
+	unsigned int *pRegMaliMpll;
+#endif
+
+	_mali_osk_mutex_wait(mali_isp_lock);
+
+	/* ISP lock pins the frequency: leave everything untouched. */
+	if (mali_dvfs_control == mali_dvfs[ISP_LOCK_STEP].clock) {
+		MALI_DEBUG_PRINT(1, ("DVFS is already locked by ISP\n"));
+		_mali_osk_mutex_signal(mali_isp_lock);
+		return MALI_TRUE;
+	} else if (step == ISP_LOCK_STEP) {
+		step = mali_runtime_resume.step;
+	}
+
+	if(boostup)	{
+#ifdef CONFIG_REGULATOR
+		/*change the voltage*/
+#ifdef EXYNOS4_ASV_ENABLED
+		mali_regulator_set_voltage(get_match_volt(ID_G3D, mali_dvfs[step].clock * GPU_ASV_VOLT), get_match_volt(ID_G3D, mali_dvfs[step].clock * GPU_ASV_VOLT));
+		exynos_set_abb(ID_G3D, get_match_abb(ID_G3D, mali_dvfs[step].clock * GPU_ASV_VOLT));
+#else
+		mali_regulator_set_voltage(mali_dvfs[step].vol, mali_dvfs[step].vol);
+#endif
+#endif
+		/*change the clock*/
+		mali_clk_set_rate(mali_dvfs[step].clock, mali_dvfs[step].freq);
+	} else {
+		/*change the clock*/
+		mali_clk_set_rate(mali_dvfs[step].clock, mali_dvfs[step].freq);
+#ifdef CONFIG_REGULATOR
+#ifdef EXYNOS4_ASV_ENABLED
+		/*change the voltage*/
+		mali_regulator_set_voltage(get_match_volt(ID_G3D, mali_dvfs[step].clock * GPU_ASV_VOLT), get_match_volt(ID_G3D, mali_dvfs[step].clock * GPU_ASV_VOLT));
+		exynos_set_abb(ID_G3D, get_match_abb(ID_G3D, mali_dvfs[step].clock * GPU_ASV_VOLT));
+#else
+		mali_regulator_set_voltage(mali_dvfs[step].vol, mali_dvfs[step].vol);
+#endif
+#endif
+	}
+
+#if defined(CONFIG_MALI400_PROFILING)
+	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
+			MALI_PROFILING_EVENT_CHANNEL_GPU|
+			MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+			mali_gpu_clk, mali_gpu_vol/1000, 0, 0, 0);
+#endif
+	mali_clk_put(MALI_FALSE);
+
+#if MALI_DVFS_CLK_DEBUG
+	pRegMaliClkDiv = ioremap(0x1003c52c, 32);
+	pRegMaliMpll = ioremap(0x1003c22c, 32);
+	MALI_PRINT(("Mali MPLL reg:%d, CLK DIV: %d \n", *pRegMaliMpll, *pRegMaliClkDiv));
+#endif
+	set_mali_dvfs_current_step(step);
+	/*for future use*/
+	maliDvfsStatus.pCurrentDvfs = &mali_dvfs[step];
+
+#if CPUFREQ_LOCK_DURING_440
+	/* lock/unlock CPU freq by Mali */
+	if (mali_dvfs[step].clock >= 440)
+		cpufreq_lock_by_mali(400);
+	else
+		cpufreq_unlock_by_mali();
+#endif
+	_mali_osk_mutex_signal(mali_isp_lock);
+	return MALI_TRUE;
+}
+
+/* Busy-wait until the mapped clock status register reports stable
+ * (bit 15 clears).  NOTE(review): the loop has no timeout, so a divider
+ * that never settles would hang here; the `msec` argument is accepted
+ * but never used inside the loop.
+ */
+static void mali_platform_wating(u32 msec)
+{
+	/*
+	* sample wating
+	* change this in the future with proper check routine.
+	*/
+	unsigned int read_val;
+	while(1) {
+		read_val = _mali_osk_mem_ioread32(clk_register_map, 0x00);
+		if ((read_val & 0x8000)==0x0000) break;
+
+		_mali_osk_time_ubusydelay(100); /* 1000 -> 100 : 20101218 */
+	}
+}
+
+/* Switch to DVFS step `step` and then busy-wait until the clock divider
+ * reports stable.  Returns MALI_FALSE if applying the step failed.
+ */
+static mali_bool change_mali_dvfs_status(u32 step, mali_bool boostup )
+{
+	MALI_DEBUG_PRINT(4, ("> change_mali_dvfs_status: %d, %d \n", step, boostup));
+
+	if (set_mali_dvfs_status(step, boostup) == MALI_FALSE) {
+		MALI_DEBUG_PRINT(1, ("error on set_mali_dvfs_status: %d, %d \n",step, boostup));
+		return MALI_FALSE;
+	}
+
+	/* wait until clock and voltage is stablized */
+	mali_platform_wating(MALI_DVFS_WATING); /* msec */
+
+	return MALI_TRUE;
+}
+
+/* Choose the next DVFS step from the reported utilization (0..255).
+ * `level` is static, so each decision moves relative to the previous one.
+ * Priority order: a pending runtime-resume step overrides everything;
+ * with mali_dvfs_control == 0 the per-step up/down thresholds move one
+ * step at a time; the ISP lock value pins ISP_LOCK_STEP; any other
+ * non-zero control value clamps to the highest step whose clock fits.
+ */
+static unsigned int decideNextStatus(unsigned int utilization)
+{
+	static unsigned int level = 0;
+	int iStepCount = 0;
+
+	/* A runtime resume forces its step exactly once. */
+	if (mali_runtime_resumed >= 0) {
+		level = mali_runtime_resumed;
+		mali_runtime_resumed = -1;
+	}
+
+	if (mali_dvfs_control == 0) {
+		/* Thresholds are percentages of the 0..255 utilization scale. */
+		if (utilization > (int)(255 * mali_dvfs[maliDvfsStatus.currentStep].upthreshold / 100) &&
+				level < MALI_DVFS_STEPS - 1) {
+			level++;
+		} else if (utilization < (int)(255 * mali_dvfs[maliDvfsStatus.currentStep].downthreshold / 100) &&
+			level > 0) {
+			level--;
+		}
+	} else if (mali_dvfs_control == mali_dvfs[ISP_LOCK_STEP].clock) {
+		level = ISP_LOCK_STEP;
+	} else {
+		/* Manual control: pick the highest step not exceeding the request. */
+		for (iStepCount = MALI_DVFS_STEPS - 1; iStepCount >= 0; iStepCount--) {
+			if (mali_dvfs_control >= mali_dvfs[iStepCount].clock) {
+				level = iStepCount;
+				break;
+			}
+		}
+	}
+
+	return level;
+}
+
+/* One DVFS decision pass.  Steps up immediately when utilization is high;
+ * steps down only after the down condition has held for 5 consecutive
+ * passes (`stay_count`), which damps oscillation.  Returns MALI_FALSE
+ * only if applying the chosen step failed.
+ */
+static mali_bool mali_dvfs_status(unsigned int utilization)
+{
+	unsigned int nextStatus = 0;
+	unsigned int curStatus = 0;
+	mali_bool boostup = MALI_FALSE;
+	static int stay_count = 5; /* down-step hysteresis counter */
+
+	MALI_DEBUG_PRINT(4, ("> mali_dvfs_status: %d \n", utilization));
+
+	/* decide next step */
+	curStatus = get_mali_dvfs_status();
+	nextStatus = decideNextStatus(utilization);
+
+	MALI_DEBUG_PRINT(4, ("= curStatus %d, nextStatus %d, maliDvfsStatus.currentStep %d \n", curStatus, nextStatus, maliDvfsStatus.currentStep));
+	/* if next status is same with current status, don't change anything */
+	if(curStatus != nextStatus) {
+		/*check if boost up or not*/
+		if(maliDvfsStatus.currentStep < nextStatus) {
+			boostup = 1;
+			stay_count = 5;
+		} else if (maliDvfsStatus.currentStep > nextStatus){
+			stay_count--;
+		}
+
+		/* boost-up happens at once; down-steps wait out the hysteresis */
+		if( boostup == 1 || stay_count <= 0){
+			/*change mali dvfs status*/
+			update_time_in_state(curStatus);
+			if (!change_mali_dvfs_status(nextStatus, boostup)) {
+				MALI_DEBUG_PRINT(1, ("error on change_mali_dvfs_status \n"));
+				return MALI_FALSE;
+			}
+			boostup = 0; /* NOTE(review): dead store — boostup is not read again */
+			stay_count = 5;
+		}
+	}
+	else
+		stay_count = 5;
+
+	return MALI_TRUE;
+}
+#endif
+
+/* Workqueue entry point: run a single DVFS evaluation pass.
+ * bMaliDvfsRun flags that the worker is currently executing.
+ */
+static void mali_dvfs_work_handler(struct work_struct *w)
+{
+	bMaliDvfsRun = 1;
+	MALI_DEBUG_PRINT(3, ("=== mali_dvfs_work_handler\n"));
+
+#ifdef CONFIG_MALI_DVFS
+	if (mali_dvfs_status(mali_dvfs_utilization) == MALI_FALSE)
+		MALI_DEBUG_PRINT(1, ( "error on mali dvfs status in mali_dvfs_work_handler"));
+#endif
+
+	bMaliDvfsRun = 0;
+}
+
+/* Prepare the DVFS machinery: single-threaded workqueue, ISP lock
+ * counter, default step.
+ * Fix: create_singlethread_workqueue() can return NULL; that result was
+ * previously ignored (the old comment even said "add a error handling
+ * here"), so a later queue_work_on() would dereference NULL.  Now the
+ * failure is reported and MALI_FALSE returned.
+ */
+mali_bool init_mali_dvfs_status(void)
+{
+	if (!mali_dvfs_wq) {
+		mali_dvfs_wq = create_singlethread_workqueue("mali_dvfs");
+		if (!mali_dvfs_wq) {
+			MALI_PRINT(("MALI Error : failed to create mali_dvfs workqueue\n"));
+			return MALI_FALSE;
+		}
+	}
+
+	_mali_osk_atomic_init(&dvfslock_status, 0);
+
+	/* Start from the configured default operating point. */
+	maliDvfsStatus.currentStep = MALI_DVFS_DEFAULT_STEP;
+
+	return MALI_TRUE;
+}
+
+/* Tear down the DVFS workqueue and the ISP lock counter. */
+void deinit_mali_dvfs_status(void)
+{
+	struct workqueue_struct *wq = mali_dvfs_wq;
+
+	mali_dvfs_wq = NULL;
+	if (wq)
+		destroy_workqueue(wq);
+
+	_mali_osk_atomic_term(&dvfslock_status);
+}
+
+#ifdef CONFIG_MALI_DVFS
+/* Record the latest GPU utilization sample and kick the DVFS worker,
+ * pinned to CPU 0.  Always reports success.
+ */
+mali_bool mali_dvfs_handler(unsigned int utilization)
+{
+	mali_dvfs_utilization = utilization;
+	queue_work_on(0, mali_dvfs_wq, &mali_dvfs_work);
+
+	return MALI_TRUE;
+}
+#endif
+
+static mali_bool init_mali_clock(void)
+{
+       mali_bool ret = MALI_TRUE;
+       nPowermode = MALI_POWER_MODE_DEEP_SLEEP;
+
+       if (mali_clock != 0)
+               return ret; /* already initialized */
+
+       mali_dvfs_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED, 0);
+       mali_isp_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED, 0);
+
+       if (mali_dvfs_lock == NULL || mali_isp_lock == NULL)
+               return _MALI_OSK_ERR_FAULT;
+
+       if (!mali_clk_get())
+       {
+               MALI_PRINT(("Error: Failed to get Mali clock\n"));
+               goto err_clk;
+       }
+
+       clk_set_parent(vpll_src_clock, ext_xtal_clock);
+       clk_set_parent(sclk_vpll_clock, fout_vpll_clock);
+       clk_set_parent(mali_parent_clock, sclk_vpll_clock);
+       clk_set_parent(mali_clock, mali_parent_clock);
+
+       if (!atomic_read(&clk_active)) {
+               if (clk_enable(mali_clock) < 0) {
+                       MALI_PRINT(("Error: Failed to enable clock\n"));
+                       goto err_clk;
+               }
+               atomic_set(&clk_active, 1);
+       }
+
+       mali_clk_set_rate((unsigned int)mali_gpu_clk, GPU_MHZ);
+
+       MALI_PRINT(("init_mali_clock mali_clock %x\n", mali_clock));
+
+#ifdef CONFIG_REGULATOR
+       g3d_regulator = regulator_get(NULL, "vdd_g3d");
+
+       if (IS_ERR(g3d_regulator))
+       {
+               MALI_PRINT( ("MALI Error : failed to get vdd_g3d\n"));
+               ret = MALI_FALSE;
+               goto err_regulator;
+       }
+
+       mali_gpu_vol = mali_runtime_resume.vol;
+#ifdef EXYNOS4_ASV_ENABLED
+       mali_gpu_vol = get_match_volt(ID_G3D, mali_gpu_clk * GPU_ASV_VOLT);
+       mali_runtime_resume.vol = get_match_volt(ID_G3D, mali_runtime_resume.clk * GPU_ASV_VOLT);
+#endif
+
+       regulator_enable(g3d_regulator);
+       mali_regulator_set_voltage(mali_gpu_vol, mali_gpu_vol);
+
+#ifdef EXYNOS4_ASV_ENABLED
+       exynos_set_abb(ID_G3D, get_match_abb(ID_G3D, mali_runtime_resume.clk * GPU_ASV_VOLT));
+#endif
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
+                       MALI_PROFILING_EVENT_CHANNEL_GPU|
+                       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                       mali_gpu_clk, mali_gpu_vol/1000, 0, 0, 0);
+#endif
+       mali_clk_put(MALI_FALSE);
+
+       return MALI_TRUE;
+
+#ifdef CONFIG_REGULATOR
+err_regulator:
+       regulator_put(g3d_regulator);
+#endif
+err_clk:
+       mali_clk_put(MALI_TRUE);
+
+       return ret;
+}
+
+/* Release the regulator and all cached clocks.  A no-op when the clock
+ * tree was never initialised.  Always reports success.
+ */
+static mali_bool deinit_mali_clock(void)
+{
+	if (!mali_clock)
+		return MALI_TRUE;
+
+#ifdef CONFIG_REGULATOR
+	if (g3d_regulator) {
+		regulator_put(g3d_regulator);
+		g3d_regulator = NULL;
+	}
+#endif
+	mali_clk_put(MALI_TRUE);
+
+	return MALI_TRUE;
+}
+
+/* Ungate the Mali clock after a sleep and restore the operating point.
+ * If the ISP currently holds the DVFS lock, only the ABB setting is
+ * refreshed and the locked frequency is left alone.  Otherwise the clock
+ * rate (and, when resuming from below the runtime-resume point, the
+ * voltage first) is reprogrammed.  Always returns success.
+ */
+static _mali_osk_errcode_t enable_mali_clocks(void)
+{
+	int err;
+
+	if (atomic_read(&clk_active) == 0) {
+		err = clk_enable(mali_clock);
+		/* NOTE(review): err is only logged, never acted upon. */
+		MALI_DEBUG_PRINT(3,("enable_mali_clocks mali_clock %p error %d \n", mali_clock, err));
+		atomic_set(&clk_active, 1);
+	}
+
+	_mali_osk_mutex_wait(mali_isp_lock);
+
+	if (mali_dvfs_control == mali_dvfs[ISP_LOCK_STEP].clock) {
+		MALI_DEBUG_PRINT(1, ("DVFS is already locked by ISP\n"));
+
+#ifdef EXYNOS4_ASV_ENABLED
+		if (samsung_rev() == EXYNOS3470_REV_2_0)
+			exynos_set_abb(ID_G3D, get_match_abb(ID_G3D, mali_dvfs[ISP_LOCK_STEP].clock * GPU_ASV_VOLT));
+#endif
+		_mali_osk_mutex_signal(mali_isp_lock);
+		MALI_SUCCESS;
+	}
+
+	/* set clock rate */
+#ifdef CONFIG_MALI_DVFS
+#ifdef EXYNOS4_ASV_ENABLED
+	if (samsung_rev() == EXYNOS3470_REV_2_0)
+		exynos_set_abb(ID_G3D, get_match_abb(ID_G3D, mali_runtime_resume.clk * GPU_ASV_VOLT));
+#endif
+
+	if (mali_dvfs_control != 0 || mali_gpu_clk >= mali_runtime_resume.clk) {
+		/* Keep the current (or manually controlled) frequency. */
+		mali_clk_set_rate(mali_gpu_clk, GPU_MHZ);
+	} else {
+		/* Coming back below the resume point: raise voltage before clock. */
+#ifdef CONFIG_REGULATOR
+		mali_regulator_set_voltage(mali_runtime_resume.vol, mali_runtime_resume.vol);
+#ifdef EXYNOS4_ASV_ENABLED
+		exynos_set_abb(ID_G3D, get_match_abb(ID_G3D, mali_runtime_resume.clk * GPU_ASV_VOLT));
+#endif
+#endif
+		mali_clk_set_rate(mali_runtime_resume.clk, GPU_MHZ);
+		set_mali_dvfs_current_step(mali_runtime_resume.step);
+	}
+#else
+	mali_clk_set_rate((unsigned int)mali_gpu_clk, GPU_MHZ);
+	maliDvfsStatus.currentStep = MALI_DVFS_DEFAULT_STEP;
+#endif
+
+	_mali_osk_mutex_signal(mali_isp_lock);
+	MALI_SUCCESS;
+}
+
+/* Gate the Mali clock when it is running; on EXYNOS3470 rev 2.0 parts the
+ * body bias is bypassed first.  Always returns success.
+ */
+static _mali_osk_errcode_t disable_mali_clocks(void)
+{
+#ifdef EXYNOS4_ASV_ENABLED
+	if (samsung_rev() == EXYNOS3470_REV_2_0)
+		exynos_set_abb(ID_G3D, ABB_BYPASS);
+#endif
+
+	if (atomic_read(&clk_active) != 0) {
+		MALI_DEBUG_PRINT(3, ("disable_mali_clocks mali_clock %p\n", mali_clock));
+		clk_disable(mali_clock);
+		atomic_set(&clk_active, 0);
+	}
+
+	MALI_SUCCESS;
+}
+
+/* Driver-load entry point: bring up clocks/regulator, map the clock
+ * status register, initialise DVFS state, then power the GPU on.
+ */
+_mali_osk_errcode_t mali_platform_init(struct device *dev)
+{
+	MALI_CHECK(init_mali_clock(), _MALI_OSK_ERR_FAULT);
+	/* NOTE(review): init_mali_clock() may have enabled the clock and set
+	 * clk_active to 1; clearing the flag here makes the next
+	 * enable_mali_clocks() call clk_enable() again — confirm the
+	 * enable/disable pairing is intended. */
+	atomic_set(&clk_active, 0);
+
+#ifdef CONFIG_MALI_DVFS
+	if (!clk_register_map)
+		clk_register_map = _mali_osk_mem_mapioregion(CLK_DIV_STAT_G3D, 0x20, CLK_DESC);
+
+	if (!init_mali_dvfs_status())
+		MALI_DEBUG_PRINT(1, ("mali_platform_init failed\n"));
+
+	maliDvfsStatus.currentStep = MALI_DVFS_DEFAULT_STEP;
+#endif
+	mali_platform_power_mode_change(dev, MALI_POWER_MODE_ON);
+	MALI_SUCCESS;
+}
+
+/* Driver-exit hook: power the GPU down and undo mali_platform_init(). */
+_mali_osk_errcode_t mali_platform_deinit(struct device *dev)
+{
+	mali_platform_power_mode_change(dev, MALI_POWER_MODE_DEEP_SLEEP);
+	deinit_mali_clock();
+
+#ifdef CONFIG_MALI_DVFS
+	deinit_mali_dvfs_status();
+	if (clk_register_map) {
+		_mali_osk_mem_unmapioregion(CLK_DIV_STAT_G3D, 0x20, clk_register_map);
+		clk_register_map = NULL;
+	}
+#endif
+	MALI_SUCCESS;
+}
+
+/* Transition the GPU between ON and the two sleep modes.  Clock
+ * enable/disable runs only on a real state change, tracked in nPowermode;
+ * each transition also emits a profiling freq/volt event when profiling
+ * is built in.  Always returns success.
+ */
+_mali_osk_errcode_t mali_platform_power_mode_change(struct device *dev, mali_power_mode power_mode)
+{
+	switch (power_mode)
+	{
+	case MALI_POWER_MODE_ON:
+		MALI_DEBUG_PRINT(3, ("Mali platform: Got MALI_POWER_MODE_ON event, %s\n",
+					nPowermode ? "powering on" : "already on"));
+		/* Only ungate when we were actually asleep. */
+		if (nPowermode == MALI_POWER_MODE_LIGHT_SLEEP || nPowermode == MALI_POWER_MODE_DEEP_SLEEP)	{
+			MALI_DEBUG_PRINT(4, ("enable clock\n"));
+			enable_mali_clocks();
+
+#if defined(CONFIG_MALI400_PROFILING)
+			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+			MALI_PROFILING_EVENT_CHANNEL_GPU |
+			MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+			mali_gpu_clk, mali_gpu_vol/1000, 0, 0, 0);
+#endif
+			nPowermode = power_mode;
+		}
+		break;
+	case MALI_POWER_MODE_DEEP_SLEEP:
+	case MALI_POWER_MODE_LIGHT_SLEEP:
+		MALI_DEBUG_PRINT(3, ("Mali platform: Got %s event, %s\n", power_mode == MALI_POWER_MODE_LIGHT_SLEEP ?
+					"MALI_POWER_MODE_LIGHT_SLEEP" : "MALI_POWER_MODE_DEEP_SLEEP",
+					nPowermode ? "already off" : "powering off"));
+		/* Only gate when currently running. */
+		if (nPowermode == MALI_POWER_MODE_ON)	{
+			disable_mali_clocks();
+
+#if defined(CONFIG_MALI400_PROFILING)
+			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+			MALI_PROFILING_EVENT_CHANNEL_GPU |
+			MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+			0, 0, 0, 0, 0);
+#endif
+			nPowermode = power_mode;
+		}
+		break;
+	}
+	MALI_SUCCESS;
+}
+
+/* Utilization callback from the Mali core driver; feeds the DVFS worker
+ * while the GPU is powered on, and is a no-op otherwise.
+ */
+void mali_gpu_utilization_handler(struct mali_gpu_utilization_data *data)
+{
+	if (nPowermode != MALI_POWER_MODE_ON)
+		return;
+
+#ifdef CONFIG_MALI_DVFS
+	if (mali_dvfs_handler(data->utilization_gpu) == MALI_FALSE)
+		MALI_DEBUG_PRINT(1, ("error on mali dvfs status in utilization\n"));
+#endif
+}
+
+#ifdef CONFIG_MALI_DVFS
+/* Accumulate the jiffies spent at DVFS step `level` since the previous
+ * call.  The first call only seeds the static timestamp.
+ */
+static void update_time_in_state(int level)
+{
+	u64 now;
+	static u64 prev_time = 0;
+
+	if (prev_time == 0)
+		prev_time = get_jiffies_64();
+
+	now = get_jiffies_64();
+	mali_dvfs_time[level] += now - prev_time;
+	prev_time = now;
+}
+
+/* sysfs read handler: print one "<clock> <jiffies-in-state>" line per
+ * DVFS step.  Residency for the active step is brought up to date first.
+ * Output is clamped to PAGE_SIZE with a forced trailing newline and NUL
+ * if the table would overflow the buffer.
+ */
+ssize_t show_time_in_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	int i;
+
+	update_time_in_state(maliDvfsStatus.currentStep);
+
+	for (i = 0; i < MALI_DVFS_STEPS; i++) {
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "%d %llu\n",
+						mali_dvfs[i].clock,
+						mali_dvfs_time[i]);
+	}
+
+	if (ret < PAGE_SIZE - 1) {
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	} else {
+		/* Truncated: terminate the page manually. */
+		buf[PAGE_SIZE - 2] = '\n';
+		buf[PAGE_SIZE - 1] = '\0';
+		ret = PAGE_SIZE - 1;
+	}
+	return ret;
+}
+
+/* sysfs write handler: any write resets the per-step residency counters. */
+ssize_t set_time_in_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	int step;
+
+	for (step = MALI_DVFS_STEPS - 1; step >= 0; step--)
+		mali_dvfs_time[step] = 0;
+
+	return count;
+}
+#endif
+
+/* Lock GPU DVFS at the dedicated ISP operating point
+ * (mali_dvfs[ISP_LOCK_STEP]).  Raises the G3D voltage, reroutes the
+ * clock tree through a retuned VPLL, and pins mali_dvfs_control to the
+ * locked clock so the DVFS worker keeps this step.  Returns the new lock
+ * count on success, -1 if the lock state is invalid or already taken.
+ */
+int mali_dvfs_level_lock(void)
+{
+	int prev_status = _mali_osk_atomic_read(&dvfslock_status);
+	unsigned long rate = (mali_dvfs[ISP_LOCK_STEP].clock * mali_dvfs[ISP_LOCK_STEP].freq);
+	unsigned int read_val;
+#ifdef EXYNOS4_ASV_ENABLED
+	int lock_vol;
+#endif
+
+	if (prev_status < 0) {
+		MALI_PRINT(("DVFS lock status is not valid for lock\n"));
+		return -1;
+	} else if (prev_status > 0) {
+		MALI_PRINT(("DVFS lock already enabled\n"));
+		return -1;
+	}
+
+	_mali_osk_mutex_wait(mali_isp_lock);
+
+#ifdef CONFIG_REGULATOR
+	if(IS_ERR_OR_NULL(g3d_regulator))
+	{
+		MALI_DEBUG_PRINT(1, ("error on mali_regulator_set_voltage : g3d_regulator is null\n"));
+		_mali_osk_mutex_signal(mali_isp_lock);
+		return -1;
+	}
+
+	/* Raise the voltage before touching the clock tree. */
+#ifdef EXYNOS4_ASV_ENABLED
+	lock_vol = get_match_volt(ID_G3D, mali_dvfs[ISP_LOCK_STEP].clock * GPU_ASV_VOLT);
+	regulator_set_voltage(g3d_regulator, lock_vol, lock_vol);
+	exynos_set_abb(ID_G3D, get_match_abb(ID_G3D, mali_dvfs[ISP_LOCK_STEP].clock * GPU_ASV_VOLT));
+#else
+
+	regulator_set_voltage(g3d_regulator, mali_dvfs[ISP_LOCK_STEP].vol, mali_dvfs[ISP_LOCK_STEP].vol);
+#endif
+	mali_gpu_vol = regulator_get_voltage(g3d_regulator);
+	MALI_DEBUG_PRINT(1, ("Mali voltage: %d\n", mali_gpu_vol));
+#endif
+
+	if (mali_clk_get() == MALI_FALSE) {
+		_mali_osk_mutex_signal(mali_isp_lock);
+		return -1;
+	}
+	/* Halve the current rate, then retune VPLL and walk the mux chain. */
+	clk_set_rate(mali_clock, (clk_get_rate(mali_clock) / 2));
+	clk_set_parent(mali_parent_clock, mout_epll_clock);
+	clk_set_parent(sclk_vpll_clock, ext_xtal_clock);
+	clk_set_rate(fout_vpll_clock, mali_vpll_clk * GPU_MHZ);
+	clk_set_parent(vpll_src_clock, ext_xtal_clock);
+	clk_set_parent(sclk_vpll_clock, fout_vpll_clock);
+	clk_set_parent(mali_parent_clock, sclk_vpll_clock);
+
+	/* Busy-wait until the G3D mux reports it switched to VPLL. */
+	do {
+		cpu_relax();
+		read_val = __raw_readl(EXYNOS4_CLKMUX_STAT_G3D0);
+	} while (((read_val >> 4) & 0x7) != 0x2);
+
+	clk_set_parent(mali_clock, mali_parent_clock);
+	clk_set_rate(mali_clock, rate);
+
+	rate = clk_get_rate(mali_clock);
+	mali_gpu_clk = (int)(rate / GPU_MHZ);
+	mali_clk_put(MALI_FALSE);
+
+	/* Non-zero control value makes the DVFS worker hold this step. */
+	mali_dvfs_control = mali_dvfs[ISP_LOCK_STEP].clock;
+
+	_mali_osk_mutex_signal(mali_isp_lock);
+	MALI_DEBUG_PRINT(1, ("DVFS is locked by ISP\n"));
+
+	return _mali_osk_atomic_inc_return(&dvfslock_status);
+}
+
+/* Release the ISP DVFS lock: restore the runtime-resume operating point
+ * and re-enable normal DVFS.  Returns the decremented lock count, or -1
+ * when the lock was not held.
+ */
+int mali_dvfs_level_unlock(void)
+{
+	int prev_status = _mali_osk_atomic_read(&dvfslock_status);
+
+	if (prev_status <= 0) {
+		MALI_PRINT(("DVFS lock status is not valid for unlock\n"));
+		return -1;
+	}
+
+	/* prev_status >= 1: the lock is held — restore the resume point. */
+	_mali_osk_mutex_wait(mali_isp_lock);
+	maliDvfsStatus.currentStep = mali_runtime_resume.step;
+	mali_gpu_clk = mali_runtime_resume.clk;
+
+	mali_dvfs_control = 0;
+	MALI_DEBUG_PRINT(1, ("DVFS lock is released ISP\n"));
+	_mali_osk_mutex_signal(mali_isp_lock);
+
+	return _mali_osk_atomic_dec_return(&dvfslock_status);
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos3470/exynos4_pmm.h b/drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos3470/exynos4_pmm.h
new file mode 100644 (file)
index 0000000..0886a35
--- /dev/null
@@ -0,0 +1,95 @@
+/* drivers/gpu/mali400/mali/platform/exynos3470/exynos4_pmm.h
+ *
+ * Copyright 2011 by S.LSI. Samsung Electronics Inc.
+ * San#24, Nongseo-Dong, Giheung-Gu, Yongin, Korea
+ *
+ * Samsung SoC Mali400 DVFS driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software FoundatIon.
+ */
+
+/**
+ * @file exynos4_pmm.h
+ * Platform specific Mali driver functions for the exynos 4XXX based platforms
+ */
+
+#ifndef __EXYNOS4_PMM_H__
+#define __EXYNOS4_PMM_H__
+
+#include "mali_utgard.h"
+#include "mali_osk.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @brief description of power change reasons
+ */
+typedef enum mali_power_mode_tag
+{
+       MALI_POWER_MODE_ON,
+       MALI_POWER_MODE_LIGHT_SLEEP,
+       MALI_POWER_MODE_DEEP_SLEEP,
+} mali_power_mode;
+
+/** @brief Platform specific setup and initialisation of MALI
+ *
+ * This is called from the entrypoint of the driver to initialize the platform
+ *
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_init(struct device *dev);
+
+/** @brief Platform specific deinitialisation of MALI
+ *
+ * This is called on the exit of the driver to terminate the platform
+ *
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_deinit(struct device *dev);
+
+/** @brief Platform specific powerdown sequence of MALI
+ *
+ * Call as part of platform init if there is no PMM support, else the
+ * PMM will call it.
+ * There are three power modes defined:
+ *  1) MALI_POWER_MODE_ON
+ *  2) MALI_POWER_MODE_LIGHT_SLEEP
+ *  3) MALI_POWER_MODE_DEEP_SLEEP
+ * MALI power management module transitions to MALI_POWER_MODE_LIGHT_SLEEP mode when MALI is idle
+ * for idle timer (software timer defined in mali_pmm_policy_jobcontrol.h) duration, MALI transitions
+ * to MALI_POWER_MODE_LIGHT_SLEEP mode during timeout if there are no more jobs queued.
+ * MALI power management module transitions to MALI_POWER_MODE_DEEP_SLEEP mode when OS does system power
+ * off.
+ * Customer has to add power down code when MALI transitions to MALI_POWER_MODE_LIGHT_SLEEP or MALI_POWER_MODE_DEEP_SLEEP
+ * mode.
+ * MALI_POWER_MODE_ON mode is entered when the MALI is to be powered up. Some customers want to control voltage regulators during
+ * the whole system powers on/off. Customer can track in this function whether the MALI is powered up from
+ * MALI_POWER_MODE_LIGHT_SLEEP or MALI_POWER_MODE_DEEP_SLEEP mode and manage the voltage regulators as well.
+ * @param power_mode defines the power modes
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_power_mode_change(struct device *dev, mali_power_mode power_mode);
+
+
+/** @brief Platform specific handling of GPU utilization data
+ *
+ * When GPU utilization data is enabled, this function will be
+ * periodically called.
+ *
+ * @param utilization The workload utilization of the Mali GPU. 0 = no utilization, 256 = full utilization.
+ */
+void mali_gpu_utilization_handler(struct mali_gpu_utilization_data *data);
+
+#ifdef CONFIG_MALI_DVFS
+ssize_t show_time_in_state(struct device *dev, struct device_attribute *attr, char *buf);
+ssize_t set_time_in_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count);
+extern int mali_dvfs_level_lock(void);
+extern int mali_dvfs_level_unlock(void);
+#endif
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos4415/exynos4.c b/drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos4415/exynos4.c
new file mode 100644 (file)
index 0000000..482577d
--- /dev/null
@@ -0,0 +1,86 @@
+/* drivers/gpu/mali400/mali/platform/exynos4415/exynos4.c
+ *
+ * Copyright 2011 by S.LSI. Samsung Electronics Inc.
+ * San#24, Nongseo-Dong, Giheung-Gu, Yongin, Korea
+ *
+ * Samsung SoC Mali400 DVFS driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software FoundatIon.
+ */
+
+/**
+ * @file exynos4.c
+ * Platform specific Mali driver functions for the exynos 4XXX based platforms
+ */
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+
+#include <plat/devs.h>
+#include "mali_kernel_common.h"
+#include "exynos4_pmm.h"
+
+#define MALI_GP_IRQ       EXYNOS4_IRQ_GP_3D
+#define MALI_PP0_IRQ      EXYNOS4_IRQ_PP0_3D
+#define MALI_PP1_IRQ      EXYNOS4_IRQ_PP1_3D
+#define MALI_PP2_IRQ      EXYNOS4_IRQ_PP2_3D
+#define MALI_PP3_IRQ      EXYNOS4_IRQ_PP3_3D
+#define MALI_GP_MMU_IRQ   EXYNOS4_IRQ_GPMMU_3D
+#define MALI_PP0_MMU_IRQ  EXYNOS4_IRQ_PPMMU0_3D
+#define MALI_PP1_MMU_IRQ  EXYNOS4_IRQ_PPMMU1_3D
+#define MALI_PP2_MMU_IRQ  EXYNOS4_IRQ_PPMMU2_3D
+#define MALI_PP3_MMU_IRQ  EXYNOS4_IRQ_PPMMU3_3D
+
+/* Mali400 MP4 register window (physical base 0x13000000) plus the GP, PP
+ * and MMU interrupt lines wired on this SoC. */
+static struct resource mali_gpu_resources[] =
+{
+	MALI_GPU_RESOURCES_MALI400_MP4(0x13000000,
+				       MALI_GP_IRQ, MALI_GP_MMU_IRQ,
+				       MALI_PP0_IRQ, MALI_PP0_MMU_IRQ,
+				       MALI_PP1_IRQ, MALI_PP1_MMU_IRQ,
+				       MALI_PP2_IRQ, MALI_PP2_MMU_IRQ,
+				       MALI_PP3_IRQ, MALI_PP3_MMU_IRQ)
+};
+
+/* Static platform data handed to the Mali core driver via
+ * platform_device_add_data(). */
+static struct mali_gpu_device_data mali_gpu_data =
+{
+	.shared_mem_size = 256 * 1024 * 1024, /* 256MB */
+	.fb_start = 0x40000000,
+	.fb_size = 0xb1000000,
+	.utilization_interval = 100, /* 100ms */
+	.utilization_callback = mali_gpu_utilization_handler,
+};
+
+int mali_platform_device_register(void)
+{
+       int err;
+
+       MALI_DEBUG_PRINT(4, ("mali_platform_device_register() called\n"));
+
+       /* Connect resources to the device */
+       err = platform_device_add_resources(&exynos4_device_g3d, mali_gpu_resources, sizeof(mali_gpu_resources) / sizeof(mali_gpu_resources[0]));
+       if (0 == err)
+       {
+               err = platform_device_add_data(&exynos4_device_g3d, &mali_gpu_data, sizeof(mali_gpu_data));
+               if (0 == err)
+               {
+                       mali_platform_init(&(exynos4_device_g3d.dev));
+#ifdef CONFIG_PM_RUNTIME
+                       pm_runtime_set_autosuspend_delay(&(exynos4_device_g3d.dev), 1000);
+                       pm_runtime_use_autosuspend(&(exynos4_device_g3d.dev));
+                       pm_runtime_enable(&(exynos4_device_g3d.dev));
+#endif
+                       return 0;
+               }
+
+       }
+       return err;
+}
+
+/* Platform-device teardown: undo mali_platform_init() for the G3D device. */
+void mali_platform_device_unregister(void)
+{
+	MALI_DEBUG_PRINT(4, ("mali_platform_device_unregister() called\n"));
+	mali_platform_deinit(&(exynos4_device_g3d.dev));
+}
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos4415/exynos4_pmm.c b/drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos4415/exynos4_pmm.c
new file mode 100644 (file)
index 0000000..644df57
--- /dev/null
@@ -0,0 +1,941 @@
+/* drivers/gpu/mali400/mali/platform/exynos4415/exynos4_pmm.c
+ *
+ * Copyright 2011 by S.LSI. Samsung Electronics Inc.
+ * San#24, Nongseo-Dong, Giheung-Gu, Yongin, Korea
+ *
+ * Samsung SoC Mali400 DVFS driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software FoundatIon.
+ */
+
+/**
+ * @file exynos4_pmm.c
+ * Platform specific Mali driver functions for the exynos 4XXX based platforms
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "exynos4_pmm.h"
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+/* lock/unlock CPU freq by Mali */
+#include <linux/types.h>
+#include <mach/cpufreq.h>
+#include <mach/regs-clock-exynos4415.h>
+#include <mach/asv-exynos.h>
+#ifdef CONFIG_CPU_FREQ
+#ifndef CONFIG_SOC_EXYNOS4415
+#define EXYNOS4_ASV_ENABLED
+#endif
+#endif
+
+#ifdef CONFIG_MALI_DVFS_FULL_LEVEL
+#define MALI_DVFS_FULL_LEVEL
+#endif
+
+/* Some defines changed names in later Odroid-A kernels. Make sure it works for both. */
+#ifndef S5P_G3D_CONFIGURATION
+#define S5P_G3D_CONFIGURATION EXYNOS4_G3D_CONFIGURATION
+#endif
+#ifndef S5P_G3D_STATUS
+#define S5P_G3D_STATUS (EXYNOS4_G3D_CONFIGURATION + 0x4)
+#endif
+#ifndef S5P_INT_LOCAL_PWR_EN
+#define S5P_INT_LOCAL_PWR_EN EXYNOS_INT_LOCAL_PWR_EN
+#endif
+
+#include <asm/io.h>
+#include <mach/regs-pmu.h>
+#include <linux/workqueue.h>
+
+#ifdef MALI_DVFS_FULL_LEVEL
+#define MALI_DVFS_STEPS 7
+#else
+#define MALI_DVFS_STEPS 4
+#endif
+
+#define MALI_DVFS_WATING 10 /* msec */
+#define MALI_DVFS_DEFAULT_STEP 1
+#define PD_G3D_LOCK_FLAG 2
+
+#define MALI_DVFS_CLK_DEBUG 0
+#define CPUFREQ_LOCK_DURING_440 1
+
+static int bMaliDvfsRun = 0; /* set while the DVFS work handler is executing */
+
+typedef struct mali_dvfs_tableTag{
+       unsigned int clock; /* GPU clock in MHz */
+       unsigned int freq; /* multiplier applied in mali_clk_set_rate() (Hz per MHz) */
+       unsigned int vol; /* supply voltage in uV */
+       unsigned int downthreshold; /* utilization %% below which we may step down */
+       unsigned int upthreshold; /* utilization %% above which we step up */
+}mali_dvfs_table;
+
+typedef struct mali_dvfs_statusTag{
+       unsigned int currentStep; /* index into mali_dvfs[] */
+       mali_dvfs_table * pCurrentDvfs;
+
+} mali_dvfs_status_t;
+
+/*dvfs status*/
+mali_dvfs_status_t maliDvfsStatus;
+int mali_dvfs_control; /* 0 = automatic DVFS; non-zero = user-pinned clock (MHz) via module param */
+
+typedef struct mali_runtime_resumeTag{
+               int clk; /* MHz to restore on runtime resume */
+               int vol; /* uV to restore on runtime resume */
+               unsigned int step; /* matching mali_dvfs[] index */
+}mali_runtime_resume_table;
+
+mali_runtime_resume_table mali_runtime_resume = {266, 875000, 1};
+
+#ifdef MALI_DVFS_FULL_LEVEL
+/*dvfs table updated on 131203*/
+mali_dvfs_table mali_dvfs[MALI_DVFS_STEPS]={
+       /*step 0*/{160, 1000000,  850000,  0,  60},
+       /*step 1*/{266, 1000000,  875000, 52,  70},
+       /*step 2*/{350, 1000000,  950000, 62,  80},
+       /*step 3*/{440, 1000000, 1025000, 75,  80},
+       /*step 4*/{550, 1000000, 1125000, 85,  90},
+       /*step 5*/{667, 1000000, 1225000, 85,  95},
+       /*step 6*/{733, 1000000, 1300000, 95, 100} };
+#else
+/*dvfs table updated on 131203*/
+mali_dvfs_table mali_dvfs[MALI_DVFS_STEPS]={
+       /*step 0*/{160, 1000000,  850000,  0,  70},
+       /*step 1*/{266, 1000000,  875000, 62,  90},
+       /*step 2*/{350, 1000000,  950000, 85,  90},
+       /*step 3*/{440, 1000000, 1025000, 85, 100} };
+#endif
+#endif
+
+/* Exynos4415 */
+int mali_gpu_clk = 266; /* current GPU clock (MHz), exported read-only below */
+int mali_gpu_vol = 875000; /* current GPU voltage (uV), exported read-only below */
+#ifdef MALI_DVFS_FULL_LEVEL
+char *mali_freq_table = "733 667 550 440 350 266 160";
+#else
+char *mali_freq_table = "440 350 266 160";
+#endif
+
+#define EXTXTALCLK_NAME  "ext_xtal"
+#define VPLLSRCCLK_NAME  "vpll_src"
+#define FOUTVPLLCLK_NAME "fout_vpll"
+#define SCLKEPLLCLK_NAME "sclk_epll"
+#define SCLVPLLCLK_NAME  "sclk_vpll"
+#define GPUMOUT1CLK_NAME "mout_g3d1"
+#define GPUMOUT0CLK_NAME "mout_g3d0"
+#define GPUCLK_NAME      "sclk_g3d"
+#define CLK_DIV_STAT_G3D 0x1003C62C /* physical address of the G3D clock-divider status register */
+#define CLK_DESC         "clk-divider-status"
+
+static struct clk *ext_xtal_clock = NULL;
+static struct clk *vpll_src_clock = NULL;
+static struct clk *fout_vpll_clock = NULL;
+static struct clk *sclk_epll_clock = NULL;
+static struct clk *sclk_vpll_clock = NULL;
+static struct clk *mali_parent_clock = NULL;
+static struct clk *mali_clock = NULL;
+
+static unsigned int GPU_MHZ    = 1000000; /* Hz per MHz multiplier used for clk_set_rate() */
+static unsigned int const GPU_ASV_VOLT = 1000; /* scales MHz into the unit get_match_volt() expects */
+static int nPowermode; /* last power mode seen by mali_platform_power_mode_change() */
+static atomic_t clk_active; /* 1 while mali_clock is enabled */
+
+mali_io_address clk_register_map = 0; /* ioremapped CLK_DIV_STAT_G3D, used by mali_platform_wating() */
+
+/* export GPU frequency as a read-only parameter so that it can be read in /sys */
+module_param(mali_gpu_clk, int, S_IRUSR | S_IRGRP | S_IROTH);
+module_param(mali_gpu_vol, int, S_IRUSR | S_IRGRP | S_IROTH);
+module_param(mali_freq_table, charp, S_IRUSR | S_IRGRP | S_IROTH);
+#ifdef CONFIG_MALI_DVFS
+module_param(mali_dvfs_control, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_dvfs_control, "Mali Current DVFS");
+DEVICE_ATTR(time_in_state, S_IRUGO|S_IWUSR, show_time_in_state, set_time_in_state);
+MODULE_PARM_DESC(time_in_state, "Time-in-state of Mali DVFS");
+#endif
+MODULE_PARM_DESC(mali_gpu_clk, "Mali Current Clock");
+MODULE_PARM_DESC(mali_gpu_vol, "Mali Current Voltage");
+MODULE_PARM_DESC(mali_freq_table, "Mali frequency table");
+
+#ifdef CONFIG_REGULATOR
+struct regulator *g3d_regulator = NULL; /* vdd_g3d supply, acquired in init_mali_clock() */
+#endif
+atomic_t mali_cpufreq_lock; /* 1 while a CPU-freq floor is held on behalf of Mali */
+
+/* DVFS */
+#ifdef CONFIG_MALI_DVFS
+static unsigned int mali_dvfs_utilization = 255; /* last utilization sample (0..255) */
+static void update_time_in_state(int level);
+u64 mali_dvfs_time[MALI_DVFS_STEPS]; /* jiffies accumulated per DVFS step */
+#endif
+
+static void mali_dvfs_work_handler(struct work_struct *w);
+static struct workqueue_struct *mali_dvfs_wq = 0;
+_mali_osk_mutex_t *mali_dvfs_lock; /* serializes voltage/clock/step changes */
+int mali_runtime_resumed = -1; /* >= 0: step to force on next decideNextStatus() */
+static DECLARE_WORK(mali_dvfs_work, mali_dvfs_work_handler);
+
+int cpufreq_lock_by_mali(unsigned int freq) /* hold a CPU frequency floor of freq MHz; 0 on success or no-op */
+{
+#ifdef CONFIG_EXYNOS4_CPUFREQ
+       unsigned int level;
+
+       if (atomic_read(&mali_cpufreq_lock) == 0) { /* take the lock only once */
+               if (exynos_cpufreq_get_level(freq * 1000, &level)) { /* MHz -> kHz */
+                       printk(KERN_ERR "Mali: failed to get cpufreq level for %dMHz", freq);
+                       return -EINVAL;
+               }
+
+               if (exynos_cpufreq_lock(DVFS_LOCK_ID_G3D, level)) {
+                       printk(KERN_ERR "Mali: failed to cpufreq lock for L%d", level);
+                       return -EINVAL;
+               }
+
+               atomic_set(&mali_cpufreq_lock, 1);
+               printk(KERN_DEBUG "Mali: cpufreq locked on <%d>%dMHz\n", level, freq);
+       }
+#endif
+       return 0;
+}
+
+void cpufreq_unlock_by_mali(void) /* release the CPU frequency floor taken by cpufreq_lock_by_mali() */
+{
+#ifdef CONFIG_EXYNOS4_CPUFREQ
+       if (atomic_read(&mali_cpufreq_lock) == 1) {
+               exynos_cpufreq_lock_free(DVFS_LOCK_ID_G3D);
+               atomic_set(&mali_cpufreq_lock, 0);
+               printk(KERN_DEBUG "Mali: cpufreq locked off\n");
+       }
+#endif
+}
+
+#ifdef CONFIG_REGULATOR
+void mali_regulator_disable(void) /* drop one enable count on vdd_g3d; no-op if regulator missing */
+{
+       if(IS_ERR_OR_NULL(g3d_regulator))
+       {
+               MALI_DEBUG_PRINT(1, ("error on mali_regulator_disable : g3d_regulator is null\n"));
+               return;
+       }
+       regulator_disable(g3d_regulator);
+}
+
+void mali_regulator_enable(void) /* take one enable count on vdd_g3d; no-op if regulator missing */
+{
+       if(IS_ERR_OR_NULL(g3d_regulator))
+       {
+               MALI_DEBUG_PRINT(1, ("error on mali_regulator_enable : g3d_regulator is null\n"));
+               return;
+       }
+       regulator_enable(g3d_regulator); /* NOTE(review): return value ignored; newer kernels mark it __must_check */
+}
+
+void mali_regulator_set_voltage(int min_uV, int max_uV) /* set vdd_g3d and refresh mali_gpu_vol, under the DVFS mutex */
+{
+       _mali_osk_mutex_wait(mali_dvfs_lock);
+       if(IS_ERR_OR_NULL(g3d_regulator))
+       {
+               MALI_DEBUG_PRINT(1, ("error on mali_regulator_set_voltage : g3d_regulator is null\n"));
+               _mali_osk_mutex_signal(mali_dvfs_lock);
+               return;
+       }
+       MALI_DEBUG_PRINT(1, ("= regulator_set_voltage: %d, %d \n", min_uV, max_uV));
+       regulator_set_voltage(g3d_regulator, min_uV, max_uV);
+       mali_gpu_vol = regulator_get_voltage(g3d_regulator); /* read back the voltage actually applied */
+       MALI_DEBUG_PRINT(1, ("Mali voltage: %d\n", mali_gpu_vol));
+       _mali_osk_mutex_signal(mali_dvfs_lock);
+}
+#endif
+
+static unsigned int get_mali_dvfs_status(void) /* current DVFS step index (read without the mutex) */
+{
+       return maliDvfsStatus.currentStep;
+}
+
+mali_bool mali_clk_get(void) /* lazily clk_get() every clock in the G3D tree; MALI_FALSE on first failure */
+{
+       if (ext_xtal_clock == NULL)     {
+               ext_xtal_clock = clk_get(NULL, EXTXTALCLK_NAME);
+               if (IS_ERR(ext_xtal_clock)) {
+                       MALI_PRINT(("MALI Error : failed to get source ext_xtal_clock\n"));
+                       return MALI_FALSE;
+               }
+       }
+
+       if (vpll_src_clock == NULL)     {
+               vpll_src_clock = clk_get(NULL, VPLLSRCCLK_NAME);
+               if (IS_ERR(vpll_src_clock)) {
+                       MALI_PRINT(("MALI Error : failed to get source vpll_src_clock\n"));
+                       return MALI_FALSE;
+               }
+       }
+
+       if (fout_vpll_clock == NULL) {
+               fout_vpll_clock = clk_get(NULL, FOUTVPLLCLK_NAME);
+               if (IS_ERR(fout_vpll_clock)) {
+                       MALI_PRINT(("MALI Error : failed to get source fout_vpll_clock\n"));
+                       return MALI_FALSE;
+               }
+       }
+
+       if (sclk_epll_clock == NULL) {
+               sclk_epll_clock = clk_get(NULL, SCLKEPLLCLK_NAME);
+               if (IS_ERR(sclk_epll_clock)) { /* fix: was IS_ERR(fout_vpll_clock) — copy-paste bug left errors undetected */
+                       MALI_PRINT(("MALI Error : failed to get source sclk_epll_clock\n"));
+                       return MALI_FALSE;
+               }
+       }
+
+       if (sclk_vpll_clock == NULL) {
+               sclk_vpll_clock = clk_get(NULL, SCLVPLLCLK_NAME);
+               if (IS_ERR(sclk_vpll_clock)) {
+                       MALI_PRINT(("MALI Error : failed to get source sclk_vpll_clock\n"));
+                       return MALI_FALSE;
+               }
+       }
+
+       if (mali_parent_clock == NULL) {
+               mali_parent_clock = clk_get(NULL, GPUMOUT1CLK_NAME);
+               if (IS_ERR(mali_parent_clock)) {
+                       MALI_PRINT(("MALI Error : failed to get source mali parent clock\n"));
+                       return MALI_FALSE;
+               }
+       }
+
+       /* mali clock get always. */
+       if (mali_clock == NULL) {
+               mali_clock = clk_get(NULL, GPUCLK_NAME);
+               if (IS_ERR(mali_clock)) {
+                       MALI_PRINT(("MALI Error : failed to get source mali clock\n"));
+                       return MALI_FALSE;
+               }
+       }
+
+       return MALI_TRUE;
+}
+
+void mali_clk_put(mali_bool binc_mali_clock) /* clk_put() intermediates; mali_clock/fout_vpll only when binc_mali_clock */
+{
+       if (mali_parent_clock)
+       {
+               clk_put(mali_parent_clock);
+               mali_parent_clock = NULL;
+       }
+
+       if (sclk_vpll_clock)
+       {
+               clk_put(sclk_vpll_clock);
+               sclk_vpll_clock = NULL;
+       }
+
+       if (binc_mali_clock && fout_vpll_clock) /* kept across DVFS changes unless full teardown requested */
+       {
+               clk_put(fout_vpll_clock);
+               fout_vpll_clock = NULL;
+       }
+
+       if (sclk_epll_clock)
+       {
+               clk_put(sclk_epll_clock);
+               sclk_epll_clock = NULL;
+       }
+
+       if (vpll_src_clock)
+       {
+               clk_put(vpll_src_clock);
+               vpll_src_clock = NULL;
+       }
+
+       if (ext_xtal_clock)
+       {
+               clk_put(ext_xtal_clock);
+               ext_xtal_clock = NULL;
+       }
+
+       if (binc_mali_clock && mali_clock)
+       {
+               clk_put(mali_clock);
+               mali_clock = NULL;
+       }
+}
+
+void mali_clk_set_rate(unsigned int clk, unsigned int mhz) /* reprogram VPLL to clk*mhz Hz, parking the mux on EPLL meanwhile */
+{
+       int err;
+       unsigned int read_val;
+       unsigned long rate = (unsigned long)clk * (unsigned long)mhz;
+       unsigned long CurRate = 0;
+
+       _mali_osk_mutex_wait(mali_dvfs_lock);
+       MALI_DEBUG_PRINT(3, ("Mali platform: Setting frequency to %d mhz\n", clk));
+
+       if (mali_clk_get() == MALI_FALSE) {
+               _mali_osk_mutex_signal(mali_dvfs_lock);
+               return;
+       }
+
+       CurRate = clk_get_rate(mali_clock);
+
+       if (CurRate == 0) {
+               _mali_osk_mutex_signal(mali_dvfs_lock);
+               MALI_PRINT_ERROR(("clk_get_rate[mali_clock] is 0 - return\n"));
+               return;
+       }
+
+       /* slow the GPU clock down before switching PLLs */
+       err = clk_set_rate(mali_clock, CurRate / 4);
+
+       if (err < 0) /* fix: clk_set_rate() returns 0 or a negative errno; "err > 0" never fired */
+               MALI_PRINT_ERROR(("Failed to set Mali clock before change PLL: %d\n", err));
+
+       err = clk_set_parent(mali_parent_clock, sclk_epll_clock);
+
+       if (err)
+               MALI_PRINT_ERROR(("mali_parent set parent to sclk_epll failed\n"));
+
+       do { /* busy-wait for the mux to report EPLL selected (NOTE(review): no timeout) */
+               cpu_relax();
+               read_val = __raw_readl(EXYNOS4415_CLKMUX_STAT_G3D0);
+       } while (((read_val >> 4) & 0x7) != 0x1);
+
+       MALI_DEBUG_PRINT(3, ("Mali platform: set to EPLL EXYNOS4415_CLKMUX_STAT_G3D0: 0x%08x\n", __raw_readl(EXYNOS4415_CLKMUX_STAT_G3D0)));
+
+       err = clk_set_rate(fout_vpll_clock, (unsigned int)clk * GPU_MHZ);
+
+       if (err < 0) /* fix: negative errno on failure */
+               MALI_PRINT_ERROR(("Failed to set fout_vpll clock: %d\n", err));
+
+
+       err = clk_set_parent(vpll_src_clock, ext_xtal_clock);
+
+       if (err)
+               MALI_PRINT_ERROR(("vpll_src set parent to ext_xtal failed\n"));
+
+       err = clk_set_parent(sclk_vpll_clock, fout_vpll_clock);
+
+       if (err)
+               MALI_PRINT_ERROR(("sclk_vpll set parent to fout_vpll failed\n"));
+
+       MALI_DEBUG_PRINT(3, ("Mali platform: set_parent_vpll : %8.x \n", (__raw_readl(EXYNOS4_CLKSRC_TOP0) >> 8) & 0x1));
+
+       err = clk_set_parent(mali_parent_clock, sclk_vpll_clock);
+
+       if (err)
+               MALI_PRINT_ERROR(("mali_parent set parent to sclk_vpll failed\n"));
+
+       do { /* busy-wait for the mux to report VPLL selected (NOTE(review): no timeout) */
+               cpu_relax();
+               read_val = __raw_readl(EXYNOS4415_CLKMUX_STAT_G3D0);
+       } while (((read_val >> 4) & 0x7) != 0x2);
+
+       MALI_DEBUG_PRINT(3, ("SET to VPLL EXYNOS4_CLKMUX_STAT_G3D0 : 0x%08x\n", __raw_readl(EXYNOS4415_CLKMUX_STAT_G3D0)));
+
+       err = clk_set_parent(mali_clock, mali_parent_clock);
+
+       if (err)
+               MALI_PRINT_ERROR(("mali_clock set parent to mali_parent failed\n"));
+
+       if (atomic_read(&clk_active) == 0) {
+               if (clk_enable(mali_clock) < 0) {
+                       _mali_osk_mutex_signal(mali_dvfs_lock);
+                       return;
+               }
+               atomic_set(&clk_active, 1);
+       }
+
+       err = clk_set_rate(mali_clock, rate);
+
+       if (err < 0) /* fix: negative errno on failure */
+               MALI_PRINT_ERROR(("Failed to set Mali clock: %d\n", err));
+
+       rate = clk_get_rate(mali_clock);
+
+       MALI_DEBUG_PRINT(1, ("Mali frequency %lu\n", rate / mhz)); /* fix: rate is unsigned long, was printed with %d */
+       GPU_MHZ = mhz;
+
+       mali_gpu_clk = (int)(rate / mhz);
+       mali_clk_put(MALI_FALSE); /* keep mali_clock and fout_vpll references */
+
+       _mali_osk_mutex_signal(mali_dvfs_lock);
+}
+
+mali_bool set_mali_dvfs_current_step(unsigned int step) /* record the current step, wrapped into table range; always MALI_TRUE */
+{
+       _mali_osk_mutex_wait(mali_dvfs_lock);
+       maliDvfsStatus.currentStep = step % MALI_DVFS_STEPS; /* modulo guards against out-of-range callers */
+       _mali_osk_mutex_signal(mali_dvfs_lock);
+       return MALI_TRUE;
+}
+
+static mali_bool set_mali_dvfs_status(u32 step,mali_bool boostup) /* apply mali_dvfs[step]; volt-before-clock when boosting, reversed when lowering */
+{
+       u32 validatedStep = step;
+#if MALI_DVFS_CLK_DEBUG
+       unsigned int *pRegMaliClkDiv;
+       unsigned int *pRegMaliMpll;
+#endif
+
+       if(boostup)     { /* raising frequency: voltage must be high enough before the clock goes up */
+#ifdef CONFIG_REGULATOR
+               /*change the voltage*/
+#ifdef EXYNOS4_ASV_ENABLED
+               mali_regulator_set_voltage(get_match_volt(ID_G3D, mali_dvfs[step].clock * GPU_ASV_VOLT), get_match_volt(ID_G3D, mali_dvfs[step].clock * GPU_ASV_VOLT));
+               exynos_set_abb(ID_G3D, get_match_abb(ID_G3D, mali_dvfs[step].clock * GPU_ASV_VOLT));
+#else
+               mali_regulator_set_voltage(mali_dvfs[step].vol, mali_dvfs[step].vol);
+#endif
+#endif
+               /*change the clock*/
+               mali_clk_set_rate(mali_dvfs[step].clock, mali_dvfs[step].freq);
+       } else { /* lowering frequency: clock first, then drop the voltage */
+               /*change the clock*/
+               mali_clk_set_rate(mali_dvfs[step].clock, mali_dvfs[step].freq);
+#ifdef CONFIG_REGULATOR
+#ifdef EXYNOS4_ASV_ENABLED
+               /*change the voltage*/
+               mali_regulator_set_voltage(get_match_volt(ID_G3D, mali_dvfs[step].clock * GPU_ASV_VOLT), get_match_volt(ID_G3D, mali_dvfs[step].clock * GPU_ASV_VOLT));
+               exynos_set_abb(ID_G3D, get_match_abb(ID_G3D, mali_dvfs[step].clock * GPU_ASV_VOLT));
+#else
+               mali_regulator_set_voltage(mali_dvfs[step].vol, mali_dvfs[step].vol);
+#endif
+#endif
+       }
+
+#if defined(CONFIG_MALI400_PROFILING)
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
+                       MALI_PROFILING_EVENT_CHANNEL_GPU|
+                       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                       mali_gpu_clk, mali_gpu_vol/1000, 0, 0, 0);
+#endif
+       mali_clk_put(MALI_FALSE);
+
+#if MALI_DVFS_CLK_DEBUG
+       pRegMaliClkDiv = ioremap(0x1003c52c, 32);
+       pRegMaliMpll = ioremap(0x1003c22c, 32);
+       MALI_PRINT(("Mali MPLL reg:%d, CLK DIV: %d \n", *pRegMaliMpll, *pRegMaliClkDiv));
+#endif
+       set_mali_dvfs_current_step(validatedStep);
+       /*for future use*/
+       maliDvfsStatus.pCurrentDvfs = &mali_dvfs[validatedStep];
+
+#if CPUFREQ_LOCK_DURING_440
+       /* lock/unlock CPU freq by Mali */
+       if (mali_dvfs[step].clock >= 440) /* keep the CPU at >= 400MHz while the GPU runs fast */
+               cpufreq_lock_by_mali(400);
+       else
+               cpufreq_unlock_by_mali();
+#endif
+       return MALI_TRUE;
+}
+
+static void mali_platform_wating(u32 msec) /* spin until bit 15 of the clk-divider status register clears; msec is unused here */
+{
+       /*
+       * sample wating
+       * change this in the future with proper check routine.
+       */
+       unsigned int read_val;
+       while(1) { /* NOTE(review): no timeout — hangs if the divider never settles */
+               read_val = _mali_osk_mem_ioread32(clk_register_map, 0x00);
+               if ((read_val & 0x8000)==0x0000) break;
+
+               _mali_osk_time_ubusydelay(100); /* 1000 -> 100 : 20101218 */
+       }
+}
+
+static mali_bool change_mali_dvfs_status(u32 step, mali_bool boostup ) /* apply a step then wait for clock/voltage to settle */
+{
+       MALI_DEBUG_PRINT(4, ("> change_mali_dvfs_status: %d, %d \n", step, boostup));
+
+       if (!set_mali_dvfs_status(step, boostup)) {
+               MALI_DEBUG_PRINT(1, ("error on set_mali_dvfs_status: %d, %d \n",step, boostup));
+               return MALI_FALSE;
+       }
+
+       /* wait until clock and voltage is stablized */
+       mali_platform_wating(MALI_DVFS_WATING); /* msec */
+       return MALI_TRUE;
+}
+
+static unsigned int decideNextStatus(unsigned int utilization) /* pick next DVFS step from utilization (0..255) or user pin */
+{
+       static unsigned int level = 0; /* persists across calls: steps move one level at a time */
+       int iStepCount = 0;
+
+       if (mali_runtime_resumed >= 0) { /* runtime-resume requested a specific step */
+               level = mali_runtime_resumed;
+               mali_runtime_resumed = -1;
+       }
+
+       if (mali_dvfs_control == 0) { /* automatic: compare against the current step's thresholds */
+               if (utilization > (int)(255 * mali_dvfs[maliDvfsStatus.currentStep].upthreshold / 100) &&
+                               level < MALI_DVFS_STEPS - 1) {
+                       level++;
+               } else if (utilization < (int)(255 * mali_dvfs[maliDvfsStatus.currentStep].downthreshold / 100) &&
+                       level > 0) {
+                       level--;
+               }
+       } else { /* user pinned a clock: choose the highest step not above mali_dvfs_control MHz */
+               for (iStepCount = MALI_DVFS_STEPS-1; iStepCount >= 0; iStepCount--) {
+                       if (mali_dvfs_control >= mali_dvfs[iStepCount].clock) {
+                               level = iStepCount;
+                               break;
+                       }
+               }
+       }
+       return level;
+}
+
+static mali_bool mali_dvfs_status(unsigned int utilization) /* step up immediately; step down only after 5 consecutive votes */
+{
+       unsigned int nextStatus = 0;
+       unsigned int curStatus = 0;
+       mali_bool boostup = MALI_FALSE;
+       static int stay_count = 5; /* down-vote hysteresis counter */
+
+       MALI_DEBUG_PRINT(4, ("> mali_dvfs_status: %d \n", utilization));
+
+       /* decide next step */
+       curStatus = get_mali_dvfs_status();
+       nextStatus = decideNextStatus(utilization);
+
+       MALI_DEBUG_PRINT(4, ("= curStatus %d, nextStatus %d, maliDvfsStatus.currentStep %d \n", curStatus, nextStatus, maliDvfsStatus.currentStep));
+       /* if next status is same with current status, don't change anything */
+       if(curStatus != nextStatus) {
+               /*check if boost up or not*/
+               if(maliDvfsStatus.currentStep < nextStatus) {
+                       boostup = 1;
+                       stay_count = 5;
+               } else if (maliDvfsStatus.currentStep > nextStatus){
+                       stay_count--; /* require repeated down votes before lowering */
+               }
+
+               if( boostup == 1 || stay_count <= 0){
+                       /*change mali dvfs status*/
+#ifdef CONFIG_MALI_DVFS
+                       update_time_in_state(curStatus);
+#endif
+                       if (!change_mali_dvfs_status(nextStatus, boostup)) {
+                               MALI_DEBUG_PRINT(1, ("error on change_mali_dvfs_status \n"));
+                               return MALI_FALSE;
+                       }
+                       boostup = 0;
+                       stay_count = 5;
+               }
+       }
+       else
+               stay_count = 5; /* steady state resets the hysteresis */
+
+       return MALI_TRUE;
+}
+
+static void mali_dvfs_work_handler(struct work_struct *w) /* workqueue body: run one DVFS decision on the last utilization sample */
+{
+       bMaliDvfsRun = 1;
+       MALI_DEBUG_PRINT(3, ("=== mali_dvfs_work_handler\n"));
+
+       if(!mali_dvfs_status(mali_dvfs_utilization))
+               MALI_DEBUG_PRINT(1, ( "error on mali dvfs status in mali_dvfs_work_handler"));
+
+       bMaliDvfsRun = 0;
+}
+
+mali_bool init_mali_dvfs_status(void) /* create the DVFS workqueue and reset the step; always MALI_TRUE */
+{
+       /*
+       * default status
+       * add here with the right function to get initilization value.
+       */
+       if (!mali_dvfs_wq)
+               mali_dvfs_wq = create_singlethread_workqueue("mali_dvfs");
+
+       /* add a error handling here */
+       maliDvfsStatus.currentStep = MALI_DVFS_DEFAULT_STEP; /* NOTE(review): workqueue creation failure is not reported */
+
+       return MALI_TRUE;
+}
+
+void deinit_mali_dvfs_status(void) /* tear down the DVFS workqueue created by init_mali_dvfs_status() */
+{
+       if (mali_dvfs_wq)
+               destroy_workqueue(mali_dvfs_wq);
+
+       mali_dvfs_wq = NULL;
+}
+
+mali_bool mali_dvfs_handler(unsigned int utilization) /* store the sample and queue DVFS work on CPU0; always MALI_TRUE */
+{
+       mali_dvfs_utilization = utilization;
+       queue_work_on(0, mali_dvfs_wq, &mali_dvfs_work);
+
+       return MALI_TRUE;
+}
+
+static mali_bool init_mali_clock(void)
+{
+       mali_bool ret = MALI_TRUE;
+       nPowermode = MALI_POWER_MODE_DEEP_SLEEP;
+
+       if (mali_clock != 0)
+               return ret; /* already initialized */
+
+       mali_dvfs_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED, 0);
+
+       if (mali_dvfs_lock == NULL)
+               return _MALI_OSK_ERR_FAULT;
+
+       if (!mali_clk_get())
+       {
+               MALI_PRINT(("Error: Failed to get Mali clock\n"));
+               goto err_clk;
+       }
+
+       mali_clk_set_rate((unsigned int)mali_gpu_clk, GPU_MHZ);
+
+       MALI_PRINT(("init_mali_clock mali_clock %x\n", mali_clock));
+
+#ifdef CONFIG_REGULATOR
+       g3d_regulator = regulator_get(NULL, "vdd_g3d");
+
+       if (IS_ERR(g3d_regulator))
+       {
+               MALI_PRINT( ("MALI Error : failed to get vdd_g3d\n"));
+               ret = MALI_FALSE;
+               goto err_regulator;
+       }
+
+       mali_gpu_vol = mali_runtime_resume.vol;
+#ifdef EXYNOS4_ASV_ENABLED
+       mali_gpu_vol = get_match_volt(ID_G3D, mali_gpu_clk * GPU_ASV_VOLT);
+       mali_runtime_resume.vol = get_match_volt(ID_G3D, mali_runtime_resume.clk * GPU_ASV_VOLT);
+#endif
+
+       regulator_enable(g3d_regulator);
+       mali_regulator_set_voltage(mali_gpu_vol, mali_gpu_vol);
+
+#ifdef EXYNOS4_ASV_ENABLED
+       exynos_set_abb(ID_G3D, get_match_abb(ID_G3D, mali_runtime_resume.clk * GPU_ASV_VOLT));
+#endif
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
+                       MALI_PROFILING_EVENT_CHANNEL_GPU|
+                       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                       mali_gpu_clk, mali_gpu_vol/1000, 0, 0, 0);
+#endif
+       mali_clk_put(MALI_FALSE);
+
+       return MALI_TRUE;
+
+#ifdef CONFIG_REGULATOR
+err_regulator:
+       regulator_put(g3d_regulator);
+#endif
+err_clk:
+       mali_clk_put(MALI_TRUE);
+
+       return ret;
+}
+
+static mali_bool deinit_mali_clock(void) /* release regulator and all clock references; always MALI_TRUE */
+{
+       if (mali_clock == 0)
+               return MALI_TRUE; /* never initialized, nothing to release */
+
+#ifdef CONFIG_REGULATOR
+       if (g3d_regulator)
+       {
+               regulator_put(g3d_regulator);
+               g3d_regulator = NULL;
+       }
+#endif
+       mali_clk_put(MALI_TRUE); /* full release, including mali_clock itself */
+
+       return MALI_TRUE;
+}
+
+static _mali_osk_errcode_t enable_mali_clocks(void) /* power-on path: enable the GPU clock and restore rate/voltage/step */
+{
+       int err;
+
+       if (atomic_read(&clk_active) == 0) {
+               err = clk_enable(mali_clock); /* NOTE(review): err is only logged, never acted on */
+               MALI_DEBUG_PRINT(3,("enable_mali_clocks mali_clock %p error %d \n", mali_clock, err));
+               atomic_set(&clk_active, 1);
+       }
+
+       /* set clock rate */
+#ifdef CONFIG_MALI_DVFS
+       if (mali_dvfs_control != 0 || mali_gpu_clk >= mali_runtime_resume.clk) {
+               mali_clk_set_rate(mali_gpu_clk, GPU_MHZ); /* keep whatever was pinned/last used */
+       } else { /* resume at the designated runtime-resume operating point */
+#ifdef CONFIG_REGULATOR
+               mali_regulator_set_voltage(mali_runtime_resume.vol, mali_runtime_resume.vol);
+#ifdef EXYNOS4_ASV_ENABLED
+               exynos_set_abb(ID_G3D, get_match_abb(ID_G3D, mali_runtime_resume.clk * GPU_ASV_VOLT));
+#endif
+#endif
+               mali_clk_set_rate(mali_runtime_resume.clk, GPU_MHZ);
+               set_mali_dvfs_current_step(mali_runtime_resume.step);
+       }
+#else
+       mali_clk_set_rate((unsigned int)mali_gpu_clk, GPU_MHZ);
+       maliDvfsStatus.currentStep = MALI_DVFS_DEFAULT_STEP;
+#endif
+
+       MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t disable_mali_clocks(void) /* power-off path: gate the GPU clock if currently enabled */
+{
+       if (atomic_read(&clk_active)) {
+               clk_disable(mali_clock);
+               MALI_DEBUG_PRINT(3, ("disable_mali_clocks mali_clock %p\n", mali_clock));
+               atomic_set(&clk_active, 0);
+       }
+       MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_init(struct device *dev) /* driver entry: clocks, DVFS state, then power on */
+{
+       MALI_CHECK(init_mali_clock(), _MALI_OSK_ERR_FAULT);
+       atomic_set(&clk_active, 0);
+
+#ifdef CONFIG_MALI_DVFS
+       if (!clk_register_map)
+               clk_register_map = _mali_osk_mem_mapioregion(CLK_DIV_STAT_G3D, 0x20, CLK_DESC); /* for mali_platform_wating() polling */
+
+       if (!init_mali_dvfs_status())
+               MALI_DEBUG_PRINT(1, ("mali_platform_init failed\n"));
+
+       maliDvfsStatus.currentStep = MALI_DVFS_DEFAULT_STEP;
+#endif
+       mali_platform_power_mode_change(dev, MALI_POWER_MODE_ON);
+       MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_deinit(struct device *dev) /* driver exit: power off, release clocks and DVFS resources */
+{
+       mali_platform_power_mode_change(dev, MALI_POWER_MODE_DEEP_SLEEP);
+       deinit_mali_clock();
+
+#ifdef CONFIG_MALI_DVFS
+       deinit_mali_dvfs_status();
+       if (clk_register_map)
+       {
+               _mali_osk_mem_unmapioregion(CLK_DIV_STAT_G3D, 0x20, clk_register_map);
+               clk_register_map = NULL;
+       }
+#endif
+       MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_power_mode_change(struct device *dev, mali_power_mode power_mode) /* gate clocks on ON<->SLEEP transitions; idempotent per mode */
+{
+       switch (power_mode)
+       {
+       case MALI_POWER_MODE_ON:
+               MALI_DEBUG_PRINT(3, ("Mali platform: Got MALI_POWER_MODE_ON event, %s\n",
+                                       nPowermode ? "powering on" : "already on"));
+               if (nPowermode == MALI_POWER_MODE_LIGHT_SLEEP || nPowermode == MALI_POWER_MODE_DEEP_SLEEP)      { /* only act on a real transition */
+                       MALI_DEBUG_PRINT(4, ("enable clock\n"));
+                       enable_mali_clocks();
+
+#if defined(CONFIG_MALI400_PROFILING)
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                       MALI_PROFILING_EVENT_CHANNEL_GPU |
+                       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                       mali_gpu_clk, mali_gpu_vol/1000, 0, 0, 0);
+#endif
+                       nPowermode = power_mode;
+               }
+               break;
+       case MALI_POWER_MODE_DEEP_SLEEP:
+       case MALI_POWER_MODE_LIGHT_SLEEP: /* both sleep flavours are handled identically here */
+               MALI_DEBUG_PRINT(3, ("Mali platform: Got %s event, %s\n", power_mode == MALI_POWER_MODE_LIGHT_SLEEP ?
+                                       "MALI_POWER_MODE_LIGHT_SLEEP" : "MALI_POWER_MODE_DEEP_SLEEP",
+                                       nPowermode ? "already off" : "powering off"));
+               if (nPowermode == MALI_POWER_MODE_ON)   {
+                       disable_mali_clocks();
+
+#if defined(CONFIG_MALI400_PROFILING)
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                       MALI_PROFILING_EVENT_CHANNEL_GPU |
+                       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                       0, 0, 0, 0, 0);
+#endif
+                       nPowermode = power_mode;
+               }
+               break;
+       }
+       MALI_SUCCESS;
+}
+
+void mali_gpu_utilization_handler(struct mali_gpu_utilization_data *data) /* utilization callback (see mali_gpu_data); feeds DVFS while powered */
+{
+       if (nPowermode == MALI_POWER_MODE_ON)
+       {
+#ifdef CONFIG_MALI_DVFS
+               if(!mali_dvfs_handler(data->utilization_gpu))
+                       MALI_DEBUG_PRINT(1, ("error on mali dvfs status in utilization\n"));
+#endif
+       }
+}
+
+#ifdef CONFIG_MALI_DVFS
+static void update_time_in_state(int level) /* credit the jiffies since the last call to mali_dvfs_time[level] */
+{
+       u64 current_time;
+       static u64 prev_time = 0; /* 0 means "first call": start the clock now */
+
+       if (prev_time == 0)
+               prev_time = get_jiffies_64();
+
+       current_time = get_jiffies_64();
+       mali_dvfs_time[level] += current_time - prev_time;
+       prev_time = current_time;
+}
+
+ssize_t show_time_in_state(struct device *dev, struct device_attribute *attr, char *buf) /* sysfs read: "clock jiffies" per DVFS step */
+{
+       ssize_t ret = 0;
+       int i;
+
+       update_time_in_state(maliDvfsStatus.currentStep); /* bring the current step's counter up to date first */
+
+       for (i = 0; i < MALI_DVFS_STEPS; i++) {
+               ret += snprintf(buf + ret, PAGE_SIZE - ret, "%d %llu\n",
+                                               mali_dvfs[i].clock,
+                                               mali_dvfs_time[i]);
+       }
+
+       if (ret < PAGE_SIZE - 1) {
+               ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+       } else { /* output filled the page: force termination at the boundary */
+               buf[PAGE_SIZE - 2] = '\n';
+               buf[PAGE_SIZE - 1] = '\0';
+               ret = PAGE_SIZE - 1;
+       }
+       return ret;
+}
+
+ssize_t set_time_in_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) /* sysfs write: any write zeroes all counters */
+{
+       int i;
+
+       for (i = 0; i < MALI_DVFS_STEPS; i++) {
+               mali_dvfs_time[i] = 0;
+       }
+       return count;
+}
+#endif
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos4415/exynos4_pmm.h b/drivers/gpu/arm/mali400/r4p0_rel0/platform/exynos4415/exynos4_pmm.h
new file mode 100644 (file)
index 0000000..e0f3f61
--- /dev/null
@@ -0,0 +1,93 @@
+/* drivers/gpu/mali400/mali/platform/exynos4415/exynos4_pmm.h
+ *
+ * Copyright 2011 by S.LSI. Samsung Electronics Inc.
+ * San#24, Nongseo-Dong, Giheung-Gu, Yongin, Korea
+ *
+ * Samsung SoC Mali400 DVFS driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**
+ * @file exynos4_pmm.h
+ * Platform specific Mali driver functions for the exynos 4XXX based platforms
+ */
+
+#ifndef __EXYNOS4_PMM_H__
+#define __EXYNOS4_PMM_H__
+
+#include "mali_utgard.h"
+#include "mali_osk.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @brief description of power change reasons
+ */
+typedef enum mali_power_mode_tag
+{
+       MALI_POWER_MODE_ON,          /* GPU (to be) powered up */
+       MALI_POWER_MODE_LIGHT_SLEEP, /* idle power-down while jobs may still come */
+       MALI_POWER_MODE_DEEP_SLEEP,  /* power-down for system suspend/power-off */
+} mali_power_mode;
+
+/** @brief Platform specific setup and initialisation of MALI
+ *
+ * This is called from the entrypoint of the driver to initialize the platform
+ *
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_init(struct device *dev);
+
+/** @brief Platform specific deinitialisation of MALI
+ *
+ * This is called on the exit of the driver to terminate the platform
+ *
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_deinit(struct device *dev);
+
+/** @brief Platform specific powerdown sequence of MALI
+ *
+ * Call as part of platform init if there is no PMM support, else the
+ * PMM will call it.
+ * There are three power modes defined:
+ *  1) MALI_POWER_MODE_ON
+ *  2) MALI_POWER_MODE_LIGHT_SLEEP
+ *  3) MALI_POWER_MODE_DEEP_SLEEP
+ * MALI power management module transitions to MALI_POWER_MODE_LIGHT_SLEEP mode when MALI is idle
+ * for idle timer (software timer defined in mali_pmm_policy_jobcontrol.h) duration, MALI transitions
+ * to MALI_POWER_MODE_LIGHT_SLEEP mode during timeout if there are no more jobs queued.
+ * MALI power management module transitions to MALI_POWER_MODE_DEEP_SLEEP mode when OS does system power
+ * off.
+ * Customer has to add power down code when MALI transitions to MALI_POWER_MODE_LIGHT_SLEEP or MALI_POWER_MODE_DEEP_SLEEP
+ * mode.
+ * MALI_POWER_MODE_ON mode is entered when the MALI is to be powered up. Some customers want to control voltage regulators during
+ * the whole system powers on/off. Customer can track in this function whether the MALI is powered up from
+ * MALI_POWER_MODE_LIGHT_SLEEP or MALI_POWER_MODE_DEEP_SLEEP mode and manage the voltage regulators as well.
+ * @param power_mode defines the power modes
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_power_mode_change(struct device *dev, mali_power_mode power_mode);
+
+
+/** @brief Platform specific handling of GPU utilization data
+ *
+ * When GPU utilization data is enabled, this function will be
+ * periodically called.
+ *
+ * @param utilization The workload utilization of the Mali GPU. 0 = no utilization, 256 = full utilization.
+ */
+void mali_gpu_utilization_handler(struct mali_gpu_utilization_data *data);
+
+#ifdef CONFIG_MALI_DVFS
+ssize_t show_time_in_state(struct device *dev, struct device_attribute *attr, char *buf);
+ssize_t set_time_in_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count);
+#endif
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/platform/pegasus-m400/exynos4.c b/drivers/gpu/arm/mali400/r4p0_rel0/platform/pegasus-m400/exynos4.c
new file mode 100644 (file)
index 0000000..3f05301
--- /dev/null
@@ -0,0 +1,407 @@
+/* drivers/gpu/mali400/mali/platform/pegasus-m400/exynos4.c
+ *
+ * Copyright 2011 by S.LSI. Samsung Electronics Inc.
+ * San#24, Nongseo-Dong, Giheung-Gu, Yongin, Korea
+ *
+ * Samsung SoC Mali400 DVFS driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**
+ * @file exynos4.c
+ * Platform specific Mali driver functions for the exynos 4XXX based platforms
+ */
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <linux/pm.h>
+#include <linux/suspend.h>
+#include <linux/dma-mapping.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+
+#ifdef CONFIG_MALI_DVFS
+#include "mali_kernel_utilization.h"
+#endif /* CONFIG_MALI_DVFS */
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+#include "mali_kernel_linux.h"
+#include "mali_pm.h"
+
+#include <plat/pd.h>
+
+#include "exynos4_pmm.h"
+
+#if defined(CONFIG_PM_RUNTIME)
+/* We do not need the PM notifier in the r3p2 DDK */
+//#define USE_PM_NOTIFIER
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+struct exynos_pm_domain;
+extern struct exynos_pm_domain exynos4_pd_g3d;
+void exynos_pm_add_dev_to_genpd(struct platform_device *pdev, struct exynos_pm_domain *pd);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)
+extern struct platform_device exynos4_device_pd[];
+#else
+extern struct platform_device s5pv310_device_pd[];
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) */
+
+static void mali_platform_device_release(struct device *device);
+
+#if defined(CONFIG_PM_RUNTIME)
+#if defined(USE_PM_NOTIFIER)
+static int mali_os_suspend(struct device *device);
+static int mali_os_resume(struct device *device);
+static int mali_os_freeze(struct device *device);
+static int mali_os_thaw(struct device *device);
+
+static int mali_runtime_suspend(struct device *device);
+static int mali_runtime_resume(struct device *device);
+static int mali_runtime_idle(struct device *device);
+#endif
+#endif
+
+#if defined(CONFIG_ARCH_S5PV310) && !defined(CONFIG_BOARD_HKDKC210)
+
+/* This is for other SMDK boards */
+#define MALI_BASE_IRQ 232
+
+#else
+
+/* This is for the Odroid boards */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0))
+#define MALI_BASE_IRQ 182
+#else
+#define MALI_BASE_IRQ 150
+#endif
+
+#endif
+
+/* IRQ numbers for the Mali cores, relative to MALI_BASE_IRQ.  Each
+ * expansion is parenthesised so it stays a single expression in any
+ * surrounding macro or operator context. */
+#define MALI_GP_IRQ       (MALI_BASE_IRQ + 9)
+#define MALI_PP0_IRQ      (MALI_BASE_IRQ + 5)
+#define MALI_PP1_IRQ      (MALI_BASE_IRQ + 6)
+#define MALI_PP2_IRQ      (MALI_BASE_IRQ + 7)
+#define MALI_PP3_IRQ      (MALI_BASE_IRQ + 8)
+#define MALI_GP_MMU_IRQ   (MALI_BASE_IRQ + 4)
+#define MALI_PP0_MMU_IRQ  (MALI_BASE_IRQ + 0)
+#define MALI_PP1_MMU_IRQ  (MALI_BASE_IRQ + 1)
+#define MALI_PP2_MMU_IRQ  (MALI_BASE_IRQ + 2)
+#define MALI_PP3_MMU_IRQ  (MALI_BASE_IRQ + 3)
+
+static struct resource mali_gpu_resources[] =
+{
+       MALI_GPU_RESOURCES_MALI400_MP4(0x13000000,
+                                                                  MALI_GP_IRQ, MALI_GP_MMU_IRQ,
+                                                                  MALI_PP0_IRQ, MALI_PP0_MMU_IRQ,
+                                                                  MALI_PP1_IRQ, MALI_PP1_MMU_IRQ,
+                                                                  MALI_PP2_IRQ, MALI_PP2_MMU_IRQ,
+                                                                  MALI_PP3_IRQ, MALI_PP3_MMU_IRQ)
+};
+
+#ifdef CONFIG_PM_RUNTIME
+#if defined(USE_PM_NOTIFIER)
+static int mali_pwr_suspend_notifier(struct notifier_block *nb,unsigned long event,void* dummy);
+
+static struct notifier_block mali_pwr_notif_block = {
+       .notifier_call = mali_pwr_suspend_notifier
+};
+#endif
+#endif /* CONFIG_PM_RUNTIME */
+
+#if 0
+static struct dev_pm_ops mali_gpu_device_type_pm_ops =
+{
+#ifndef CONFIG_PM_RUNTIME
+       .suspend = mali_os_suspend,
+       .resume = mali_os_resume,
+#endif
+       .freeze = mali_os_freeze,
+       .thaw = mali_os_thaw,
+#ifdef CONFIG_PM_RUNTIME
+       .runtime_suspend = mali_runtime_suspend,
+       .runtime_resume = mali_runtime_resume,
+       .runtime_idle = mali_runtime_idle,
+#endif
+};
+#endif
+
+#if defined(USE_PM_NOTIFIER)
+static struct device_type mali_gpu_device_device_type =
+{
+       .pm = &mali_gpu_device_type_pm_ops,
+};
+#endif
+
+static struct platform_device mali_gpu_device =
+{
+       .name = "mali_dev", /* MALI_SEC MALI_GPU_NAME_UTGARD, */
+       .id = 0,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+       /* Set in mali_platform_device_register() for these kernels */
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)
+       .dev.parent = &exynos4_device_pd[PD_G3D].dev,
+#else
+       .dev.parent = &s5pv310_device_pd[PD_G3D].dev,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) */
+       .dev.release = mali_platform_device_release,
+#if 0
+       /*
+        * We temporarily make use of a device type so that we can control the Mali power
+        * from within the mali.ko (since the default platform bus implementation will not do that).
+        * Ideally .dev.pm_domain should be used instead, as this is the new framework designed
+        * to control the power of devices.
+        */
+       .dev.type = &mali_gpu_device_device_type, /* We should probably use the pm_domain instead of type on newer kernels */
+#endif
+       .dev.coherent_dma_mask = DMA_BIT_MASK(32),
+};
+
+static struct mali_gpu_device_data mali_gpu_data =
+{
+       .shared_mem_size = 256 * 1024 * 1024, /* 256MB */
+       .fb_start = 0x40000000,
+       .fb_size = 0xb1000000,
+       .utilization_interval = 100, /* 100ms */
+       .utilization_callback = mali_gpu_utilization_handler,
+};
+
+/* Register the Mali platform device: attach it to the G3D power domain
+ * (3.4+), add MMIO/IRQ resources and platform data, register the device,
+ * run platform init, and enable runtime PM with a 1 s autosuspend delay.
+ * Returns 0 on success or a negative errno. */
+int mali_platform_device_register(void)
+{
+       int err;
+
+       MALI_DEBUG_PRINT(4, ("mali_platform_device_register() called\n"));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+       exynos_pm_add_dev_to_genpd(&mali_gpu_device, &exynos4_pd_g3d);
+#endif
+
+       /* Connect resources to the device */
+       err = platform_device_add_resources(&mali_gpu_device, mali_gpu_resources, sizeof(mali_gpu_resources) / sizeof(mali_gpu_resources[0]));
+       if (0 == err)
+       {
+               err = platform_device_add_data(&mali_gpu_device, &mali_gpu_data, sizeof(mali_gpu_data));
+               if (0 == err)
+               {
+#ifdef CONFIG_PM_RUNTIME
+#if defined(USE_PM_NOTIFIER)
+                       err = register_pm_notifier(&mali_pwr_notif_block);
+                       if (err)
+                       {
+                               goto plat_init_err;
+                       }
+#endif
+#endif /* CONFIG_PM_RUNTIME */
+
+                       /* Register the platform device */
+                       err = platform_device_register(&mali_gpu_device);
+                       if (0 == err)
+                       {
+                               /* NOTE(review): return value of mali_platform_init()
+                                * is ignored here -- verify that init failure is
+                                * acceptable to silently continue from. */
+                               mali_platform_init(&(mali_gpu_device.dev));
+
+#ifdef CONFIG_PM_RUNTIME
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+                               pm_runtime_set_autosuspend_delay(&(mali_gpu_device.dev), 1000);
+                               pm_runtime_use_autosuspend(&(mali_gpu_device.dev));
+#endif
+                               pm_runtime_enable(&(mali_gpu_device.dev));
+#endif
+
+                               return 0;
+                       }
+               }
+
+#ifdef CONFIG_PM_RUNTIME
+#if defined(USE_PM_NOTIFIER)
+plat_init_err:
+               unregister_pm_notifier(&mali_pwr_notif_block);
+#endif
+#endif /* CONFIG_PM_RUNTIME */
+               /* NOTE(review): this error path runs even when
+                * platform_device_register() failed or was never reached,
+                * so it unregisters a device that was never registered --
+                * platform_device_put() is likely what is intended here. */
+               platform_device_unregister(&mali_gpu_device);
+       }
+
+       return err;
+}
+
+/* Tear down in reverse order of registration: drop the PM notifier (if
+ * used), run platform deinit, then unregister the platform device. */
+void mali_platform_device_unregister(void)
+{
+       MALI_DEBUG_PRINT(4, ("mali_platform_device_unregister() called\n"));
+
+#ifdef CONFIG_PM_RUNTIME
+#if defined(USE_PM_NOTIFIER)
+       unregister_pm_notifier(&mali_pwr_notif_block);
+#endif
+#endif /* CONFIG_PM_RUNTIME */
+
+       mali_platform_deinit(&(mali_gpu_device.dev));
+
+       platform_device_unregister(&mali_gpu_device);
+}
+
+/* Release callback for the statically allocated platform device; nothing
+ * to free, present only to silence the driver-core warning. */
+static void mali_platform_device_release(struct device *device)
+{
+       MALI_DEBUG_PRINT(4, ("mali_platform_device_release() called\n"));
+}
+
+#ifdef CONFIG_PM_RUNTIME
+#if defined(USE_PM_NOTIFIER)
+/* System-suspend PM notifier: on suspend-prepare, quiesce the Mali PM
+ * state machine then power the device down; on post-suspend, power it
+ * back up and resume the PM state machine (reverse order). */
+static int mali_pwr_suspend_notifier(struct notifier_block *nb,unsigned long event,void* dummy)
+{
+       int err = 0;
+       switch (event)
+       {
+               case PM_SUSPEND_PREPARE:
+                       mali_pm_os_suspend();
+                       err = mali_os_suspend(&(mali_platform_device->dev));
+                       break;
+
+               case PM_POST_SUSPEND:
+                       err = mali_os_resume(&(mali_platform_device->dev));
+                       mali_pm_os_resume();
+                       break;
+               default:
+                       break;
+       }
+       return err;
+}
+
+/* OS suspend: stop DVFS sampling, forward the suspend to the Mali
+ * driver's own pm op (if one is bound), then drop the platform into
+ * deep sleep.  Returns the driver pm op's result, or 0. */
+static int mali_os_suspend(struct device *device)
+{
+       int ret = 0;
+       MALI_DEBUG_PRINT(4, ("mali_os_suspend() called\n"));
+
+#ifdef CONFIG_MALI_DVFS
+       mali_utilization_suspend();
+#endif
+
+       if (NULL != device &&
+               NULL != device->driver &&
+               NULL != device->driver->pm &&
+               NULL != device->driver->pm->suspend)
+       {
+               /* Need to notify Mali driver about this event */
+               ret = device->driver->pm->suspend(device);
+       }
+
+       mali_platform_power_mode_change(device, MALI_POWER_MODE_DEEP_SLEEP);
+
+       return ret;
+}
+
+/* OS resume: re-enable the regulator and G3D power domain, bring the
+ * platform to full power, then forward the resume to the Mali driver's
+ * pm op (if bound).  Power is restored BEFORE notifying the driver --
+ * mirror image of mali_os_suspend(). */
+static int mali_os_resume(struct device *device)
+{
+       int ret = 0;
+
+       MALI_DEBUG_PRINT(4, ("mali_os_resume() called\n"));
+#ifdef CONFIG_REGULATOR
+       mali_regulator_enable();
+       g3d_power_domain_control(1);
+#endif
+       mali_platform_power_mode_change(device, MALI_POWER_MODE_ON);
+
+       if (NULL != device &&
+               NULL != device->driver &&
+               NULL != device->driver->pm &&
+               NULL != device->driver->pm->resume)
+       {
+               /* Need to notify Mali driver about this event */
+               ret = device->driver->pm->resume(device);
+       }
+
+       return ret;
+}
+
+/* Hibernation freeze: forward to the Mali driver's freeze pm op only; no
+ * platform power change here.  NOTE(review): unlike mali_os_suspend(),
+ * 'device' itself is not NULL-checked before dereference -- confirm the
+ * caller guarantees a valid device. */
+static int mali_os_freeze(struct device *device)
+{
+       int ret = 0;
+       MALI_DEBUG_PRINT(4, ("mali_os_freeze() called\n"));
+
+       if (NULL != device->driver &&
+               NULL != device->driver->pm &&
+               NULL != device->driver->pm->freeze)
+       {
+               /* Need to notify Mali driver about this event */
+               ret = device->driver->pm->freeze(device);
+       }
+
+       return ret;
+}
+
+/* Hibernation thaw: forward to the Mali driver's thaw pm op only; no
+ * platform power change here (counterpart of mali_os_freeze()). */
+static int mali_os_thaw(struct device *device)
+{
+       int ret = 0;
+       MALI_DEBUG_PRINT(4, ("mali_os_thaw() called\n"));
+
+       if (NULL != device->driver &&
+               NULL != device->driver->pm &&
+               NULL != device->driver->pm->thaw)
+       {
+               /* Need to notify Mali driver about this event */
+               ret = device->driver->pm->thaw(device);
+       }
+
+       return ret;
+}
+
+/* Runtime-PM suspend: notify the Mali driver first, then drop the
+ * platform into light sleep (GPU idle, system still running). */
+static int mali_runtime_suspend(struct device *device)
+{
+       int ret = 0;
+
+       MALI_DEBUG_PRINT(4, ("mali_runtime_suspend() called\n"));
+       if (NULL != device->driver &&
+               NULL != device->driver->pm &&
+               NULL != device->driver->pm->runtime_suspend)
+       {
+               /* Need to notify Mali driver about this event */
+               ret = device->driver->pm->runtime_suspend(device);
+       }
+
+       mali_platform_power_mode_change(device, MALI_POWER_MODE_LIGHT_SLEEP);
+
+       return ret;
+}
+
+/* Runtime-PM resume: restore platform power FIRST, then notify the Mali
+ * driver -- mirror image of mali_runtime_suspend(). */
+static int mali_runtime_resume(struct device *device)
+{
+       int ret = 0;
+       MALI_DEBUG_PRINT(4, ("mali_runtime_resume() called\n"));
+
+       mali_platform_power_mode_change(device, MALI_POWER_MODE_ON);
+
+       if (NULL != device->driver &&
+               NULL != device->driver->pm &&
+               NULL != device->driver->pm->runtime_resume)
+       {
+               /* Need to notify Mali driver about this event */
+               ret = device->driver->pm->runtime_resume(device);
+       }
+
+       return ret;
+}
+
+/* Runtime-PM idle: delegate to the Mali driver's runtime_idle op and
+ * propagate any non-zero (veto/error) result.  Otherwise returns 1,
+ * i.e. a non-zero value so the PM core's default idle action is not
+ * taken on top of whatever the driver op did. */
+static int mali_runtime_idle(struct device *device)
+{
+       MALI_DEBUG_PRINT(4, ("mali_runtime_idle() called\n"));
+       if (NULL != device->driver &&
+               NULL != device->driver->pm &&
+               NULL != device->driver->pm->runtime_idle)
+       {
+               int ret = 0;
+               /* Need to notify Mali driver about this event */
+               ret = device->driver->pm->runtime_idle(device);
+               if (0 != ret)
+               {
+                       return ret;
+               }
+       }
+
+       return 1;
+}
+
+#endif /* USE_PM_NOTIFIER */
+#endif /* CONFIG_PM_RUNTIME */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/platform/pegasus-m400/exynos4_pmm.c b/drivers/gpu/arm/mali400/r4p0_rel0/platform/pegasus-m400/exynos4_pmm.c
new file mode 100644 (file)
index 0000000..f8fa420
--- /dev/null
@@ -0,0 +1,1337 @@
+/* drivers/gpu/mali400/mali/platform/pegasus-m400/exynos4_pmm.c
+ *
+ * Copyright 2011 by S.LSI. Samsung Electronics Inc.
+ * San#24, Nongseo-Dong, Giheung-Gu, Yongin, Korea
+ *
+ * Samsung SoC Mali400 DVFS driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**
+ * @file exynos4_pmm.c
+ * Platform specific Mali driver functions for the exynos 4XXX based platforms
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "exynos4_pmm.h"
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+#if defined(CONFIG_PM_RUNTIME)
+#include <plat/pd.h>
+#endif
+
+#include <asm/io.h>
+#include <mach/regs-pmu.h>
+#include <mach/cpufreq.h>
+
+#include <linux/workqueue.h>
+
+#define MALI_DVFS_STEPS 5
+#define MALI_DVFS_WATING 10 /* msec */
+#define MALI_DVFS_DEFAULT_STEP 1
+#define PD_G3D_LOCK_FLAG 2
+
+#ifdef CONFIG_CPU_FREQ
+#include <mach/asv.h>
+#define EXYNOS4_ASV_ENABLED
+#endif
+
+#define MALI_DVFS_CLK_DEBUG 0
+#define SEC_THRESHOLD 1
+
+#define CPUFREQ_LOCK_DURING_440 1
+#define CHIPID_REG             (S5P_VA_CHIPID + 0x4)
+
+static int bMaliDvfsRun = 0;
+
+typedef struct mali_dvfs_tableTag{
+       unsigned int clock;
+       unsigned int freq;
+       unsigned int vol;
+#if SEC_THRESHOLD
+       unsigned int downthreshold;
+       unsigned int upthreshold;
+#endif
+}mali_dvfs_table;
+
+typedef struct mali_dvfs_statusTag{
+       unsigned int currentStep;
+       mali_dvfs_table * pCurrentDvfs;
+
+} mali_dvfs_status_t;
+
+/* dvfs status */
+mali_dvfs_status_t maliDvfsStatus;
+int mali_dvfs_control;
+
+typedef struct mali_runtime_resumeTag{
+       int clk;
+       int vol;
+       unsigned int step;
+}mali_runtime_resume_table;
+
+mali_runtime_resume_table mali_runtime_resume = {266, 900000, 1};
+
+/* dvfs table */
+mali_dvfs_table mali_dvfs[MALI_DVFS_STEPS]={
+#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
+                       /* step 0 */{160  ,1000000      ,875000 , 0   , 70},
+                       /* step 1 */{266  ,1000000      ,900000 ,62   , 90},
+                       /* step 2 */{350  ,1000000      ,950000 ,85   , 90},
+                       /* step 3 */{440  ,1000000      ,1025000   ,85   , 90},
+                       /* step 4 */{533  ,1000000      ,1075000   ,95   ,100} };
+#else
+                       /* step 0 */{134  ,1000000      , 950000   ,85   , 90},
+                       /* step 1 */{267  ,1000000      ,1050000   ,85   ,100} };
+#endif
+
+#ifdef EXYNOS4_ASV_ENABLED
+#define ASV_LEVEL     12       /* ASV0, 1, 11 is reserved */
+#define ASV_LEVEL_PRIME     13 /* ASV0, 1, 12 is reserved */
+#define ASV_LEVEL_PD   13
+
+
+static unsigned int asv_3d_volt_9_table_1ghz_type[MALI_DVFS_STEPS-1][ASV_LEVEL] = {
+       {  975000,  950000,  950000,  950000,  925000,  925000,  925000,  900000,  900000,  900000,  900000,  875000},  /* L3(160Mhz) */
+#if (MALI_DVFS_STEPS > 1)
+       { 1000000,  975000,  975000,  975000,  950000,  950000,  950000,  900000,  900000,  900000,  900000,  875000},  /* L2(266Mhz) */
+#if (MALI_DVFS_STEPS > 2)
+       { 1075000, 1050000, 1050000, 1050000, 1000000, 1000000, 1000000,  975000,  975000,  975000,  975000,  925000},  /* L1(350Mhz) */
+#if (MALI_DVFS_STEPS > 3)
+       { 1125000, 1100000, 1100000, 1100000, 1075000, 1075000, 1075000, 1025000, 1025000, 1025000, 1025000,  975000},  /* L0(440Mhz) */
+#endif
+#endif
+#endif
+};
+static unsigned int asv_3d_volt_9_table[MALI_DVFS_STEPS-1][ASV_LEVEL] = {
+       {  950000,  925000,  900000,  900000,  875000,  875000,  875000,  875000,  850000,  850000,  850000,  850000},  /* L3(160Mhz) */
+#if (MALI_DVFS_STEPS > 1)
+       {  975000,  950000,  925000,  925000,  925000,  900000,  900000,  875000,  875000,  875000,  875000,  850000},  /* L2(266Mhz) */
+#if (MALI_DVFS_STEPS > 2)
+       { 1050000, 1025000, 1000000, 1000000,  975000,  950000,  950000,  950000,  925000,  925000,  925000,  900000},  /* L1(350Mhz) */
+#if (MALI_DVFS_STEPS > 3)
+       { 1100000, 1075000, 1050000, 1050000, 1050000, 1025000, 1025000, 1000000, 1000000, 1000000,  975000,  950000},  /* L0(440Mhz) */
+#endif
+#endif
+#endif
+};
+
+static unsigned int asv_3d_volt_9_table_for_prime[MALI_DVFS_STEPS][ASV_LEVEL_PRIME] = {
+       {  950000,  937500,  925000,  912500,  900000,  887500,  875000,  862500,  875000,  862500,  850000,  850000,  850000},  /* L4(160Mhz) */
+#if (MALI_DVFS_STEPS > 1)
+       {  975000,  962500,  950000,  937500,  925000,  912500,  900000,  887500,  900000,  887500,  875000,  875000,  875000}, /* L3(266Mhz) */
+#if (MALI_DVFS_STEPS > 2)
+       { 1025000, 1012500, 1000000,  987500,  975000,  962500,  950000,  937500,  950000,  937500,  912500,  900000,  887500}, /* L2(350Mhz) */
+#if (MALI_DVFS_STEPS > 3)
+       { 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000, 1012500, 1000000,  975000,  962500,  950000}, /* L1(440Mhz) */
+#if (MALI_DVFS_STEPS > 4)
+       { 1150000, 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500, 1075000, 1062500, 1037500, 1025000, 1012500}, /* L0(533Mhz) */
+#endif
+#endif
+#endif
+#endif
+};
+
+static unsigned int asv_3d_volt_4212_9_table[MALI_DVFS_STEPS][ASV_LEVEL_PD] = {
+       {  950000,  925000,  900000,  900000,  900000,  900000,  900000,  900000,  875000,  850000,  850000,  850000, 850000},  /* L3(160Mhz) */
+#if (MALI_DVFS_STEPS > 1)
+       {  975000,  950000,  925000,  925000,  925000,  925000,  925000,  900000,  900000,  900000,  875000,  875000, 875000},  /* L2(266Mhz) */
+#if (MALI_DVFS_STEPS > 2)
+       { 1025000, 1000000,  975000,  975000,  975000,  950000,  950000,  925000,  925000,  925000,  925000,  900000, 875000},  /* L1(350Mhz) */
+#if (MALI_DVFS_STEPS > 3)
+       { 1100000, 1075000, 1050000, 1050000, 1050000, 1050000, 1025000, 1000000, 1000000,  975000,  975000,  950000, 925000},  /* L0(440Mhz) */
+#endif
+#endif
+#endif
+};
+#endif /* ASV_LEVEL */
+
+#define EXTXTALCLK_NAME  "ext_xtal"
+#define VPLLSRCCLK_NAME  "vpll_src"
+#define FOUTVPLLCLK_NAME "fout_vpll"
+#define SCLVPLLCLK_NAME  "sclk_vpll"
+#define GPUMOUT1CLK_NAME "mout_g3d1"
+
+#define MPLLCLK_NAME    "mout_mpll"
+#define GPUMOUT0CLK_NAME "mout_g3d0"
+#define GPUCLK_NAME      "sclk_g3d"
+#define CLK_DIV_STAT_G3D 0x1003C62C
+#define CLK_DESC                "clk-divider-status"
+
+static struct clk *ext_xtal_clock      = NULL;
+static struct clk *vpll_src_clock      = NULL;
+static struct clk *fout_vpll_clock   = NULL;
+static struct clk *sclk_vpll_clock   = NULL;
+
+static struct clk *mpll_clock          = NULL;
+static struct clk *mali_parent_clock = NULL;
+static struct clk  *mali_mout0_clock = NULL;
+static struct clk *mali_clock          = NULL;
+
+#if defined(CONFIG_CPU_EXYNOS4412) || defined(CONFIG_CPU_EXYNOS4212)
+/* Pegasus */
+static const mali_bool bis_vpll = MALI_TRUE;
+int mali_gpu_clk = 440;
+int mali_gpu_vol = 1025000;
+#else
+/* Orion */
+static const mali_bool bis_vpll = MALI_FALSE;
+int mali_gpu_clk = 267;
+int mali_gpu_vol = 1050000;
+#endif
+
+static unsigned int GPU_MHZ    = 1000000;
+
+int  gpu_power_state;
+static int bPoweroff;
+atomic_t clk_active;
+
+#define MAX_MALI_DVFS_STEPS 5
+static _mali_osk_atomic_t bottomlock_status;
+int bottom_lock_step = 0;
+
+#if MALI_VOLTAGE_LOCK
+int mali_lock_vol = 0;
+static _mali_osk_atomic_t voltage_lock_status;
+static mali_bool mali_vol_lock_flag = 0;
+#endif
+
+/* Declare for sysfs */
+#ifdef CONFIG_MALI_DVFS
+module_param(mali_dvfs_control, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_dvfs_control, "Mali Current DVFS");
+
+DEVICE_ATTR(time_in_state, S_IRUGO|S_IWUSR, show_time_in_state, set_time_in_state);
+MODULE_PARM_DESC(time_in_state, "Time-in-state of Mali DVFS");
+#endif
+
+module_param(mali_gpu_clk, int, S_IRUSR | S_IRGRP | S_IROTH); /* r--r--r-- */
+MODULE_PARM_DESC(mali_gpu_clk, "Mali Current Clock");
+
+module_param(mali_gpu_vol, int, S_IRUSR | S_IRGRP | S_IROTH); /* r--r--r-- */
+MODULE_PARM_DESC(mali_gpu_vol, "Mali Current Voltage");
+
+module_param(gpu_power_state, int, S_IRUSR | S_IRGRP | S_IROTH); /* r--r--r-- */
+MODULE_PARM_DESC(gpu_power_state, "Mali Power State");
+
+#ifdef CONFIG_REGULATOR
+struct regulator *g3d_regulator = NULL;
+#endif
+atomic_t mali_cpufreq_lock;
+
+mali_io_address clk_register_map = 0;
+
+/* DVFS */
+static unsigned int mali_dvfs_utilization = 255;
+u64 mali_dvfs_time[MALI_DVFS_STEPS];
+#ifdef CONFIG_MALI_DVFS
+static void update_time_in_state(int level);
+#endif
+static void mali_dvfs_work_handler(struct work_struct *w);
+static struct workqueue_struct *mali_dvfs_wq = 0;
+extern mali_io_address clk_register_map;
+_mali_osk_mutex_t *mali_dvfs_lock;
+int mali_runtime_resumed = -1;
+static DECLARE_WORK(mali_dvfs_work, mali_dvfs_work_handler);
+
+/* Pin the CPU frequency at (at least) 'freq' MHz while the GPU needs it.
+ * Idempotent: mali_cpufreq_lock guards against taking the cpufreq lock
+ * twice.  Returns 0 on success (or when cpufreq support is compiled
+ * out), -EINVAL if the level lookup or lock fails. */
+int cpufreq_lock_by_mali(unsigned int freq)
+{
+#ifdef CONFIG_EXYNOS4_CPUFREQ
+       unsigned int level;
+
+       if (atomic_read(&mali_cpufreq_lock) == 0) {
+               if (exynos_cpufreq_get_level(freq * 1000, &level)) {
+                       printk(KERN_ERR
+                               "Mali: failed to get cpufreq level for %dMHz",
+                               freq);
+                       return -EINVAL;
+               }
+
+               if (exynos_cpufreq_lock(DVFS_LOCK_ID_G3D, level)) {
+                       printk(KERN_ERR
+                               "Mali: failed to cpufreq lock for L%d", level);
+                       return -EINVAL;
+               }
+
+               atomic_set(&mali_cpufreq_lock, 1);
+               printk(KERN_DEBUG "Mali: cpufreq locked on <%d>%dMHz\n", level,
+                                                                       freq);
+       }
+#endif
+       return 0;
+}
+
+/* Release the cpufreq lock taken by cpufreq_lock_by_mali(); no-op if the
+ * lock is not currently held. */
+void cpufreq_unlock_by_mali(void)
+{
+#ifdef CONFIG_EXYNOS4_CPUFREQ
+       if (atomic_read(&mali_cpufreq_lock) == 1) {
+               exynos_cpufreq_lock_free(DVFS_LOCK_ID_G3D);
+               atomic_set(&mali_cpufreq_lock, 0);
+               printk(KERN_DEBUG "Mali: cpufreq locked off\n");
+       }
+#endif
+}
+
+#ifdef CONFIG_REGULATOR
+/* Disable the G3D regulator; logs and bails out if the regulator handle
+ * was never obtained. */
+void mali_regulator_disable(void)
+{
+       if(IS_ERR_OR_NULL(g3d_regulator))
+       {
+               MALI_DEBUG_PRINT(1, ("error on mali_regulator_disable : g3d_regulator is null\n"));
+               return;
+       }
+       regulator_disable(g3d_regulator);
+}
+
+/* Enable the G3D regulator; logs and bails out if the regulator handle
+ * was never obtained.  NOTE(review): regulator_enable()'s return value
+ * is ignored -- newer kernels mark it __must_check; verify failure here
+ * cannot leave the GPU unpowered silently. */
+void mali_regulator_enable(void)
+{
+       if(IS_ERR_OR_NULL(g3d_regulator))
+       {
+               MALI_DEBUG_PRINT(1, ("error on mali_regulator_enable : g3d_regulator is null\n"));
+               return;
+       }
+       regulator_enable(g3d_regulator);
+}
+
+/* Set the G3D regulator voltage window [min_uV, max_uV] under the DVFS
+ * lock, then read back the actual voltage into mali_gpu_vol.  The lock
+ * is released on every exit path. */
+void mali_regulator_set_voltage(int min_uV, int max_uV)
+{
+       _mali_osk_mutex_wait(mali_dvfs_lock);
+       if(IS_ERR_OR_NULL(g3d_regulator))
+       {
+               MALI_DEBUG_PRINT(1, ("error on mali_regulator_set_voltage : g3d_regulator is null\n"));
+               _mali_osk_mutex_signal(mali_dvfs_lock);
+               return;
+       }
+       MALI_PRINT(("= regulator_set_voltage: %d, %d \n",min_uV, max_uV));
+       regulator_set_voltage(g3d_regulator, min_uV, max_uV);
+       mali_gpu_vol = regulator_get_voltage(g3d_regulator);
+       MALI_DEBUG_PRINT(1, ("Mali voltage: %d\n", mali_gpu_vol));
+       _mali_osk_mutex_signal(mali_dvfs_lock);
+}
+#endif
+
+/* Current Mali clock rate in Hz, straight from the clk framework. */
+unsigned long mali_clk_get_rate(void)
+{
+       return clk_get_rate(mali_clock);
+}
+
+
+/* Current DVFS step index (0 .. MALI_DVFS_STEPS-1). */
+static unsigned int get_mali_dvfs_status(void)
+{
+       return maliDvfsStatus.currentStep;
+}
+
+/* Lazily look up and cache every clock the GPU path needs.  With bis_vpll
+ * the VPLL chain (ext_xtal -> vpll_src -> fout_vpll -> sclk_vpll ->
+ * mout_g3d1) is used; otherwise the MPLL chain (mout_mpll -> mout_g3d0).
+ * The leaf sclk_g3d clock is always fetched.  Each handle is fetched only
+ * once and kept in file-scope statics; on failure the handles obtained so
+ * far stay cached for a later mali_clk_put().  Returns MALI_TRUE iff all
+ * required clocks are available. */
+mali_bool mali_clk_get(void)
+{
+       if (bis_vpll)
+       {
+               if (ext_xtal_clock == NULL)
+               {
+                       ext_xtal_clock = clk_get(NULL, EXTXTALCLK_NAME);
+                       if (IS_ERR(ext_xtal_clock)) {
+                               MALI_PRINT(("MALI Error : failed to get source ext_xtal_clock\n"));
+                               return MALI_FALSE;
+                       }
+               }
+
+               if (vpll_src_clock == NULL)
+               {
+                       vpll_src_clock = clk_get(NULL, VPLLSRCCLK_NAME);
+                       if (IS_ERR(vpll_src_clock)) {
+                               MALI_PRINT(("MALI Error : failed to get source vpll_src_clock\n"));
+                               return MALI_FALSE;
+                       }
+               }
+
+               if (fout_vpll_clock == NULL)
+               {
+                       fout_vpll_clock = clk_get(NULL, FOUTVPLLCLK_NAME);
+                       if (IS_ERR(fout_vpll_clock)) {
+                               MALI_PRINT(("MALI Error : failed to get source fout_vpll_clock\n"));
+                               return MALI_FALSE;
+                       }
+               }
+
+               if (sclk_vpll_clock == NULL)
+               {
+                       sclk_vpll_clock = clk_get(NULL, SCLVPLLCLK_NAME);
+                       if (IS_ERR(sclk_vpll_clock)) {
+                               MALI_PRINT(("MALI Error : failed to get source sclk_vpll_clock\n"));
+                               return MALI_FALSE;
+                       }
+               }
+
+               if (mali_parent_clock == NULL)
+               {
+                       mali_parent_clock = clk_get(NULL, GPUMOUT1CLK_NAME);
+
+                       if (IS_ERR(mali_parent_clock)) {
+                               MALI_PRINT(( "MALI Error : failed to get source mali parent clock\n"));
+                               return MALI_FALSE;
+                       }
+               }
+
+               if (mali_mout0_clock == NULL)
+               {
+                       mali_mout0_clock = clk_get(NULL, GPUMOUT0CLK_NAME);
+
+                       if (IS_ERR(mali_mout0_clock)) {
+                               MALI_PRINT( ( "MALI Error : failed to get source mali mout0 clock\n"));
+                               return MALI_FALSE;
+                       }
+               }
+       }
+       else /* mpll */
+       {
+               if (mpll_clock == NULL)
+               {
+                       mpll_clock = clk_get(NULL, MPLLCLK_NAME);
+
+                       if (IS_ERR(mpll_clock)) {
+                               MALI_PRINT(("MALI Error : failed to get source mpll clock\n"));
+                               return MALI_FALSE;
+                       }
+               }
+
+               if (mali_parent_clock == NULL)
+               {
+                       mali_parent_clock = clk_get(NULL, GPUMOUT0CLK_NAME);
+
+                       if (IS_ERR(mali_parent_clock)) {
+                               MALI_PRINT(( "MALI Error : failed to get source mali parent clock\n"));
+                               return MALI_FALSE;
+                       }
+               }
+       }
+
+       /* mali clock get always. */
+       if (mali_clock == NULL)
+       {
+               mali_clock = clk_get(NULL, GPUCLK_NAME);
+
+               if (IS_ERR(mali_clock)) {
+                       MALI_PRINT(("MALI Error : failed to get source mali clock\n"));
+                       return MALI_FALSE;
+               }
+       }
+
+       return MALI_TRUE;
+}
+
+void mali_clk_put(mali_bool binc_mali_clock)
+{
+       if (mali_parent_clock)
+       {
+               clk_put(mali_parent_clock);
+               mali_parent_clock = NULL;
+       }
+
+       if (mali_mout0_clock)
+       {
+               clk_put(mali_mout0_clock);
+               mali_mout0_clock = NULL;
+       }
+
+       if (mpll_clock)
+       {
+               clk_put(mpll_clock);
+               mpll_clock = NULL;
+       }
+
+       if (sclk_vpll_clock)
+       {
+               clk_put(sclk_vpll_clock);
+               sclk_vpll_clock = NULL;
+       }
+
+       if (binc_mali_clock && fout_vpll_clock)
+       {
+               clk_put(fout_vpll_clock);
+               fout_vpll_clock = NULL;
+       }
+
+       if (vpll_src_clock)
+       {
+               clk_put(vpll_src_clock);
+               vpll_src_clock = NULL;
+       }
+
+       if (ext_xtal_clock)
+       {
+               clk_put(ext_xtal_clock);
+               ext_xtal_clock = NULL;
+       }
+
+       if (binc_mali_clock && mali_clock)
+       {
+               clk_put(mali_clock);
+               mali_clock = NULL;
+       }
+}
+
+void mali_clk_set_rate(unsigned int clk, unsigned int mhz)
+{
+       int err;
+       unsigned long rate = (unsigned long)clk * (unsigned long)mhz;
+
+       _mali_osk_mutex_wait(mali_dvfs_lock);
+       MALI_DEBUG_PRINT(3, ("Mali platform: Setting frequency to %d mhz\n", clk));
+
+       if (mali_clk_get() == MALI_FALSE) {
+               _mali_osk_mutex_signal(mali_dvfs_lock);
+               return;
+       }
+
+       if (bis_vpll)
+       {
+               clk_set_rate(fout_vpll_clock, (unsigned int)clk * GPU_MHZ);
+               clk_set_parent(vpll_src_clock, ext_xtal_clock);
+               clk_set_parent(sclk_vpll_clock, fout_vpll_clock);
+
+               clk_set_parent(mali_parent_clock, sclk_vpll_clock);
+               clk_set_parent(mali_clock, mali_parent_clock);
+       }
+       else
+       {
+               clk_set_parent(mali_parent_clock, mpll_clock);
+               clk_set_parent(mali_clock, mali_parent_clock);
+       }
+
+       if (atomic_read(&clk_active) == 0) {
+               if (clk_enable(mali_clock) < 0) {
+                       _mali_osk_mutex_signal(mali_dvfs_lock);
+                       return;
+               }
+               atomic_set(&clk_active, 1);
+       }
+
+       err = clk_set_rate(mali_clock, rate);
+       if (err > 0)
+               MALI_PRINT_ERROR(("Failed to set Mali clock: %d\n", err));
+
+       rate = mali_clk_get_rate();
+
+       MALI_PRINT(("Mali frequency %d\n", rate / mhz));
+       GPU_MHZ = mhz;
+       mali_gpu_clk = (int)(rate / mhz);
+
+       mali_clk_put(MALI_FALSE);
+
+       _mali_osk_mutex_signal(mali_dvfs_lock);
+}
+
/* Return the manual DVFS control value (0 means automatic DVFS;
 * non-zero pins the frequency — see decideNextStatus()). */
int get_mali_dvfs_control_status(void)
{
	return mali_dvfs_control;
}
+
/* Record @step as the current DVFS step, wrapping modulo MALI_DVFS_STEPS.
 * Always returns MALI_TRUE.  Serialized by mali_dvfs_lock. */
mali_bool set_mali_dvfs_current_step(unsigned int step)
{
	_mali_osk_mutex_wait(mali_dvfs_lock);
	maliDvfsStatus.currentStep = step % MALI_DVFS_STEPS;
	/* NOTE(review): an out-of-range step also seeds mali_runtime_resumed,
	 * which decideNextStatus() consumes as a one-shot resume level —
	 * presumably callers encode "resume at step N" as N+MALI_DVFS_STEPS;
	 * confirm against callers. */
	if (step >= MALI_DVFS_STEPS)
		mali_runtime_resumed = maliDvfsStatus.currentStep;

	_mali_osk_mutex_signal(mali_dvfs_lock);
	return MALI_TRUE;
}
+
+
+static mali_bool set_mali_dvfs_status(u32 step,mali_bool boostup)
+{
+       u32 validatedStep=step;
+#if MALI_DVFS_CLK_DEBUG
+       unsigned int *pRegMaliClkDiv;
+       unsigned int *pRegMaliMpll;
+#endif
+
+       if(boostup)     {
+#ifdef CONFIG_REGULATOR
+               /* change the voltage */
+               mali_regulator_set_voltage(mali_dvfs[step].vol, mali_dvfs[step].vol);
+#endif
+               /* change the clock */
+               mali_clk_set_rate(mali_dvfs[step].clock, mali_dvfs[step].freq);
+       } else {
+               /* change the clock */
+               mali_clk_set_rate(mali_dvfs[step].clock, mali_dvfs[step].freq);
+#ifdef CONFIG_REGULATOR
+               /* change the voltage */
+               mali_regulator_set_voltage(mali_dvfs[step].vol, mali_dvfs[step].vol);
+#endif
+       }
+
+#if defined(CONFIG_MALI400_PROFILING)
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
+                       MALI_PROFILING_EVENT_CHANNEL_GPU|
+                       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                       mali_gpu_clk, mali_gpu_vol/1000, 0, 0, 0);
+#endif
+       mali_clk_put(MALI_FALSE);
+
+#if MALI_DVFS_CLK_DEBUG
+       pRegMaliClkDiv = ioremap(0x1003c52c,32);
+       pRegMaliMpll = ioremap(0x1003c22c,32);
+       MALI_PRINT(("Mali MPLL reg:%d, CLK DIV: %d \n",*pRegMaliMpll, *pRegMaliClkDiv));
+#endif
+
+#ifdef EXYNOS4_ASV_ENABLED
+       if (samsung_rev() < EXYNOS4412_REV_2_0) {
+               if (mali_dvfs[step].clock == 160)
+                       exynos4x12_set_abb_member(ABB_G3D, ABB_MODE_100V);
+               else
+                       exynos4x12_set_abb_member(ABB_G3D, ABB_MODE_130V);
+       }
+#endif
+
+       set_mali_dvfs_current_step(validatedStep);
+       /* for future use */
+       maliDvfsStatus.pCurrentDvfs = &mali_dvfs[validatedStep];
+
+#if CPUFREQ_LOCK_DURING_440
+       /* lock/unlock CPU freq by Mali */
+       if (mali_dvfs[step].clock >= 440)
+               cpufreq_lock_by_mali(1200);
+       else
+               cpufreq_unlock_by_mali();
+#endif
+
+
+       return MALI_TRUE;
+}
+
+static void mali_platform_wating(u32 msec)
+{
+       /*
+       * sample wating
+       * change this in the future with proper check routine.
+       */
+       unsigned int read_val;
+       while(1) {
+               read_val = _mali_osk_mem_ioread32(clk_register_map, 0x00);
+               if ((read_val & 0x8000)==0x0000) break;
+
+               _mali_osk_time_ubusydelay(100); /* 1000 -> 100 : 20101218 */
+       }
+}
+
+static mali_bool change_mali_dvfs_status(u32 step, mali_bool boostup )
+{
+       MALI_DEBUG_PRINT(4, ("> change_mali_dvfs_status: %d, %d \n",step, boostup));
+
+       if (!set_mali_dvfs_status(step, boostup)) {
+               MALI_DEBUG_PRINT(1, ("error on set_mali_dvfs_status: %d, %d \n",step, boostup));
+               return MALI_FALSE;
+       }
+
+       /* wait until clock and voltage is stablized */
+       mali_platform_wating(MALI_DVFS_WATING); /* msec */
+
+       return MALI_TRUE;
+}
+
+#ifdef EXYNOS4_ASV_ENABLED
+extern unsigned int exynos_result_of_asv;
+
/* Rewrite the per-step DVFS voltages from the SoC's ASV (Adaptive Supply
 * Voltage) fuse data, selecting the table by chip variant:
 *   - 4412 binned for 1 GHz ARM clock        -> asv_3d_volt_9_table_1ghz_type
 *   - 4412 Prime (rev >= 2.0) w/ G3D lock    -> prime table + 25 mV guard band
 *   - 4412 Prime (rev >= 2.0)                -> prime table
 *   - other 4412                             -> asv_3d_volt_9_table
 *   - 4212                                   -> 4212 table (+25 mV if fused)
 * Also propagates the matching voltage into mali_runtime_resume and
 * mali_gpu_vol.  Always returns MALI_TRUE.
 */
static mali_bool mali_dvfs_table_update(void)
{
	unsigned int i, tmp, g3d_lock_volt = 0;
	unsigned int step_num = MALI_DVFS_STEPS;
	bool lock_flag_g3d = false;

	/* Pre-rev-2.0 silicon exposes one step fewer. */
	if(samsung_rev() < EXYNOS4412_REV_2_0)
		step_num = MALI_DVFS_STEPS - 1;

	if(soc_is_exynos4412()) {
		if (exynos_armclk_max == 1000000) {
			MALI_PRINT(("::C::exynos_result_of_asv : %d\n", exynos_result_of_asv));
			for (i = 0; i < step_num; i++) {
				mali_dvfs[i].vol = asv_3d_volt_9_table_1ghz_type[i][exynos_result_of_asv];
				MALI_PRINT(("mali_dvfs[%d].vol = %d \n", i, mali_dvfs[i].vol));

				// Update voltage using for resume
				if (mali_runtime_resume.clk == mali_dvfs[i].clock) {
					mali_runtime_resume.vol = mali_dvfs[i].vol;

					MALI_PRINT(("mali_runtime_resume.vol = %d \n", mali_runtime_resume.vol));
				}

				// update voltage using for init timing
				if (mali_gpu_clk == mali_dvfs[i].clock) {
					mali_gpu_vol = mali_dvfs[i].vol;

					MALI_PRINT(("init_gpu_vol = %d \n", mali_gpu_vol));
				}
			}
		} else if(((is_special_flag() >> G3D_LOCK_FLAG) & 0x1) && (samsung_rev() >= EXYNOS4412_REV_2_0)) {
			/* G3D lock fuse set: add a 25 mV (25000 uV) guard band. */
			MALI_PRINT(("::L::exynos_result_of_asv : %d\n", exynos_result_of_asv));
			for (i = 0; i < step_num; i++) {
				mali_dvfs[i].vol = asv_3d_volt_9_table_for_prime[i][exynos_result_of_asv] + 25000;
				MALI_PRINT(("mali_dvfs[%d].vol = %d \n ", i, mali_dvfs[i].vol));

				// Update voltage using for resume
				if (mali_runtime_resume.clk == mali_dvfs[i].clock) {
					mali_runtime_resume.vol = mali_dvfs[i].vol;

					MALI_PRINT(("mali_runtime_resume.vol = %d \n", mali_runtime_resume.vol));
				}

				// update voltage using for init timing
				if (mali_gpu_clk == mali_dvfs[i].clock) {
					mali_gpu_vol = mali_dvfs[i].vol;

					MALI_PRINT(("init_gpu_vol = %d \n", mali_gpu_vol));
				}
			}
		} else if (samsung_rev() >= EXYNOS4412_REV_2_0) {
			MALI_PRINT(("::P::exynos_result_of_asv : %d\n", exynos_result_of_asv));
			for (i = 0; i < step_num; i++) {
				mali_dvfs[i].vol = asv_3d_volt_9_table_for_prime[i][exynos_result_of_asv];
				MALI_PRINT(("mali_dvfs[%d].vol = %d \n", i, mali_dvfs[i].vol));

				// Update voltage using for resume
				if (mali_runtime_resume.clk == mali_dvfs[i].clock) {
					mali_runtime_resume.vol = mali_dvfs[i].vol;

					MALI_PRINT(("mali_runtime_resume.vol = %d \n", mali_runtime_resume.vol));
				}

				// update voltage using for init timing
				if (mali_gpu_clk == mali_dvfs[i].clock) {
					mali_gpu_vol = mali_dvfs[i].vol;

					MALI_PRINT(("init_gpu_vol = %d \n", mali_gpu_vol));
				}
			}
		} else {
			MALI_PRINT(("::Q::exynos_result_of_asv : %d\n", exynos_result_of_asv));
			for (i = 0; i < step_num; i++) {
				mali_dvfs[i].vol = asv_3d_volt_9_table[i][exynos_result_of_asv];
				MALI_PRINT(("mali_dvfs[%d].vol = %d \n", i, mali_dvfs[i].vol));

				// Update voltage using for resume
				if (mali_runtime_resume.clk == mali_dvfs[i].clock) {
					mali_runtime_resume.vol = mali_dvfs[i].vol;

					MALI_PRINT(("mali_runtime_resume.vol = %d \n", mali_runtime_resume.vol));
				}

				// update voltage using for init timing
				if (mali_gpu_clk == mali_dvfs[i].clock) {
					mali_gpu_vol = mali_dvfs[i].vol;

					MALI_PRINT(("init_gpu_vol = %d \n", mali_gpu_vol));
				}
			}
		}
	}
	else if(soc_is_exynos4212()) {
		/* 4212: the lock flag lives in the chip-ID register. */
		tmp = __raw_readl(CHIPID_REG);
		lock_flag_g3d = (tmp >> PD_G3D_LOCK_FLAG) & 0x1;
		if (lock_flag_g3d)
			g3d_lock_volt = 25000;

		for (i = 0; i < step_num; i++) {
			MALI_PRINT((":::exynos_result_of_asv : %d\n", exynos_result_of_asv));
			mali_dvfs[i].vol = asv_3d_volt_4212_9_table[i][exynos_result_of_asv] + g3d_lock_volt;
			MALI_PRINT(("mali_dvfs[%d].vol = %d\n", i, mali_dvfs[i].vol));

			// Update voltage using for resume
			if (mali_runtime_resume.clk == mali_dvfs[i].clock) {
				mali_runtime_resume.vol = mali_dvfs[i].vol;

				MALI_PRINT(("mali_runtime_resume.vol = %d \n", mali_runtime_resume.vol));
			}

			// update voltage using for init timing
			if (mali_gpu_clk == mali_dvfs[i].clock) {
				mali_gpu_vol = mali_dvfs[i].vol;

				MALI_PRINT(("init_gpu_vol = %d \n", mali_gpu_vol));
			}
		}
	}

	return MALI_TRUE;
}
+#endif
+
/* Choose the next DVFS level from the reported GPU utilization.
 *
 * The static `level` persists across calls; a pending one-shot resume
 * level (mali_runtime_resumed) overrides it.  When mali_dvfs_control is
 * non-zero, automatic scaling is bypassed and the highest step whose
 * clock is <= mali_dvfs_control is chosen instead.
 */
static unsigned int decideNextStatus(unsigned int utilization)
{
	static unsigned int level = 0;
	int iStepCount = 0;
	/* Consume the one-shot resume level, if set. */
	if (mali_runtime_resumed >= 0) {
		level = mali_runtime_resumed;
		mali_runtime_resumed = -1;
	}

	if (mali_dvfs_control == 0 && level == get_mali_dvfs_status()) {
		/* Scale up when utilization exceeds the step's up-threshold
		 * (thresholds are percentages of the 0..255 utilization range). */
		if (utilization > (int)(255 * mali_dvfs[maliDvfsStatus.currentStep].upthreshold / 100) &&
				level < MALI_DVFS_STEPS - 1) {
			level++;
			/* Pre-rev-2.0 parts: cap at step 3 (do not enter the top step). */
			if ((samsung_rev() < EXYNOS4412_REV_2_0) && 3 == get_mali_dvfs_status()) {
				level=get_mali_dvfs_status();
			}
		}
		else if (utilization < (int)(255 * mali_dvfs[maliDvfsStatus.currentStep].downthreshold / 100) &&
				level > 0) {
			level--;
		}

		/* Honor an active bottom lock: never go below its step. */
		if (_mali_osk_atomic_read(&bottomlock_status) > 0) {
			if (level < bottom_lock_step)
				level = bottom_lock_step;
		}
	} else {
		/* Manual control: pick the highest step not above the requested clock.
		 * NOTE(review): if mali_dvfs_control is non-zero but below
		 * mali_dvfs[0].clock, `level` is left unchanged — confirm intended. */
		for (iStepCount = MALI_DVFS_STEPS-1; iStepCount >= 0; iStepCount--) {
			if ( mali_dvfs_control >= mali_dvfs[iStepCount].clock ) {
				level = iStepCount;
				break;
			}
		}
	}

	return level;
}
+
+
/* Run one DVFS decision cycle for the given utilization sample.
 *
 * Up-scaling is immediate; down-scaling is debounced by the static
 * stay_count (the level must be requested for 5 consecutive samples
 * before the switch happens).  Returns MALI_FALSE when the switch fails.
 */
static mali_bool mali_dvfs_status(unsigned int utilization)
{
	unsigned int nextStatus = 0;
	unsigned int curStatus = 0;
	mali_bool boostup = MALI_FALSE;
	static int stay_count = 5; /* down-scale hysteresis counter */

	MALI_DEBUG_PRINT(4, ("> mali_dvfs_status: %d \n",utilization));

	/* decide next step */
	curStatus = get_mali_dvfs_status();
	nextStatus = decideNextStatus(utilization);

	MALI_DEBUG_PRINT(4, ("= curStatus %d, nextStatus %d, maliDvfsStatus.currentStep %d \n", curStatus, nextStatus, maliDvfsStatus.currentStep));
	/* if next status is same with current status, don't change anything */
	if(curStatus != nextStatus) {
		/*check if boost up or not*/
		if(maliDvfsStatus.currentStep < nextStatus) {
			boostup = 1;
			stay_count = 5;
		} else if (maliDvfsStatus.currentStep > nextStatus){
			stay_count--;
		}
		/* Boost immediately; down-scale only after the hysteresis expires. */
		if( boostup == 1 || stay_count <= 0){
			/*change mali dvfs status*/
#ifdef CONFIG_MALI_DVFS
			update_time_in_state(curStatus);
#endif
			if (!change_mali_dvfs_status(nextStatus,boostup)) {
				MALI_DEBUG_PRINT(1, ("error on change_mali_dvfs_status \n"));
				return MALI_FALSE;
			}
			boostup = 0;
			stay_count = 5;
		}
	}
	else
		stay_count = 5;
	return MALI_TRUE;
}
+
+
/* Non-zero while the DVFS work handler is currently executing. */
int mali_dvfs_is_running(void)
{
	return bMaliDvfsRun;
}
+
+
+static void mali_dvfs_work_handler(struct work_struct *w)
+{
+       bMaliDvfsRun=1;
+
+       MALI_DEBUG_PRINT(3, ("=== mali_dvfs_work_handler\n"));
+
+       if(!mali_dvfs_status(mali_dvfs_utilization))
+       MALI_DEBUG_PRINT(1, ( "error on mali dvfs status in mali_dvfs_work_handler"));
+
+       bMaliDvfsRun=0;
+}
+
+mali_bool init_mali_dvfs_status(void)
+{
+       /*
+       * default status
+       * add here with the right function to get initilization value.
+       */
+
+       if (!mali_dvfs_wq)
+               mali_dvfs_wq = create_singlethread_workqueue("mali_dvfs");
+
+       _mali_osk_atomic_init(&bottomlock_status, 0);
+
+       /* add a error handling here */
+       maliDvfsStatus.currentStep = MALI_DVFS_DEFAULT_STEP;
+
+       return MALI_TRUE;
+}
+
+void deinit_mali_dvfs_status(void)
+{
+       if (mali_dvfs_wq)
+               destroy_workqueue(mali_dvfs_wq);
+
+       _mali_osk_atomic_term(&bottomlock_status);
+
+       mali_dvfs_wq = NULL;
+}
+
+mali_bool mali_dvfs_handler(unsigned int utilization)
+{
+       mali_dvfs_utilization = utilization;
+       queue_work_on(0, mali_dvfs_wq, &mali_dvfs_work);
+
+       return MALI_TRUE;
+}
+
+static mali_bool init_mali_clock(void)
+{
+       mali_bool ret = MALI_TRUE;
+       gpu_power_state = 1;
+       bPoweroff = 1;
+
+       if (mali_clock != 0)
+               return ret; /* already initialized */
+
+       mali_dvfs_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED, 0);
+       if (mali_dvfs_lock == NULL)
+               return _MALI_OSK_ERR_FAULT;
+
+       if (!mali_clk_get())
+       {
+               MALI_PRINT(("Error: Failed to get Mali clock\n"));
+               goto err_clk;
+       }
+
+       mali_clk_set_rate((unsigned int)mali_gpu_clk, GPU_MHZ);
+
+       MALI_PRINT(("init_mali_clock mali_clock %x\n", mali_clock));
+
+#ifdef CONFIG_REGULATOR
+       g3d_regulator = regulator_get(NULL, "vdd_g3d");
+
+       if (IS_ERR(g3d_regulator))
+       {
+               MALI_PRINT(("MALI Error : failed to get vdd_g3d\n"));
+               ret = MALI_FALSE;
+               goto err_regulator;
+       }
+
+       regulator_enable(g3d_regulator);
+       mali_regulator_set_voltage(mali_gpu_vol, mali_gpu_vol);
+
+#ifdef EXYNOS4_ASV_ENABLED
+       if (samsung_rev() < EXYNOS4412_REV_2_0) {
+               if (mali_gpu_clk == 160)
+                       exynos4x12_set_abb_member(ABB_G3D, ABB_MODE_100V);
+               else
+                       exynos4x12_set_abb_member(ABB_G3D, ABB_MODE_130V);
+       }
+#endif
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
+                       MALI_PROFILING_EVENT_CHANNEL_GPU|
+                       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                       mali_gpu_clk, mali_gpu_vol/1000, 0, 0, 0);
+#endif
+
+       mali_clk_put(MALI_FALSE);
+
+       return MALI_TRUE;
+
+#ifdef CONFIG_REGULATOR
+err_regulator:
+       regulator_put(g3d_regulator);
+#endif
+err_clk:
+       mali_clk_put(MALI_TRUE);
+
+       return ret;
+}
+
+static mali_bool deinit_mali_clock(void)
+{
+       if (mali_clock == 0)
+               return MALI_TRUE;
+
+#ifdef CONFIG_REGULATOR
+       if (g3d_regulator)
+       {
+               regulator_put(g3d_regulator);
+               g3d_regulator = NULL;
+       }
+#endif
+
+       mali_clk_put(MALI_TRUE);
+
+       return MALI_TRUE;
+}
+
+
+static _mali_osk_errcode_t enable_mali_clocks(void)
+{
+       int err;
+
+       if (atomic_read(&clk_active) == 0) {
+               err = clk_enable(mali_clock);
+               MALI_DEBUG_PRINT(3,("enable_mali_clocks mali_clock %p error %d \n", mali_clock, err));
+               atomic_set(&clk_active, 1);
+               gpu_power_state = 1;
+       }
+
+       /* set clock rate */
+#ifdef CONFIG_MALI_DVFS
+       if (get_mali_dvfs_control_status() != 0 || mali_gpu_clk >= mali_runtime_resume.clk) {
+               mali_clk_set_rate(mali_gpu_clk, GPU_MHZ);
+       } else {
+#ifdef CONFIG_REGULATOR
+               mali_regulator_set_voltage(mali_runtime_resume.vol, mali_runtime_resume.vol);
+
+#ifdef EXYNOS4_ASV_ENABLED
+               if (samsung_rev() < EXYNOS4412_REV_2_0) {
+                       if (mali_runtime_resume.clk == 160)
+                               exynos4x12_set_abb_member(ABB_G3D, ABB_MODE_100V);
+                       else
+                               exynos4x12_set_abb_member(ABB_G3D, ABB_MODE_130V);
+               }
+#endif
+#endif
+               mali_clk_set_rate(mali_runtime_resume.clk, GPU_MHZ);
+               set_mali_dvfs_current_step(mali_runtime_resume.step);
+       }
+#else
+       mali_clk_set_rate((unsigned int)mali_gpu_clk, GPU_MHZ);
+       maliDvfsStatus.currentStep = MALI_DVFS_DEFAULT_STEP;
+#endif
+
+
+       MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t disable_mali_clocks(void)
+{
+       if (atomic_read(&clk_active) == 1) {
+               clk_disable(mali_clock);
+               atomic_set(&clk_active, 0);
+               gpu_power_state = 0;
+       }
+       MALI_DEBUG_PRINT(3, ("disable_mali_clocks mali_clock %p \n", mali_clock));
+
+       MALI_SUCCESS;
+}
+
/* Some defines changed names in later Odroid-A kernels. Make sure it works for both. */
#ifndef S5P_G3D_CONFIGURATION
#define S5P_G3D_CONFIGURATION S5P_PMU_G3D_CONF
#endif
#ifndef S5P_G3D_STATUS
/* BUGFIX: parenthesize the expansion — the bare "base + 0x4" mis-binds
 * under operator precedence when the macro is used inside a larger
 * expression. */
#define S5P_G3D_STATUS (S5P_PMU_G3D_CONF + 0x4)
#endif
+
/* Turn the G3D power domain on (@bpower_on != 0) or off via the PMU
 * configuration register, then poll the status register until the local
 * power bit reflects the request (10 polls of 100 us = ~1 ms budget).
 *
 * @return MALI_SUCCESS, or -ETIMEDOUT when the PMU does not settle.
 */
_mali_osk_errcode_t g3d_power_domain_control(int bpower_on)
{
	if (bpower_on)
	{
		void __iomem *status;
		u32 timeout;
		__raw_writel(S5P_INT_LOCAL_PWR_EN, S5P_G3D_CONFIGURATION);
		status = S5P_G3D_STATUS;

		/* Wait max ~1ms for the domain to report powered on. */
		timeout = 10;
		while ((__raw_readl(status) & S5P_INT_LOCAL_PWR_EN)
			!= S5P_INT_LOCAL_PWR_EN) {
			if (timeout == 0) {
				MALI_PRINTF(("Power domain  enable failed.\n"));
				return -ETIMEDOUT;
			}
			timeout--;
			_mali_osk_time_ubusydelay(100);
		}
	}
	else
	{
		void __iomem *status;
		u32 timeout;
		__raw_writel(0, S5P_G3D_CONFIGURATION);

		status = S5P_G3D_STATUS;
		/* Wait max 1ms */
		timeout = 10;
		while (__raw_readl(status) & S5P_INT_LOCAL_PWR_EN)
		{
			if (timeout == 0) {
				MALI_PRINTF(("Power domain  disable failed.\n" ));
				return -ETIMEDOUT;
			}
			timeout--;
			_mali_osk_time_ubusydelay(100);
		}
	}

	MALI_SUCCESS;
}
+
/* Platform entry point: update ASV voltage tables, bring up clocks and
 * regulator, create the time-in-state sysfs node and the DVFS machinery,
 * then power the GPU on.
 *
 * @return MALI_SUCCESS, or _MALI_OSK_ERR_FAULT when clock init fails.
 */
_mali_osk_errcode_t mali_platform_init(struct device *dev)
{
#ifdef EXYNOS4_ASV_ENABLED
	mali_dvfs_table_update();
#endif

	MALI_CHECK(init_mali_clock(), _MALI_OSK_ERR_FAULT);

	/* Clock bookkeeping starts gated; MALI_POWER_MODE_ON below enables it. */
	atomic_set(&clk_active, 0);

#ifdef CONFIG_MALI_DVFS
	/* Create sysfs for time-in-state */
	if (device_create_file(dev, &dev_attr_time_in_state)) {
		dev_err(dev, "Couldn't create sysfs file [time_in_state]\n");
	}

	/* Map the G3D clock-divider status register used by mali_platform_wating().
	 * NOTE(review): a failed mapping is not checked here — confirm the
	 * ioread in mali_platform_wating() tolerates a NULL map. */
	if (!clk_register_map) clk_register_map = _mali_osk_mem_mapioregion( CLK_DIV_STAT_G3D, 0x20, CLK_DESC );
	if (!init_mali_dvfs_status())
		MALI_DEBUG_PRINT(1, ("mali_platform_init failed\n"));
#endif

	mali_platform_power_mode_change(dev, MALI_POWER_MODE_ON);

	MALI_SUCCESS;
}
+
+_mali_osk_errcode_t mali_platform_deinit(struct device *dev)
+{
+
+       mali_platform_power_mode_change(dev, MALI_POWER_MODE_DEEP_SLEEP);
+       deinit_mali_clock();
+
+#ifdef CONFIG_MALI_DVFS
+       deinit_mali_dvfs_status();
+       if (clk_register_map )
+       {
+               _mali_osk_mem_unmapioregion(CLK_DIV_STAT_G3D, 0x20, clk_register_map);
+               clk_register_map = NULL;
+       }
+#endif
+
+       MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_power_mode_change(struct device *dev, mali_power_mode power_mode)
+{
+       switch (power_mode)
+       {
+               case MALI_POWER_MODE_ON:
+                       MALI_DEBUG_PRINT(3, ("Mali platform: Got MALI_POWER_MODE_ON event, %s\n",
+                                                                bPoweroff ? "powering on" : "already on"));
+                       if (bPoweroff == 1)
+                       {
+#if !defined(CONFIG_PM_RUNTIME)
+                               g3d_power_domain_control(1);
+#endif
+                               MALI_DEBUG_PRINT(4, ("enable clock \n"));
+                               enable_mali_clocks();
+#if defined(CONFIG_MALI400_PROFILING)
+                               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                               MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                               MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                                               mali_gpu_clk, mali_gpu_vol/1000, 0, 0, 0);
+
+#endif
+                               bPoweroff=0;
+                       }
+                       break;
+               case MALI_POWER_MODE_LIGHT_SLEEP:
+               case MALI_POWER_MODE_DEEP_SLEEP:
+                       MALI_DEBUG_PRINT(3, ("Mali platform: Got %s event, %s\n", power_mode ==
+                                               MALI_POWER_MODE_LIGHT_SLEEP ?  "MALI_POWER_MODE_LIGHT_SLEEP" :
+                                               "MALI_POWER_MODE_DEEP_SLEEP", bPoweroff ? "already off" : "powering off"));
+                       if (bPoweroff == 0)
+                       {
+                               disable_mali_clocks();
+#if defined(CONFIG_MALI400_PROFILING)
+                               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                               MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                               MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                                               0, 0, 0, 0, 0);
+#endif
+
+#if !defined(CONFIG_PM_RUNTIME)
+                               g3d_power_domain_control(0);
+#endif
+                               bPoweroff=1;
+                       }
+
+                       break;
+       }
+       MALI_SUCCESS;
+}
+
+void mali_gpu_utilization_handler(struct mali_gpu_utilization_data *data)
+{
+       if (bPoweroff==0)
+       {
+#ifdef CONFIG_MALI_DVFS
+               if(!mali_dvfs_handler(data->utilization_gpu))
+                       MALI_DEBUG_PRINT(1, ("error on mali dvfs status in utilization\n"));
+#endif
+       }
+}
+
/* Raise the DVFS floor to @lock_step (reference-counted).  If the GPU is
 * currently below the floor, immediately switch it up.
 *
 * @return the new lock count, or -1 when the counter is in a bad state.
 * NOTE(review): the read of bottomlock_status and the update of
 * bottom_lock_step are not done atomically — looks racy under concurrent
 * push/pop; confirm callers serialize.
 */
int mali_dvfs_bottom_lock_push(int lock_step)
{
	int prev_status = _mali_osk_atomic_read(&bottomlock_status);

	if (prev_status < 0) {
		MALI_PRINT(("gpu bottom lock status is not valid for push\n"));
		return -1;
	}
	/* Only ever raise the floor; a lower request keeps the current floor. */
	if (bottom_lock_step < lock_step) {
		bottom_lock_step = lock_step;
		if (get_mali_dvfs_status() < lock_step) {
			/* Boosting up: raise voltage before clock. */
			mali_regulator_set_voltage(mali_dvfs[lock_step].vol, mali_dvfs[lock_step].vol);
			mali_clk_set_rate(mali_dvfs[lock_step].clock, mali_dvfs[lock_step].freq);
			set_mali_dvfs_current_step(lock_step);
		}
	}
	return _mali_osk_atomic_inc_return(&bottomlock_status);
}
+
+int mali_dvfs_bottom_lock_pop(void)
+{
+       int prev_status = _mali_osk_atomic_read(&bottomlock_status);
+       if (prev_status <= 0) {
+               MALI_PRINT(("gpu bottom lock status is not valid for pop\n"));
+               return -1;
+       } else if (prev_status >= 1) {
+               bottom_lock_step = 0;
+               MALI_PRINT(("gpu bottom lock release\n"));
+       }
+
+       return _mali_osk_atomic_dec_return(&bottomlock_status);
+}
+
+int mali_dvfs_get_vol(int step)
+{
+       step = step % MAX_MALI_DVFS_STEPS;
+       MALI_DEBUG_ASSERT(step<MAX_MALI_DVFS_STEPS);
+       return mali_dvfs[step].vol;
+}
+
+#if MALI_VOLTAGE_LOCK
+int mali_voltage_lock_push(int lock_vol)
+{
+       int prev_status = _mali_osk_atomic_read(&voltage_lock_status);
+
+       if (prev_status < 0) {
+               MALI_PRINT(("gpu voltage lock status is not valid for push\n"));
+               return -1;
+       }
+       if (prev_status == 0) {
+               mali_lock_vol = lock_vol;
+               if (mali_gpu_vol < mali_lock_vol)
+                       mali_regulator_set_voltage(mali_lock_vol, mali_lock_vol);
+       } else {
+               MALI_PRINT(("gpu voltage lock status is already pushed, current lock voltage : %d\n", mali_lock_vol));
+               return -1;
+       }
+
+       return _mali_osk_atomic_inc_return(&voltage_lock_status);
+}
+
+int mali_voltage_lock_pop(void)
+{
+       if (_mali_osk_atomic_read(&voltage_lock_status) <= 0) {
+               MALI_PRINT(("gpu voltage lock status is not valid for pop\n"));
+               return -1;
+       }
+       return _mali_osk_atomic_dec_return(&voltage_lock_status);
+}
+
/* Arm the voltage-lock feature.  Always reports success. */
int mali_voltage_lock_init(void)
{
	mali_vol_lock_flag = MALI_TRUE;

	MALI_SUCCESS;
}
+
+int mali_vol_get_from_table(int vol)
+{
+       int i;
+       for (i = 0; i < MALI_DVFS_STEPS; i++) {
+               if (mali_dvfs[i].vol >= vol)
+                       return mali_dvfs[i].vol;
+       }
+       MALI_PRINT(("Failed to get voltage from mali_dvfs table, maximum voltage is %d uV\n", mali_dvfs[MALI_DVFS_STEPS-1].vol));
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_MALI_DVFS
/* Credit the jiffies elapsed since the previous call to the time spent
 * at DVFS step @level.  The static prev_time seeds itself on first use
 * so the first interval is not attributed to boot time. */
static void update_time_in_state(int level)
{
	u64 current_time;
	static u64 prev_time=0;

	if (prev_time ==0)
		prev_time=get_jiffies_64();

	current_time = get_jiffies_64();
	mali_dvfs_time[level] += current_time-prev_time;
	prev_time = current_time;
}
+
+ssize_t show_time_in_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       ssize_t ret = 0;
+       int i;
+
+       update_time_in_state(maliDvfsStatus.currentStep);
+
+       for (i = 0; i < MALI_DVFS_STEPS; i++) {
+               ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d %llu\n",
+                               mali_dvfs[i].clock,
+                               mali_dvfs_time[i]);
+       }
+
+       if (ret < PAGE_SIZE - 1) {
+               ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
+       } else {
+               buf[PAGE_SIZE-2] = '\n';
+               buf[PAGE_SIZE-1] = '\0';
+               ret = PAGE_SIZE-1;
+       }
+
+       return ret;
+}
+
+ssize_t set_time_in_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+       int i;
+
+       for (i = 0; i < MALI_DVFS_STEPS; i++) {
+               mali_dvfs_time[i] = 0;
+       }
+
+       return count;
+}
+#endif
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/platform/pegasus-m400/exynos4_pmm.h b/drivers/gpu/arm/mali400/r4p0_rel0/platform/pegasus-m400/exynos4_pmm.h
new file mode 100644 (file)
index 0000000..1b9cf57
--- /dev/null
@@ -0,0 +1,121 @@
+/* drivers/gpu/mali400/mali/platform/pegasus-m400/exynos4_pmm.h
+ *
+ * Copyright 2011 by S.LSI. Samsung Electronics Inc.
+ * San#24, Nongseo-Dong, Giheung-Gu, Yongin, Korea
+ *
+ * Samsung SoC Mali400 DVFS driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**
+ * @file exynos4_pmm.h
+ * Platform specific Mali driver functions for the exynos 4XXX based platforms
+ */
+
+#ifndef __EXYNOS4_PMM_H__
+#define __EXYNOS4_PMM_H__
+
+#include "mali_utgard.h"
+#include "mali_osk.h"
+#include <linux/platform_device.h>
+/* @Enable or Disable Mali GPU Bottom Lock feature */
+#define MALI_GPU_BOTTOM_LOCK 1
+#define MALI_VOLTAGE_LOCK 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @brief description of power change reasons
+ */
+typedef enum mali_power_mode_tag
+{
+       MALI_POWER_MODE_ON,
+       MALI_POWER_MODE_LIGHT_SLEEP,
+       MALI_POWER_MODE_DEEP_SLEEP,
+} mali_power_mode;
+
+/** @brief Platform specific setup and initialisation of MALI
+ *
+ * This is called from the entrypoint of the driver to initialize the platform
+ *
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_init(struct device *dev);
+
+/** @brief Platform specific deinitialisation of MALI
+ *
+ * This is called on the exit of the driver to terminate the platform
+ *
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_deinit(struct device *dev);
+
+/** @brief Platform specific powerdown sequence of MALI
+ *
+ * Call as part of platform init if there is no PMM support, else the
+ * PMM will call it.
+ * There are three power modes defined:
+ *  1) MALI_POWER_MODE_ON
+ *  2) MALI_POWER_MODE_LIGHT_SLEEP
+ *  3) MALI_POWER_MODE_DEEP_SLEEP
+ * MALI power management module transitions to MALI_POWER_MODE_LIGHT_SLEEP mode when MALI is idle
+ * for idle timer (software timer defined in mali_pmm_policy_jobcontrol.h) duration, MALI transitions
+ * to MALI_POWER_MODE_DEEP_SLEEP mode during timeout if there are no more jobs queued.
+ * MALI power management module transitions to MALI_POWER_MODE_DEEP_SLEEP mode when OS does system power
+ * off.
+ * Customer has to add power down code when MALI transitions to MALI_POWER_MODE_LIGHT_SLEEP or MALI_POWER_MODE_DEEP_SLEEP
+ * mode.
+ * MALI_POWER_MODE_ON mode is entered when the MALI is to be powered up. Some customers want to control voltage regulators during
+ * the whole system powers on/off. Customer can track in this function whether the MALI is powered up from
+ * MALI_POWER_MODE_LIGHT_SLEEP or MALI_POWER_MODE_DEEP_SLEEP mode and manage the voltage regulators as well.
+ * @param power_mode defines the power modes
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_power_mode_change(struct device *dev, mali_power_mode power_mode);
+
+
+/** @brief Platform specific handling of GPU utilization data
+ *
+ * When GPU utilization data is enabled, this function will be
+ * periodically called.
+ *
+ * @param utilization The workload utilization of the Mali GPU. 0 = no utilization, 256 = full utilization.
+ */
+void mali_gpu_utilization_handler(struct mali_gpu_utilization_data *data);
+
+_mali_osk_errcode_t g3d_power_domain_control(int bpower_on);
+
+#ifdef CONFIG_REGULATOR
+void mali_regulator_disable(void);
+void mali_regulator_enable(void);
+void mali_regulator_set_voltage(int min_uV, int max_uV);
+#endif
+
+#ifdef CONFIG_MALI_DVFS
+ssize_t show_time_in_state(struct device *dev, struct device_attribute *attr, char *buf);
+ssize_t set_time_in_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count);
+#ifdef CONFIG_CPU_EXYNOS4210
+#if MALI_GPU_BOTTOM_LOCK
+int mali_dvfs_bottom_lock_push(void);
+int mali_dvfs_bottom_lock_pop(void);
+#endif
+#else
+int mali_dvfs_bottom_lock_push(int lock_step);
+int mali_dvfs_bottom_lock_pop(void);
+#endif
+#endif
+
+#if MALI_VOLTAGE_LOCK
+int mali_voltage_lock_push(int lock_vol);
+int mali_voltage_lock_pop(void);
+int mali_voltage_lock_init(void);
+int mali_vol_get_from_table(int vol);
+#endif
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/regs/mali_200_regs.h b/drivers/gpu/arm/mali400/r4p0_rel0/regs/mali_200_regs.h
new file mode 100644 (file)
index 0000000..9a9f1c9
--- /dev/null
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef _MALI200_REGS_H_
+#define _MALI200_REGS_H_
+
+/**
+ *  Enum for management register addresses.
+ */
+enum mali200_mgmt_reg {
+       MALI200_REG_ADDR_MGMT_VERSION                              = 0x1000,
+       MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR               = 0x1004,
+       MALI200_REG_ADDR_MGMT_STATUS                               = 0x1008,
+       MALI200_REG_ADDR_MGMT_CTRL_MGMT                            = 0x100c,
+
+       MALI200_REG_ADDR_MGMT_INT_RAWSTAT                          = 0x1020,
+       MALI200_REG_ADDR_MGMT_INT_CLEAR                            = 0x1024,
+       MALI200_REG_ADDR_MGMT_INT_MASK                             = 0x1028,
+       MALI200_REG_ADDR_MGMT_INT_STATUS                           = 0x102c,
+
+       MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW                   = 0x1044,
+
+       MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS                     = 0x1050,
+
+       MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE                    = 0x1080,
+       MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC                       = 0x1084,
+       MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE                     = 0x108c,
+
+       MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE                    = 0x10a0,
+       MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC                       = 0x10a4,
+       MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE                     = 0x10ac,
+
+       MALI200_REG_ADDR_MGMT_PERFMON_CONTR                        = 0x10b0,
+       MALI200_REG_ADDR_MGMT_PERFMON_BASE                         = 0x10b4,
+
+       MALI200_REG_SIZEOF_REGISTER_BANK                           = 0x10f0
+
+};
+
+#define MALI200_REG_VAL_PERF_CNT_ENABLE 1
+
+enum mali200_mgmt_ctrl_mgmt {
+       MALI200_REG_VAL_CTRL_MGMT_STOP_BUS         = (1<<0),
+       MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES     = (1<<3),
+       MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET      = (1<<5),
+       MALI200_REG_VAL_CTRL_MGMT_START_RENDERING  = (1<<6),
+       MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET     = (1<<7), /* Only valid for Mali-300 and later */
+};
+
+enum mali200_mgmt_irq {
+       MALI200_REG_VAL_IRQ_END_OF_FRAME          = (1<<0),
+       MALI200_REG_VAL_IRQ_END_OF_TILE           = (1<<1),
+       MALI200_REG_VAL_IRQ_HANG                  = (1<<2),
+       MALI200_REG_VAL_IRQ_FORCE_HANG            = (1<<3),
+       MALI200_REG_VAL_IRQ_BUS_ERROR             = (1<<4),
+       MALI200_REG_VAL_IRQ_BUS_STOP              = (1<<5),
+       MALI200_REG_VAL_IRQ_CNT_0_LIMIT           = (1<<6),
+       MALI200_REG_VAL_IRQ_CNT_1_LIMIT           = (1<<7),
+       MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR  = (1<<8),
+       MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND = (1<<9),
+       MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW  = (1<<10),
+       MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW   = (1<<11),
+       MALI400PP_REG_VAL_IRQ_RESET_COMPLETED       = (1<<12),
+};
+
+#define MALI200_REG_VAL_IRQ_MASK_ALL  ((enum mali200_mgmt_irq) (\
+    MALI200_REG_VAL_IRQ_END_OF_FRAME                           |\
+    MALI200_REG_VAL_IRQ_END_OF_TILE                            |\
+    MALI200_REG_VAL_IRQ_HANG                                   |\
+    MALI200_REG_VAL_IRQ_FORCE_HANG                             |\
+    MALI200_REG_VAL_IRQ_BUS_ERROR                              |\
+    MALI200_REG_VAL_IRQ_BUS_STOP                               |\
+    MALI200_REG_VAL_IRQ_CNT_0_LIMIT                            |\
+    MALI200_REG_VAL_IRQ_CNT_1_LIMIT                            |\
+    MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR                   |\
+    MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND                  |\
+    MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW                   |\
+    MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW                    |\
+    MALI400PP_REG_VAL_IRQ_RESET_COMPLETED))
+
+#define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\
+    MALI200_REG_VAL_IRQ_END_OF_FRAME                           |\
+    MALI200_REG_VAL_IRQ_FORCE_HANG                             |\
+    MALI200_REG_VAL_IRQ_BUS_ERROR                              |\
+    MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR                   |\
+    MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND                  |\
+    MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW                   |\
+    MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW))
+
+#define MALI200_REG_VAL_IRQ_MASK_NONE ((enum mali200_mgmt_irq)(0))
+
+enum mali200_mgmt_status {
+       MALI200_REG_VAL_STATUS_RENDERING_ACTIVE     = (1<<0),
+       MALI200_REG_VAL_STATUS_BUS_STOPPED          = (1<<4),
+};
+
+enum mali200_render_unit {
+       MALI200_REG_ADDR_FRAME = 0x0000,
+       MALI200_REG_ADDR_RSW   = 0x0004,
+       MALI200_REG_ADDR_STACK = 0x0030,
+       MALI200_REG_ADDR_STACK_SIZE = 0x0034,
+       MALI200_REG_ADDR_ORIGIN_OFFSET_X  = 0x0040
+};
+
+enum mali200_wb_unit {
+       MALI200_REG_ADDR_WB0 = 0x0100,
+       MALI200_REG_ADDR_WB1 = 0x0200,
+       MALI200_REG_ADDR_WB2 = 0x0300
+};
+
+enum mali200_wb_unit_regs {
+       MALI200_REG_ADDR_WB_SOURCE_SELECT = 0x0000,
+       MALI200_REG_ADDR_WB_SOURCE_ADDR   = 0x0004,
+};
+
+/* This should be in the top 16 bit of the version register of Mali PP */
+#define MALI200_PP_PRODUCT_ID 0xC807
+#define MALI300_PP_PRODUCT_ID 0xCE07
+#define MALI400_PP_PRODUCT_ID 0xCD07
+#define MALI450_PP_PRODUCT_ID 0xCF07
+
+
+#endif /* _MALI200_REGS_H_ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/regs/mali_gp_regs.h b/drivers/gpu/arm/mali400/r4p0_rel0/regs/mali_gp_regs.h
new file mode 100644 (file)
index 0000000..00ef70c
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef _MALIGP2_CONROL_REGS_H_
+#define _MALIGP2_CONROL_REGS_H_
+
+/**
+ * These are the different geometry processor control registers.
+ * Their usage is to control and monitor the operation of the
+ * Vertex Shader and the Polygon List Builder in the geometry processor.
+ * Addresses are in 32-bit word relative sizes.
+ * @see [P0081] "Geometry Processor Data Structures" for details
+ */
+
+typedef enum {
+       MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR           = 0x00,
+       MALIGP2_REG_ADDR_MGMT_VSCL_END_ADDR             = 0x04,
+       MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR         = 0x08,
+       MALIGP2_REG_ADDR_MGMT_PLBUCL_END_ADDR           = 0x0c,
+       MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR     = 0x10,
+       MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR       = 0x14,
+       MALIGP2_REG_ADDR_MGMT_CMD                       = 0x20,
+       MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT               = 0x24,
+       MALIGP2_REG_ADDR_MGMT_INT_CLEAR                 = 0x28,
+       MALIGP2_REG_ADDR_MGMT_INT_MASK                  = 0x2C,
+       MALIGP2_REG_ADDR_MGMT_INT_STAT                  = 0x30,
+       MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW           = 0x34,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE         = 0x3C,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE         = 0x40,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC            = 0x44,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC            = 0x48,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE          = 0x4C,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE          = 0x50,
+       MALIGP2_REG_ADDR_MGMT_STATUS                    = 0x68,
+       MALIGP2_REG_ADDR_MGMT_VERSION                   = 0x6C,
+       MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ      = 0x80,
+       MALIGP2_REG_ADDR_MGMT_PLBCL_START_ADDR_READ     = 0x84,
+       MALIGP2_CONTR_AXI_BUS_ERROR_STAT                = 0x94,
+       MALIGP2_REGISTER_ADDRESS_SPACE_SIZE             = 0x98,
+} maligp_reg_addr_mgmt_addr;
+
+#define MALIGP2_REG_VAL_PERF_CNT_ENABLE 1
+
+/**
+ * Commands to geometry processor.
+ *  @see MALIGP2_CTRL_REG_CMD
+ */
+typedef enum {
+       MALIGP2_REG_VAL_CMD_START_VS                    = (1<< 0),
+       MALIGP2_REG_VAL_CMD_START_PLBU                  = (1<< 1),
+       MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC   = (1<< 4),
+       MALIGP2_REG_VAL_CMD_RESET                               = (1<< 5),
+       MALIGP2_REG_VAL_CMD_FORCE_HANG                  = (1<< 6),
+       MALIGP2_REG_VAL_CMD_STOP_BUS                    = (1<< 9),
+       MALI400GP_REG_VAL_CMD_SOFT_RESET                = (1<<10), /* only valid for Mali-300 and later */
+} mgp_contr_reg_val_cmd;
+
+
+/**  @defgroup MALIGP2_IRQ
+ * Interrupt status of geometry processor.
+ *  @see MALIGP2_CTRL_REG_INT_RAWSTAT, MALIGP2_REG_ADDR_MGMT_INT_CLEAR,
+ *       MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_ADDR_MGMT_INT_STAT
+ * @{
+ */
+#define MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST      (1 << 0)
+#define MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST    (1 << 1)
+#define MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM     (1 << 2)
+#define MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ          (1 << 3)
+#define MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ        (1 << 4)
+#define MALIGP2_REG_VAL_IRQ_HANG                (1 << 5)
+#define MALIGP2_REG_VAL_IRQ_FORCE_HANG          (1 << 6)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT    (1 << 7)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT    (1 << 8)
+#define MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR     (1 << 9)
+#define MALIGP2_REG_VAL_IRQ_SYNC_ERROR          (1 << 10)
+#define MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR       (1 << 11)
+#define MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED     (1 << 12)
+#define MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD      (1 << 13)
+#define MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD     (1 << 14)
+#define MALI400GP_REG_VAL_IRQ_RESET_COMPLETED     (1 << 19)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW (1 << 20)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW  (1 << 21)
+#define MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS  (1 << 22)
+
+/* Mask defining all IRQs in Mali GP */
+#define MALIGP2_REG_VAL_IRQ_MASK_ALL \
+       (\
+               MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST      | \
+               MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST    | \
+               MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM     | \
+               MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ          | \
+               MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ        | \
+               MALIGP2_REG_VAL_IRQ_HANG                | \
+               MALIGP2_REG_VAL_IRQ_FORCE_HANG          | \
+               MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT    | \
+               MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT    | \
+               MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR     | \
+               MALIGP2_REG_VAL_IRQ_SYNC_ERROR          | \
+               MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR       | \
+               MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED     | \
+               MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD      | \
+               MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD     | \
+               MALI400GP_REG_VAL_IRQ_RESET_COMPLETED     | \
+               MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+               MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW  | \
+               MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+/* Mask defining the IRQs in Mali GP which we use */
+#define MALIGP2_REG_VAL_IRQ_MASK_USED \
+       (\
+               MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST      | \
+               MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST    | \
+               MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM     | \
+               MALIGP2_REG_VAL_IRQ_FORCE_HANG          | \
+               MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR     | \
+               MALIGP2_REG_VAL_IRQ_SYNC_ERROR          | \
+               MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR       | \
+               MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD      | \
+               MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD     | \
+               MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+               MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW  | \
+               MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+/* Mask defining non IRQs on MaliGP2*/
+#define MALIGP2_REG_VAL_IRQ_MASK_NONE 0
+
+/** @} end of defgroup MALIGP2_IRQ */
+
+/** @defgroup MALIGP2_STATUS
+ * The different Status values to the geometry processor.
+ *  @see MALIGP2_CTRL_REG_STATUS
+ * @{
+ */
+#define MALIGP2_REG_VAL_STATUS_VS_ACTIVE         0x0002
+#define MALIGP2_REG_VAL_STATUS_BUS_STOPPED       0x0004
+#define MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE       0x0008
+#define MALIGP2_REG_VAL_STATUS_BUS_ERROR         0x0040
+#define MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR   0x0100
+/** @} end of defgroup MALIGP2_STATUS */
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ACTIVE (\
+       MALIGP2_REG_VAL_STATUS_VS_ACTIVE|\
+       MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE)
+
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ERROR (\
+       MALIGP2_REG_VAL_STATUS_BUS_ERROR |\
+       MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR )
+
+/* This should be in the top 16 bit of the version register of gp.*/
+#define MALI200_GP_PRODUCT_ID 0xA07
+#define MALI300_GP_PRODUCT_ID 0xC07
+#define MALI400_GP_PRODUCT_ID 0xB07
+#define MALI450_GP_PRODUCT_ID 0xD07
+
+/**
+ * The different sources of instrumentation on the geometry processor.
+ *  @see MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC
+ */
+
+enum MALIGP2_cont_reg_perf_cnt_src {
+       MALIGP2_REG_VAL_PERF_CNT1_SRC_NUMBER_OF_VERTICES_PROCESSED = 0x0a,
+};
+
+#endif
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/timestamp-arm11-cc/mali_timestamp.c b/drivers/gpu/arm/mali400/r4p0_rel0/timestamp-arm11-cc/mali_timestamp.c
new file mode 100644 (file)
index 0000000..a6b1d76
--- /dev/null
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_profiling_sampler.h */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/timestamp-arm11-cc/mali_timestamp.h b/drivers/gpu/arm/mali400/r4p0_rel0/timestamp-arm11-cc/mali_timestamp.h
new file mode 100644 (file)
index 0000000..961c185
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+       /*
+        * reset counters and overflow flags
+        */
+
+       u32 mask = (1 << 0) | /* enable all three counters */
+                  (0 << 1) | /* reset both Count Registers to 0x0 */
+                  (1 << 2) | /* reset the Cycle Counter Register to 0x0 */
+                  (0 << 3) | /* 1 = Cycle Counter Register counts every 64th processor clock cycle */
+                  (0 << 4) | /* Count Register 0 interrupt enable */
+                  (0 << 5) | /* Count Register 1 interrupt enable */
+                  (0 << 6) | /* Cycle Counter interrupt enable */
+                  (0 << 8) | /* Count Register 0 overflow flag (clear or write, flag on read) */
+                  (0 << 9) | /* Count Register 1 overflow flag (clear or write, flag on read) */
+                  (1 << 10); /* Cycle Counter Register overflow flag (clear or write, flag on read) */
+
+       __asm__ __volatile__ ("MCR    p15, 0, %0, c15, c12, 0" : : "r" (mask) );
+
+       return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+       u32 result;
+
+       /* this is for the clock cycles */
+       __asm__ __volatile__ ("MRC    p15, 0, %0, c15, c12, 1" : "=r" (result));
+
+       return (u64)result;
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/timestamp-default/mali_timestamp.c b/drivers/gpu/arm/mali400/r4p0_rel0/timestamp-default/mali_timestamp.c
new file mode 100644 (file)
index 0000000..a6b1d76
--- /dev/null
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_profiling_sampler.h */
diff --git a/drivers/gpu/arm/mali400/r4p0_rel0/timestamp-default/mali_timestamp.h b/drivers/gpu/arm/mali400/r4p0_rel0/timestamp-default/mali_timestamp.h
new file mode 100644 (file)
index 0000000..94b842a
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+       return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+       return _mali_osk_time_get_ns();
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/gpu/arm/mali400/ump/.gitignore b/drivers/gpu/arm/mali400/ump/.gitignore
new file mode 100644 (file)
index 0000000..aca8da4
--- /dev/null
@@ -0,0 +1 @@
+arch
\ No newline at end of file
diff --git a/drivers/gpu/arm/mali400/ump/Kbuild b/drivers/gpu/arm/mali400/ump/Kbuild
new file mode 100644 (file)
index 0000000..c5505fa
--- /dev/null
@@ -0,0 +1,74 @@
+#
+# Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+#
+
+# Set default configuration to use, if Makefile didn't provide one.
+# Change this to use a different config.h
+# MALI_SEC
+# CONFIG ?= os_memory_64m
+CONFIG ?= release
+
+# Validate selected config
+ifneq ($(shell [ -d $(srctree)/$(src)/arch-$(CONFIG) ] && [ -f  $(srctree)/$(src)/arch-$(CONFIG)/config.h ] && echo "OK"), OK)
+$(error No configuration found for config $(CONFIG). Check that arch-$(CONFIG)/config.h exists)
+else
+# Link arch to the selected arch-config directory
+$(shell [ -L $(src)/arch ] && rm $(src)/arch)
+$(shell ln -sf arch-$(CONFIG) $(srctree)/$(src)/arch)
+endif
+
+UDD_FILE_PREFIX = ../mali/
+
+# Get subversion revision number, fall back to 0000 if no svn info is available
+SVN_REV := $(shell ((svnversion | grep -qv "\(exported\|Unversioned\)" && echo -n 'Revision: ' && svnversion) || git svn info | sed -e 's/$$$$/M/' | grep '^Revision: ' || echo ${MALI_RELEASE_NAME}) 2>/dev/null | sed -e 's/^Revision: //')
+
+ccflags-y += -DSVN_REV=$(SVN_REV)
+ccflags-y += -DSVN_REV_STRING=\"$(SVN_REV)\"
+
+ccflags-y += -I$(srctree)/$(src) -I$(srctree)/$(src)/common -I$(srctree)/$(src)/linux -I$(srctree)/$(src)/../mali/common -I$(srctree)/$(src)/../mali/linux -I$(srctree)/$(src)/../../ump/include/ump
+# MALI_SEC
+ccflags-y += -I$(srctree)/$(src)/include
+ccflags-y += -DUSING_MEMORY=1 -DUMP_MEM_SIZE=512
+
+ccflags-y += -DMALI_STATE_TRACKING=0
+ccflags-$(CONFIG_UMP_DEBUG) += -DDEBUG
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+
+ifeq ($(wildcard $(srctree)/$(src)/linux/license/gpl/*),)
+ccflags-y += -I$(srctree)/$(src)/linux/license/proprietary
+else
+ccflags-y += -I$(srctree)/$(src)/linux/license/gpl
+endif
+
+ump-y = common/ump_kernel_common.o \
+       common/ump_kernel_descriptor_mapping.o \
+       common/ump_kernel_api.o \
+       common/ump_kernel_ref_drv.o \
+       linux/ump_kernel_linux.o \
+       linux/ump_kernel_memory_backend_os.o \
+       linux/ump_kernel_memory_backend_dedicated.o \
+       linux/ump_memory_backend.o \
+       linux/ump_ukk_wrappers.o \
+       linux/ump_ukk_ref_wrappers.o \
+       linux/ump_osk_atomics.o \
+       linux/ump_osk_low_level_mem.o \
+       linux/ump_osk_misc.o
+
+# MALI_SEC
+#      $(UDD_FILE_PREFIX)linux/mali_osk_atomics.o \
+#      $(UDD_FILE_PREFIX)linux/mali_osk_locks.o \
+#      $(UDD_FILE_PREFIX)linux/mali_osk_memory.o \
+#      $(UDD_FILE_PREFIX)linux/mali_osk_math.o \
+#      $(UDD_FILE_PREFIX)linux/mali_osk_misc.o
+
+obj-$(CONFIG_MALI400_UMP) := ump.o
+
diff --git a/drivers/gpu/arm/mali400/ump/Kconfig b/drivers/gpu/arm/mali400/ump/Kconfig
new file mode 100644 (file)
index 0000000..13785e2
--- /dev/null
@@ -0,0 +1,7 @@
+config UMP_DEBUG
+       bool "Enable extra debug in UMP"
+       depends on MALI400_UMP
+       default n
+       ---help---
+         This enables extra debug checks and messages in UMP.
+
diff --git a/drivers/gpu/arm/mali400/ump/Makefile b/drivers/gpu/arm/mali400/ump/Makefile
new file mode 100644 (file)
index 0000000..912cf47
--- /dev/null
@@ -0,0 +1,67 @@
+#
+# Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+#
+
+# For each arch check: CROSS_COMPILE , KDIR , CFLAGS += -DARCH
+
+export ARCH ?= arm
+BUILD ?= debug
+
+check_cc2 = \
+       $(shell if $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; \
+       then \
+               echo "$(2)"; \
+       else \
+               echo "$(3)"; \
+       fi ;)
+
+# Check that required parameters are supplied.
+ifeq ($(CONFIG),)
+$(error "CONFIG must be specified.")
+endif
+ifeq ($(CPU)$(KDIR),)
+$(error "KDIR or CPU must be specified.")
+endif
+
+# Get any user defined KDIR-<names> or maybe even a hardcoded KDIR
+-include KDIR_CONFIGURATION
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+ifeq ($(ARCH), arm)
+# when compiling for ARM we're cross compiling
+export CROSS_COMPILE ?= $(call check_cc2, arm-linux-gnueabi-gcc, arm-linux-gnueabi-, arm-none-linux-gnueabi-)
+endif
+
+# look up KDIR based on CPU selection
+KDIR ?= $(KDIR-$(CPU))
+
+export CONFIG
+
+export CONFIG_MALI400_UMP := m
+ifeq ($(BUILD),debug)
+export CONFIG_UMP_DEBUG := y
+else
+export CONFIG_UMP_DEBUG := n
+endif
+
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(CPU))
+endif
+
+all:
+       $(MAKE) -C $(KDIR) M=$(CURDIR) modules
+
+kernelrelease:
+       $(MAKE) -C $(KDIR) kernelrelease
+
+clean:
+       $(MAKE) -C $(KDIR) M=$(CURDIR) clean
+       $(MAKE) -C $(KDIR) M=$(CURDIR)/../mali clean
diff --git a/drivers/gpu/arm/mali400/ump/Makefile.common b/drivers/gpu/arm/mali400/ump/Makefile.common
new file mode 100644 (file)
index 0000000..0eb2558
--- /dev/null
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+#
+
+SRC = $(UMP_FILE_PREFIX)common/ump_kernel_common.c \
+       $(UMP_FILE_PREFIX)common/ump_kernel_descriptor_mapping.c \
+       $(UMP_FILE_PREFIX)common/ump_kernel_api.c \
+       $(UMP_FILE_PREFIX)common/ump_kernel_ref_drv.c
+
+# Get subversion revision number, fall back to 0000 if no svn info is available
+SVN_REV:=$(shell ((svnversion | grep -qv exported && echo -n 'Revision: ' && svnversion) || git svn info | sed -e 's/$$$$/M/' | grep '^Revision: ' || echo ${MALI_RELEASE_NAME}) 2>/dev/null | sed -e 's/^Revision: //')
+
+EXTRA_CFLAGS += -DSVN_REV=$(SVN_REV)
+EXTRA_CFLAGS += -DSVN_REV_STRING=\"$(SVN_REV)\"
diff --git a/drivers/gpu/arm/mali400/ump/arch-pb-virtex5/config.h b/drivers/gpu/arm/mali400/ump/arch-pb-virtex5/config.h
new file mode 100644 (file)
index 0000000..a61ca2b
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+#define ARCH_UMP_BACKEND_DEFAULT          0
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT   0xE1000000
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT 16UL * 1024UL * 1024UL
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/arm/mali400/ump/arch-pegasus-m400/config.h b/drivers/gpu/arm/mali400/ump/arch-pegasus-m400/config.h
new file mode 100644 (file)
index 0000000..0b8dd5a
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_UMP_H__
+#define __ARCH_CONFIG_UMP_H__
+
+#define ARCH_UMP_BACKEND_DEFAULT               USING_MEMORY
+#if (USING_MEMORY == 0) /* Dedicated Memory */
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT        0x2C000000
+#else
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT        0
+#endif
+
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT           (UMP_MEM_SIZE * 1024 * 1024) /* parenthesized to keep operator precedence sane at every use site */
+#endif /* __ARCH_CONFIG_UMP_H__ */
diff --git a/drivers/gpu/arm/mali400/ump/arch-release/config.h b/drivers/gpu/arm/mali400/ump/arch-release/config.h
new file mode 100644 (file)
index 0000000..0b8dd5a
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_UMP_H__
+#define __ARCH_CONFIG_UMP_H__
+
+#define ARCH_UMP_BACKEND_DEFAULT               USING_MEMORY
+#if (USING_MEMORY == 0) /* Dedicated Memory */
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT        0x2C000000
+#else
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT        0
+#endif
+
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT           (UMP_MEM_SIZE * 1024 * 1024) /* parenthesized to keep operator precedence sane at every use site */
+#endif /* __ARCH_CONFIG_UMP_H__ */
diff --git a/drivers/gpu/arm/mali400/ump/common/ump_kernel_api.c b/drivers/gpu/arm/mali400/ump/common/ump_kernel_api.c
new file mode 100644 (file)
index 0000000..f45fc3a
--- /dev/null
@@ -0,0 +1,580 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_common.h"
+
+/* MALI_SEC */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+#include <linux/dma-buf.h>
+#endif
+
+/* ---------------- UMP kernel space API functions follows ---------------- */
+
+
+
+/* Return the globally unique secure ID of a UMP allocation handle. */
+UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle memh)
+{
+       ump_dd_mem * mem = (ump_dd_mem *)memh;
+
+       DEBUG_ASSERT_POINTER(mem);
+
+       DBG_MSG(5, ("Returning secure ID. ID: %u\n", mem->secure_id));
+
+       return mem->secure_id;
+}
+
+
+
+/*
+ * Look up an allocation by secure ID and take a reference on it under
+ * the secure-ID map lock.  Returns UMP_DD_HANDLE_INVALID when the ID is
+ * unknown.  The caller owns the acquired reference and must release it
+ * with ump_dd_reference_release().
+ */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id)
+{
+       ump_dd_mem * mem;
+
+       _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+       DBG_MSG(5, ("Getting handle from secure ID. ID: %u\n", secure_id));
+       if (0 != ump_descriptor_mapping_get(device.secure_id_map, (int)secure_id, (void**)&mem))
+       {
+               _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+               DBG_MSG(1, ("Secure ID not found. ID: %u\n", secure_id));
+               return UMP_DD_HANDLE_INVALID;
+       }
+
+       ump_dd_reference_add(mem);
+
+       _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+       return (ump_dd_handle)mem;
+}
+/* MALI_SEC */
+/*
+ * Samsung addition: like ump_dd_handle_create_from_secure_id() but does
+ * NOT take a reference on the allocation.
+ * NOTE(review): the returned handle is therefore not pinned and may be
+ * freed concurrently -- confirm that every caller holds its own liveness
+ * guarantee before using this instead of the refcounting variant.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_get(ump_secure_id secure_id)
+{
+       ump_dd_mem * mem;
+
+       _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+       DBG_MSG(5, ("Getting handle from secure ID. ID: %u\n", secure_id));
+       if (0 != ump_descriptor_mapping_get(device.secure_id_map, (int)secure_id, (void**)&mem))
+       {
+               _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+               DBG_MSG(1, ("Secure ID not found. ID: %u\n", secure_id));
+               return UMP_DD_HANDLE_INVALID;
+       }
+
+       _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+       return (ump_dd_handle)mem;
+}
+
+/* Return the number of physical memory blocks backing the allocation. */
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle memh)
+{
+       ump_dd_mem * mem = (ump_dd_mem*) memh;
+
+       DEBUG_ASSERT_POINTER(mem);
+
+       return mem->nr_blocks;
+}
+
+
+
+/*
+ * Copy the allocation's physical block descriptors into the caller-supplied
+ * array.  num_blocks must equal ump_dd_phys_block_count_get(); otherwise
+ * UMP_DD_INVALID is returned and nothing is copied.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle memh, ump_dd_physical_block * blocks, unsigned long num_blocks)
+{
+       ump_dd_mem * mem = (ump_dd_mem *)memh;
+
+       DEBUG_ASSERT_POINTER(mem);
+
+       if (blocks == NULL)
+       {
+               DBG_MSG(1, ("NULL parameter in ump_dd_phys_blocks_get()\n"));
+               return UMP_DD_INVALID;
+       }
+
+       if (mem->nr_blocks != num_blocks)
+       {
+               DBG_MSG(1, ("Specified number of blocks do not match actual number of blocks\n"));
+               return UMP_DD_INVALID;
+       }
+
+       DBG_MSG(5, ("Returning physical block information. ID: %u\n", mem->secure_id));
+
+       _mali_osk_memcpy(blocks, mem->block_array, sizeof(ump_dd_physical_block) * mem->nr_blocks);
+
+       return UMP_DD_SUCCESS;
+}
+
+
+
+/*
+ * Fetch a single physical block descriptor by index.  Returns
+ * UMP_DD_INVALID for a NULL output pointer or an out-of-range index.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle memh, unsigned long index, ump_dd_physical_block * block)
+{
+       ump_dd_mem * mem = (ump_dd_mem *)memh;
+
+       DEBUG_ASSERT_POINTER(mem);
+
+       if (block == NULL)
+       {
+               DBG_MSG(1, ("NULL parameter in ump_dd_phys_block_get()\n"));
+               return UMP_DD_INVALID;
+       }
+
+       if (index >= mem->nr_blocks)
+       {
+               DBG_MSG(5, ("Invalid index specified in ump_dd_phys_block_get()\n"));
+               return UMP_DD_INVALID;
+       }
+
+       DBG_MSG(5, ("Returning physical block information. ID: %u, index: %lu\n", mem->secure_id, index));
+
+       *block = mem->block_array[index];
+
+       return UMP_DD_SUCCESS;
+}
+
+
+
+/* Return the allocation size in bytes. */
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle memh)
+{
+       ump_dd_mem * mem = (ump_dd_mem*)memh;
+
+       DEBUG_ASSERT_POINTER(mem);
+
+       DBG_MSG(5, ("Returning size. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
+
+       return mem->size_bytes;
+}
+
+
+
+/* Atomically increment the allocation's reference count. */
+UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle memh)
+{
+       ump_dd_mem * mem = (ump_dd_mem*)memh;
+       int new_ref;
+
+       DEBUG_ASSERT_POINTER(mem);
+
+       new_ref = _ump_osk_atomic_inc_and_read(&mem->ref_count);
+
+       DBG_MSG(5, ("Memory reference incremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
+}
+
+
+
+/*
+ * Drop a reference on the allocation.  On the final release the secure ID
+ * is removed from the map (still under the map lock, so no new lookup can
+ * race the teardown), any imported dma-buf attachment is detached, and the
+ * backend-specific release hook frees the physical memory.
+ */
+UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle memh)
+{
+       int new_ref;
+       ump_dd_mem * mem = (ump_dd_mem*)memh;
+
+       DEBUG_ASSERT_POINTER(mem);
+
+       /* We must hold this mutex while doing the atomic_dec_and_read, to protect
+       that elements in the ump_descriptor_mapping table is always valid.  If they
+       are not, userspace may accidently map in this secure_ids right before its freed
+       giving a mapped backdoor into unallocated memory.*/
+       _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+       new_ref = _ump_osk_atomic_dec_and_read(&mem->ref_count);
+
+       DBG_MSG(5, ("Memory reference decremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
+
+       if (0 == new_ref)
+       {
+               DBG_MSG(3, ("Final release of memory. ID: %u\n", mem->secure_id));
+
+               ump_descriptor_mapping_free(device.secure_id_map, (int)mem->secure_id);
+
+               _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+/* MALI_SEC */
+/* NOTE(review): the block below is under-indented but IS inside the
+ * "if (0 == new_ref)" branch -- it only runs on the final release. */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+       /*
+        * when ump descriptor imported to dmabuf is released,
+        * physical memory region to the ump descriptor should be
+        * released only through dma_buf_put().
+        * if dma_buf_put() is called then file's refcount to
+        * the dmabuf becomes 0 and release func of exporter will be
+        * called by file->f_op->release to release the physical
+        * memory region finally.
+        */
+       if (mem->import_attach) {
+               struct dma_buf_attachment *attach = mem->import_attach;
+               struct dma_buf *dmabuf = attach->dmabuf;
+
+               /* NOTE(review): DMA_NONE is passed as the unmap direction --
+                * confirm it matches the direction used at map time. */
+               if (mem->sgt)
+                       dma_buf_unmap_attachment(attach, mem->sgt,
+                                               DMA_NONE);
+
+               dma_buf_detach(dmabuf, attach);
+               mem->import_attach = NULL;
+               dma_buf_put(dmabuf);
+       }
+#endif
+
+               mem->release_func(mem->ctx, mem);
+               _mali_osk_free(mem);
+       }
+       else
+       {
+               _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+       }
+}
+
+
+
+/* --------------- Handling of user space requests follows --------------- */
+
+
+/*
+ * Negotiate the ioctl API version with userspace.  Accepts the current
+ * version and the deprecated v1; anything else is reported incompatible
+ * and our own version is written back so the client can see it.
+ */
+_mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args )
+{
+       ump_session_data * session_data;
+
+       DEBUG_ASSERT_POINTER( args );
+       DEBUG_ASSERT_POINTER( args->ctx );
+
+       session_data = (ump_session_data *)args->ctx;
+
+       /* check compatibility */
+       if (args->version == UMP_IOCTL_API_VERSION)
+       {
+               DBG_MSG(3, ("API version set to newest %d (compatible)\n", GET_VERSION(args->version)));
+               args->compatible = 1;
+               session_data->api_version = args->version;
+       }
+       else if (args->version == MAKE_VERSION_ID(1))
+       {
+               DBG_MSG(2, ("API version set to depricated: %d (compatible)\n", GET_VERSION(args->version)));
+               args->compatible = 1;
+               session_data->api_version = args->version;
+       }
+       else
+       {
+               DBG_MSG(2, ("API version set to %d (incompatible with client version %d)\n", GET_VERSION(UMP_IOCTL_API_VERSION), GET_VERSION(args->version)));
+               args->compatible = 0;
+               args->version = UMP_IOCTL_API_VERSION; /* report our version */
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+
+/*
+ * Handle the RELEASE ioctl: find the secure ID in this session's memory
+ * list, unlink it and drop the session's reference.  Returns
+ * _MALI_OSK_ERR_INVALID_FUNC if the ID does not belong to the session.
+ */
+_mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info )
+{
+       ump_session_memory_list_element * session_memory_element;
+       ump_session_memory_list_element * tmp;
+       ump_session_data * session_data;
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_INVALID_FUNC;
+       int secure_id;
+
+       DEBUG_ASSERT_POINTER( release_info );
+       DEBUG_ASSERT_POINTER( release_info->ctx );
+
+       /* Retrieve the session data */
+       session_data = (ump_session_data*)release_info->ctx;
+
+       /* If there are many items in the memory session list we
+        * could be de-referencing this pointer a lot so keep a local copy
+        */
+       secure_id = release_info->secure_id;
+
+       DBG_MSG(4, ("Releasing memory with IOCTL, ID: %u\n", secure_id));
+
+       /* Iterate through the memory list looking for the requested secure ID */
+       _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+       _MALI_OSK_LIST_FOREACHENTRY(session_memory_element, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list)
+       {
+               if ( session_memory_element->mem->secure_id == secure_id)
+               {
+                       ump_dd_mem *release_mem;
+
+                       release_mem = session_memory_element->mem;
+                       _mali_osk_list_del(&session_memory_element->list);
+                       ump_dd_reference_release(release_mem);
+                       _mali_osk_free(session_memory_element);
+
+                       ret = _MALI_OSK_ERR_OK;
+                       break;
+               }
+       }
+
+       _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+       DBG_MSG_IF(1, _MALI_OSK_ERR_OK != ret, ("UMP memory with ID %u does not belong to this session.\n", secure_id));
+
+       DBG_MSG(4, ("_ump_ukk_release() returning 0x%x\n", ret));
+       return ret;
+}
+
+/*
+ * Handle the SIZE_GET ioctl: report the byte size of the allocation with
+ * the given secure ID, or 0 (and _MALI_OSK_ERR_FAULT) if the ID is unknown.
+ */
+_mali_osk_errcode_t _ump_ukk_size_get( _ump_uk_size_get_s *user_interaction )
+{
+       ump_dd_mem * mem;
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+
+       DEBUG_ASSERT_POINTER( user_interaction );
+
+       /* We lock the mappings so things don't get removed while we are looking for the memory */
+       _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+       if (0 == ump_descriptor_mapping_get(device.secure_id_map, (int)user_interaction->secure_id, (void**)&mem))
+       {
+               user_interaction->size = mem->size_bytes;
+               DBG_MSG(4, ("Returning size. ID: %u, size: %lu ", (ump_secure_id)user_interaction->secure_id, (unsigned long)user_interaction->size));
+               ret = _MALI_OSK_ERR_OK;
+       }
+       else
+       {
+                user_interaction->size = 0;
+               DBG_MSG(1, ("Failed to look up mapping in ump_ioctl_size_get(). ID: %u\n", (ump_secure_id)user_interaction->secure_id));
+       }
+
+       _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+       return ret;
+}
+
+
+
+/*
+ * Handle the MSYNC ioctl: perform a CPU cache maintenance operation on
+ * (part of) an allocation.  The allocation is pinned with a temporary
+ * reference for the duration of the flush.  If no address/size is given
+ * the whole user mapping is flushed.  Bounds are validated against the
+ * allocation size before the OS-specific flush is invoked.
+ */
+void _ump_ukk_msync( _ump_uk_msync_s *args )
+{
+       ump_dd_mem * mem = NULL;
+       void *virtual = NULL;
+       u32 size = 0;
+       u32 offset = 0;
+
+       _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+       ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
+
+       if (NULL == mem)
+       {
+               _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+               DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_msync(). ID: %u\n", (ump_secure_id)args->secure_id));
+               return;
+       }
+       /* Ensure the memory doesn't disappear while we are flushing it. */
+       ump_dd_reference_add(mem);
+       _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+       /* Returns the cache settings back to Userspace */
+       args->is_cached=mem->is_cached;
+
+       /* If this flag is the only one set, we should not do the actual flush, only the readout */
+       if ( _UMP_UK_MSYNC_READOUT_CACHE_ENABLED==args->op )
+       {
+               DBG_MSG(3, ("_ump_ukk_msync READOUT  ID: %u Enabled: %d\n", (ump_secure_id)args->secure_id, mem->is_cached));
+               goto msync_release_and_return;
+       }
+
+       /* Nothing to do if the memory is not cached */
+       if ( 0==mem->is_cached )
+       {
+               DBG_MSG(3, ("_ump_ukk_msync IGNORING ID: %u Enabled: %d  OP: %d\n", (ump_secure_id)args->secure_id, mem->is_cached, args->op));
+               goto msync_release_and_return;
+       }
+       DBG_MSG(3, ("UMP[%02u] _ump_ukk_msync  Flush  OP: %d Address: 0x%08x Mapping: 0x%08x\n",
+                   (ump_secure_id)args->secure_id, args->op, args->address, args->mapping));
+
+       if ( args->address )
+       {
+               virtual = (void *)((u32)args->address);
+               offset = (u32)((args->address) - (args->mapping));
+       } else {
+               /* Flush entire mapping when no address is specified. */
+               virtual = args->mapping;
+       }
+       if ( args->size )
+       {
+               size = args->size;
+       } else {
+               /* Flush entire mapping when no size is specified. */
+               size = mem->size_bytes - offset;
+       }
+
+       if ( (offset + size) > mem->size_bytes )
+       {
+               DBG_MSG(1, ("Trying to flush more than the entire UMP allocation: offset: %u + size: %u > %u\n", offset, size, mem->size_bytes));
+               goto msync_release_and_return;
+       }
+
+       /* The actual cache flush - Implemented for each OS*/
+       _ump_osk_msync( mem, virtual, offset, size, args->op, NULL);
+
+msync_release_and_return:
+       ump_dd_reference_release(mem);
+       return;
+}
+
+/*
+ * Track begin/end of a batch of cache operations for a session.  On
+ * FINISH an L1 cache flush is always issued (defensive), under the
+ * session lock that also guards cache_operations_ongoing.
+ */
+void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s* args)
+{
+       ump_session_data * session_data;
+       ump_uk_cache_op_control op;
+
+       DEBUG_ASSERT_POINTER( args );
+       DEBUG_ASSERT_POINTER( args->ctx );
+
+       op = args->op;
+       session_data = (ump_session_data *)args->ctx;
+
+       _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+       if ( op== _UMP_UK_CACHE_OP_START )
+       {
+               session_data->cache_operations_ongoing++;
+               DBG_MSG(4, ("Cache ops start\n" ));
+               if ( session_data->cache_operations_ongoing != 1 )
+               {
+                       DBG_MSG(2, ("UMP: Number of simultanious cache control ops: %d\n", session_data->cache_operations_ongoing) );
+               }
+       }
+       else if ( op== _UMP_UK_CACHE_OP_FINISH )
+       {
+               DBG_MSG(4, ("Cache ops finish\n"));
+               session_data->cache_operations_ongoing--;
+               #if 0
+               if ( session_data->has_pending_level1_cache_flush)
+               {
+                       /* This function will set has_pending_level1_cache_flush=0 */
+                       _ump_osk_msync( NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
+               }
+               #endif
+
+               /* to be on the safe side: always flush l1 cache when cache operations are done */
+               _ump_osk_msync( NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
+               DBG_MSG(4, ("Cache ops finish end\n" ));
+       }
+       else
+       {
+               DBG_MSG(1, ("Illegal call to %s at line %d\n", __FUNCTION__, __LINE__));
+       }
+       _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+}
+
+/*
+ * Handle ownership handover of an allocation between CPU and GPU.
+ * Records the new user and, when the memory is cached and exactly one of
+ * the old/new users is the CPU, performs the required cache maintenance:
+ * invalidate when handing to the CPU, clean+invalidate otherwise.
+ * All early-out paths release the map lock before returning.
+ */
+void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args )
+{
+       ump_dd_mem * mem = NULL;
+       ump_uk_user old_user;
+       ump_uk_msync_op cache_op = _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE;
+       ump_session_data *session_data;
+
+       DEBUG_ASSERT_POINTER( args );
+       DEBUG_ASSERT_POINTER( args->ctx );
+
+       session_data = (ump_session_data *)args->ctx;
+
+       _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+       ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
+
+       if (NULL == mem)
+       {
+               _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+               DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_switch_hw_usage(). ID: %u\n", (ump_secure_id)args->secure_id));
+               return;
+       }
+
+       old_user = mem->hw_device;
+       mem->hw_device = args->new_user;
+
+       DBG_MSG(3, ("UMP[%02u] Switch usage  Start  New: %s  Prev: %s.\n", (ump_secure_id)args->secure_id, args->new_user?"MALI":"CPU",old_user?"MALI":"CPU"));
+
+       if ( ! mem->is_cached )
+       {
+               _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+               DBG_MSG(3, ("UMP[%02u] Changing owner of uncached memory. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
+               return;
+       }
+
+       if ( old_user == args->new_user)
+       {
+               _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+               DBG_MSG(4, ("UMP[%02u] Setting the new_user equal to previous for. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
+               return;
+       }
+       if (
+                /* Previous AND new is both different from CPU */
+                (old_user != _UMP_UK_USED_BY_CPU) && (args->new_user != _UMP_UK_USED_BY_CPU  )
+          )
+       {
+               _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+               DBG_MSG(4, ("UMP[%02u] Previous and new user is not CPU. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
+               return;
+       }
+
+       if ( (old_user != _UMP_UK_USED_BY_CPU ) && (args->new_user==_UMP_UK_USED_BY_CPU) )
+       {
+               cache_op =_UMP_UK_MSYNC_INVALIDATE;
+               DBG_MSG(4, ("UMP[%02u] Cache invalidation needed\n", (ump_secure_id)args->secure_id));
+#ifdef UMP_SKIP_INVALIDATION
+#error
+               _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+               DBG_MSG(4, ("UMP[%02u] Performing Cache invalidation SKIPPED\n", (ump_secure_id)args->secure_id));
+               return;
+#endif
+       }
+       /* Ensure the memory doesn't disappear while we are flushing it. */
+       ump_dd_reference_add(mem);
+       _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+       /* Take lock to protect: session->cache_operations_ongoing and session->has_pending_level1_cache_flush */
+       _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+       /* Actual cache flush */
+       _ump_osk_msync( mem, NULL, 0, mem->size_bytes, cache_op, session_data);
+       _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+       ump_dd_reference_release(mem);
+       DBG_MSG(4, ("UMP[%02u] Switch usage  Finish\n", (ump_secure_id)args->secure_id));
+       return;
+}
+
+/*
+ * Record the requested lock usage on an allocation.  The allocation is
+ * pinned with a temporary reference while the flag is updated; the real
+ * locking primitive is not yet implemented (see TODO).
+ */
+void _ump_ukk_lock(_ump_uk_lock_s *args )
+{
+       ump_dd_mem * mem = NULL;
+
+       _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+       ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
+
+       if (NULL == mem)
+       {
+               _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+               /* Fixed format bug: message had two conversions but one argument. */
+               DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_lock(). ID: %u\n", (ump_secure_id)args->secure_id));
+               return;
+       }
+       ump_dd_reference_add(mem);
+       _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+       /* Fixed format bug: added the missing %d for the old lock flag. */
+       DBG_MSG(1, ("UMP[%02u] Lock. New lock flag: %d. Old Lock flag: %d\n", (u32)args->secure_id, (u32)args->lock_usage, (u32) mem->lock_usage ));
+
+       mem->lock_usage = (ump_lock_usage) args->lock_usage;
+
+       /** TODO: TAKE LOCK HERE */
+
+       ump_dd_reference_release(mem);
+}
+
+/*
+ * Clear the lock usage flag on an allocation (sets UMP_NOT_LOCKED).
+ * Mirrors _ump_ukk_lock(); the real unlock is not yet implemented.
+ */
+void _ump_ukk_unlock(_ump_uk_unlock_s *args )
+{
+       ump_dd_mem * mem = NULL;
+
+       _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+       ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
+
+       if (NULL == mem)
+       {
+               _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+               DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_unlock(). ID: %u\n", (ump_secure_id)args->secure_id));
+               return;
+       }
+       ump_dd_reference_add(mem);
+       _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+       /* Fixed format bug: added the missing %d for the old lock flag. */
+       DBG_MSG(1, ("UMP[%02u] Unlocking. Old Lock flag: %d\n", (u32)args->secure_id, (u32) mem->lock_usage ));
+
+       mem->lock_usage = (ump_lock_usage) UMP_NOT_LOCKED;
+
+       /** TODO: RELEASE LOCK HERE */
+
+       ump_dd_reference_release(mem);
+}
diff --git a/drivers/gpu/arm/mali400/ump/common/ump_kernel_common.c b/drivers/gpu/arm/mali400/ump/common/ump_kernel_common.c
new file mode 100644 (file)
index 0000000..27b816e
--- /dev/null
@@ -0,0 +1,416 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+/**
+ * Define the initial and maximum size of number of secure_ids on the system
+ */
+#define UMP_SECURE_ID_TABLE_ENTRIES_INITIAL (128  )
+#define UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM (4096 )
+
+
+/**
+ * Define the initial and maximum size of the ump_session_data::cookies_map,
+ * which is a \ref ump_descriptor_mapping. This limits how many secure_ids
+ * may be mapped into a particular process using _ump_ukk_map_mem().
+ */
+
+#define UMP_COOKIES_PER_SESSION_INITIAL (UMP_SECURE_ID_TABLE_ENTRIES_INITIAL )
+#define UMP_COOKIES_PER_SESSION_MAXIMUM (UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM)
+
+/* The single global UMP device state: secure-ID map + lock and the memory backend. */
+struct ump_dev device;
+
+/*
+ * Initialize the UMP driver core: OS layer, global device state, the
+ * secure-ID descriptor map with its lock, and the memory backend.
+ * Each failure path tears down what was already created.
+ */
+_mali_osk_errcode_t ump_kernel_constructor(void)
+{
+       _mali_osk_errcode_t err;
+
+       /* Perform OS Specific initialization */
+       err = _ump_osk_init();
+       if( _MALI_OSK_ERR_OK != err )
+       {
+               MSG_ERR(("Failed to initiaze the UMP Device Driver"));
+               return err;
+       }
+
+       /* Init the global device */
+       _mali_osk_memset(&device, 0, sizeof(device) );
+
+       /* Create the descriptor map, which will be used for mapping secure ID to ump_dd_mem structs */
+       device.secure_id_map_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0 , 0);
+       if (NULL == device.secure_id_map_lock)
+       {
+               MSG_ERR(("Failed to create OSK lock for secure id lookup table\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       device.secure_id_map = ump_descriptor_mapping_create(UMP_SECURE_ID_TABLE_ENTRIES_INITIAL, UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM);
+       if (NULL == device.secure_id_map)
+       {
+               _mali_osk_lock_term(device.secure_id_map_lock);
+               MSG_ERR(("Failed to create secure id lookup table\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       /* Init memory backend */
+       device.backend = ump_memory_backend_create();
+       if (NULL == device.backend)
+       {
+               MSG_ERR(("Failed to create memory backend\n"));
+               _mali_osk_lock_term(device.secure_id_map_lock);
+               ump_descriptor_mapping_destroy(device.secure_id_map);
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Tear down driver core state in reverse order of construction. */
+void ump_kernel_destructor(void)
+{
+       DEBUG_ASSERT_POINTER(device.secure_id_map);
+       DEBUG_ASSERT_POINTER(device.secure_id_map_lock);
+
+       _mali_osk_lock_term(device.secure_id_map_lock);
+       device.secure_id_map_lock = NULL;
+
+       ump_descriptor_mapping_destroy(device.secure_id_map);
+       device.secure_id_map = NULL;
+
+       device.backend->shutdown(device.backend);
+       device.backend = NULL;
+
+       ump_memory_backend_destroy();
+
+       _ump_osk_term();
+}
+
+/** Creates a new UMP session: allocates per-session state (lock, cookie
+ *  map, memory lists) and stores it in *context.  Returns
+ *  _MALI_OSK_ERR_NOMEM on any allocation failure, undoing partial setup.
+ */
+_mali_osk_errcode_t _ump_ukk_open( void** context )
+{
+       struct ump_session_data * session_data;
+
+       /* allocated struct to track this session */
+       session_data = (struct ump_session_data *)_mali_osk_malloc(sizeof(struct ump_session_data));
+       if (NULL == session_data)
+       {
+               MSG_ERR(("Failed to allocate ump_session_data in ump_file_open()\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       session_data->lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0);
+       if( NULL == session_data->lock )
+       {
+               MSG_ERR(("Failed to initialize lock for ump_session_data in ump_file_open()\n"));
+               _mali_osk_free(session_data);
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       session_data->cookies_map = ump_descriptor_mapping_create( UMP_COOKIES_PER_SESSION_INITIAL, UMP_COOKIES_PER_SESSION_MAXIMUM );
+
+       if ( NULL == session_data->cookies_map )
+       {
+               MSG_ERR(("Failed to create descriptor mapping for _ump_ukk_map_mem cookies\n"));
+
+               _mali_osk_lock_term( session_data->lock );
+               _mali_osk_free( session_data );
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       _MALI_OSK_INIT_LIST_HEAD(&session_data->list_head_session_memory_list);
+
+       _MALI_OSK_INIT_LIST_HEAD(&session_data->list_head_session_memory_mappings_list);
+
+       /* Since initial version of the UMP interface did not use the API_VERSION ioctl we have to assume
+          that it is this version, and not the "latest" one: UMP_IOCTL_API_VERSION
+          Current and later API versions would do an additional call to this IOCTL and update this variable
+          to the correct one.*/
+       session_data->api_version = MAKE_VERSION_ID(1);
+
+       *context = (void*)session_data;
+
+       session_data->cache_operations_ongoing = 0 ;
+       session_data->has_pending_level1_cache_flush = 0;
+
+       DBG_MSG(2, ("New session opened\n"));
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/*
+ * Close a UMP session: unmap any mappings still held by the process,
+ * release every allocation referenced by the session, then free the
+ * session's cookie map, lock and the session struct itself.
+ */
+_mali_osk_errcode_t _ump_ukk_close( void** context )
+{
+       struct ump_session_data * session_data;
+       ump_session_memory_list_element * item;
+       ump_session_memory_list_element * tmp;
+
+       session_data = (struct ump_session_data *)*context;
+       if (NULL == session_data)
+       {
+               MSG_ERR(("Session data is NULL in _ump_ukk_close()\n"));
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       }
+
+       /* Unmap any descriptors mapped in. */
+       if (0 == _mali_osk_list_empty(&session_data->list_head_session_memory_mappings_list))
+       {
+               ump_memory_allocation *descriptor;
+               ump_memory_allocation *temp;
+
+               DBG_MSG(1, ("Memory mappings found on session usage list during session termination\n"));
+
+               /* use the 'safe' list iterator, since freeing removes the active block from the list we're iterating */
+               _MALI_OSK_LIST_FOREACHENTRY(descriptor, temp, &session_data->list_head_session_memory_mappings_list, ump_memory_allocation, list)
+               {
+                       _ump_uk_unmap_mem_s unmap_args;
+                       DBG_MSG(4, ("Freeing block with phys address 0x%x size 0x%x mapped in user space at 0x%x\n",
+                                   descriptor->phys_addr, descriptor->size, descriptor->mapping));
+                       unmap_args.ctx = (void*)session_data;
+                       unmap_args.mapping = descriptor->mapping;
+                       unmap_args.size = descriptor->size;
+                       unmap_args._ukk_private = NULL; /* NOTE: unused */
+                       unmap_args.cookie = descriptor->cookie;
+
+                       /* NOTE: This modifies the list_head_session_memory_mappings_list */
+                       _ump_ukk_unmap_mem( &unmap_args );
+               }
+       }
+
+       /* ASSERT that we really did free everything, because _ump_ukk_unmap_mem()
+        * can fail silently. */
+       DEBUG_ASSERT( _mali_osk_list_empty(&session_data->list_head_session_memory_mappings_list) );
+
+       _MALI_OSK_LIST_FOREACHENTRY(item, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list)
+       {
+               _mali_osk_list_del(&item->list);
+               DBG_MSG(2, ("Releasing UMP memory %u as part of file close\n", item->mem->secure_id));
+               ump_dd_reference_release(item->mem);
+               _mali_osk_free(item);
+       }
+
+       ump_descriptor_mapping_destroy( session_data->cookies_map );
+
+       _mali_osk_lock_term(session_data->lock);
+       _mali_osk_free(session_data);
+
+       DBG_MSG(2, ("Session closed\n"));
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * _ump_ukk_map_mem() - map an existing UMP allocation into the calling
+ * process' address space.
+ *
+ * Resolves args->secure_id to the underlying ump_dd_mem (taking a
+ * reference), allocates a session-local cookie for the mapping, maps each
+ * physical block into user space, and finally links the descriptor into
+ * the session's mapping list so session teardown can clean it up.
+ *
+ * @param args In:  ctx (session), secure_id, size, phys_addr, _ukk_private.
+ *             Out: mapping (user-space address), cookie, is_cached.
+ * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_INVALID_ARGS,
+ *         _MALI_OSK_ERR_NOMEM or _MALI_OSK_ERR_FAULT on failure.
+ */
+_mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args )
+{
+       struct ump_session_data * session_data;
+       ump_memory_allocation * descriptor;  /* Describes current mapping of memory */
+       _mali_osk_errcode_t err;
+       unsigned long offset = 0;
+       unsigned long left;
+       ump_dd_handle handle;  /* The real UMP handle for this memory. Its real datatype is ump_dd_mem*  */
+       ump_dd_mem * mem;      /* The real UMP memory. It is equal to the handle, but with exposed struct */
+       u32 block;
+       int map_id;
+
+       session_data = (ump_session_data *)args->ctx;
+       if( NULL == session_data )
+       {
+               MSG_ERR(("Session data is NULL in _ump_ukk_map_mem()\n"));
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       }
+       /* MALI_SEC */
+       /* SEC kernel stability 2012-02-17 */
+       if (NULL == session_data->cookies_map)
+       {
+               MSG_ERR(("session_data->cookies_map is NULL in _ump_ukk_map_mem()\n"));
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       }
+       descriptor = (ump_memory_allocation*) _mali_osk_calloc( 1, sizeof(ump_memory_allocation));
+       if (NULL == descriptor)
+       {
+               MSG_ERR(("ump_ukk_map_mem: descriptor allocation failed\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       /* Takes a reference on the underlying memory; every error path below
+        * this point must release it (via ump_dd_reference_release). */
+       handle = ump_dd_handle_create_from_secure_id(args->secure_id);
+       if ( UMP_DD_HANDLE_INVALID == handle)
+       {
+               _mali_osk_free(descriptor);
+               DBG_MSG(1, ("Trying to map unknown secure ID %u\n", args->secure_id));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       mem = (ump_dd_mem*)handle;
+       DEBUG_ASSERT(mem);
+       /* The caller must request a mapping of exactly the allocation's size. */
+       if (mem->size_bytes != args->size)
+       {
+               _mali_osk_free(descriptor);
+               ump_dd_reference_release(handle);
+               DBG_MSG(1, ("Trying to map too much or little. ID: %u, virtual size=%lu, UMP size: %lu\n", args->secure_id, args->size, mem->size_bytes));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* The cookie handed back to user space is session-local, so stale or
+        * forged cookies cannot reach another session's descriptors. */
+       map_id = ump_descriptor_mapping_allocate_mapping( session_data->cookies_map, (void*) descriptor );
+
+       if (map_id < 0)
+       {
+               _mali_osk_free(descriptor);
+               ump_dd_reference_release(handle);
+               DBG_MSG(1, ("ump_ukk_map_mem: unable to allocate a descriptor_mapping for return cookie\n"));
+
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       descriptor->size = args->size;
+       descriptor->handle = handle;
+       descriptor->phys_addr = args->phys_addr;
+       descriptor->process_mapping_info = args->_ukk_private;
+       descriptor->ump_session = session_data;
+       descriptor->cookie = (u32)map_id;
+
+       /* Propagate cacheability. MALI_SEC extension: requesting a cached
+        * mapping of an uncached allocation also flags the allocation itself
+        * as cached from then on. */
+       if ( mem->is_cached )
+       {
+               descriptor->is_cached = 1;
+               args->is_cached       = 1;
+               DBG_MSG(3, ("Mapping UMP secure_id: %d as cached.\n", args->secure_id));
+       }
+       else if ( args->is_cached) /* MALI_SEC */
+       {
+               mem->is_cached = 1;
+               descriptor->is_cached = 1;
+               DBG_MSG(3, ("Warning mapping UMP secure_id: %d. As cached, while it was allocated uncached.\n", args->secure_id));
+       }
+       else
+       {
+               descriptor->is_cached = 0;
+               args->is_cached       = 0;
+               DBG_MSG(3, ("Mapping UMP secure_id: %d  as Uncached.\n", args->secure_id));
+       }
+
+       _mali_osk_list_init( &descriptor->list );
+
+       err = _ump_osk_mem_mapregion_init( descriptor );
+       if( _MALI_OSK_ERR_OK != err )
+       {
+               DBG_MSG(1, ("Failed to initialize memory mapping in _ump_ukk_map_mem(). ID: %u\n", args->secure_id));
+               ump_descriptor_mapping_free( session_data->cookies_map, map_id );
+               _mali_osk_free(descriptor);
+               ump_dd_reference_release(mem);
+               return err;
+       }
+
+       DBG_MSG(4, ("Mapping virtual to physical memory: ID: %u, size:%lu, first physical addr: 0x%08lx, number of regions: %lu\n",
+               mem->secure_id,
+               mem->size_bytes,
+               ((NULL != mem->block_array) ? mem->block_array->addr : 0),
+               mem->nr_blocks));
+
+       left = descriptor->size;
+       /* loop over all blocks and map them in */
+       for (block = 0; block < mem->nr_blocks; block++)
+       {
+               unsigned long size_to_map;
+
+               /* The final block may be only partially used by this mapping. */
+               if (left >  mem->block_array[block].size)
+               {
+                       size_to_map = mem->block_array[block].size;
+               }
+               else
+               {
+                       size_to_map = left;
+               }
+
+               if (_MALI_OSK_ERR_OK != _ump_osk_mem_mapregion_map(descriptor, offset, (u32 *)&(mem->block_array[block].addr), size_to_map ) )
+               {
+                       DBG_MSG(1, ("WARNING: _ump_ukk_map_mem failed to map memory into userspace\n"));
+                       ump_descriptor_mapping_free( session_data->cookies_map, map_id );
+                       ump_dd_reference_release(mem);
+                       _ump_osk_mem_mapregion_term( descriptor );
+                       _mali_osk_free(descriptor);
+                       return _MALI_OSK_ERR_FAULT;
+               }
+               left -= size_to_map;
+               offset += size_to_map;
+       }
+
+       /* Add to the ump_memory_allocation tracking list */
+       _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+       _mali_osk_list_add( &descriptor->list, &session_data->list_head_session_memory_mappings_list );
+       _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+       args->mapping = descriptor->mapping;
+       args->cookie = descriptor->cookie;
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * _ump_ukk_unmap_mem() - undo a mapping created by _ump_ukk_map_mem().
+ *
+ * Looks up the ump_memory_allocation via the session-local cookie,
+ * unlinks it from the session's mapping list, frees the cookie, releases
+ * the UMP reference taken at map time, tears down the region and frees
+ * the descriptor.
+ *
+ * @param args In: ctx (session) and the cookie from _ump_ukk_map_mem().
+ */
+void _ump_ukk_unmap_mem( _ump_uk_unmap_mem_s *args )
+{
+       struct ump_session_data * session_data;
+       ump_memory_allocation * descriptor;
+       ump_dd_handle handle;
+
+       session_data = (ump_session_data *)args->ctx;
+
+       if( NULL == session_data )
+       {
+               /* Fixed copy-paste: message previously named _ump_ukk_map_mem(). */
+               MSG_ERR(("Session data is NULL in _ump_ukk_unmap_mem()\n"));
+               return;
+       }
+       /* MALI_SEC */
+       /* SEC kernel stability 2012-02-17 */
+       if (NULL == session_data->cookies_map)
+       {
+               MSG_ERR(("session_data->cookies_map is NULL in _ump_ukk_unmap_mem()\n"));
+               return;
+       }
+       if (0 != ump_descriptor_mapping_get( session_data->cookies_map, (int)args->cookie, (void**)&descriptor) )
+       {
+               MSG_ERR(("_ump_ukk_unmap_mem: cookie 0x%X not found for this session\n", args->cookie ));
+               return;
+       }
+
+       DEBUG_ASSERT_POINTER(descriptor);
+
+       handle = descriptor->handle;
+       if ( UMP_DD_HANDLE_INVALID == handle)
+       {
+               /* NOTE(review): this defensive bail-out leaks the descriptor and
+                * its cookie; confirm whether it can trigger in practice. */
+               DBG_MSG(1, ("WARNING: Trying to unmap unknown handle: UNKNOWN\n"));
+               return;
+       }
+
+       /* Remove the ump_memory_allocation from the list of tracked mappings */
+       _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+       _mali_osk_list_del( &descriptor->list );
+       _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+       ump_descriptor_mapping_free( session_data->cookies_map, (int)args->cookie );
+
+       /* Drop the reference taken at map time. */
+       ump_dd_reference_release(handle);
+
+       _ump_osk_mem_mapregion_term( descriptor );
+       _mali_osk_free(descriptor);
+}
+
+/**
+ * _ump_ukk_report_memory_usage() - report the backend's memory usage.
+ *
+ * @return The value of the backend's stat() hook, or 0 when the active
+ *         backend does not implement stat().
+ */
+u32 _ump_ukk_report_memory_usage( void )
+{
+       ump_memory_backend * backend = device.backend;
+
+       /* Backends are not required to provide a stat() hook. */
+       if (NULL == backend->stat)
+       {
+               return 0;
+       }
+
+       return backend->stat(backend);
+}
diff --git a/drivers/gpu/arm/mali400/ump/common/ump_kernel_common.h b/drivers/gpu/arm/mali400/ump/common/ump_kernel_common.h
new file mode 100644 (file)
index 0000000..75afa0d
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_COMMON_H__
+#define __UMP_KERNEL_COMMON_H__
+
+#include "ump_kernel_types.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+
+
+#ifdef DEBUG
+       extern int ump_debug_level;
+       #define UMP_DEBUG_PRINT(args) _mali_osk_dbgmsg args
+       #define UMP_DEBUG_CODE(args) args
+       /* Level-filtered debug print: emitted only when level <= ump_debug_level. */
+       #define DBG_MSG(level,args)  do { /* args should be in brackets */ \
+               ((level) <=  ump_debug_level)?\
+               UMP_DEBUG_PRINT(("UMP<" #level ">: ")), \
+               UMP_DEBUG_PRINT(args):0; \
+               } while (0)
+
+       /* Like DBG_MSG, but also gated on a runtime condition. */
+       #define DBG_MSG_IF(level,condition,args) /* args should be in brackets */ \
+               if((condition)&&((level) <=  ump_debug_level)) {\
+               UMP_DEBUG_PRINT(("UMP<" #level ">: ")); \
+               UMP_DEBUG_PRINT(args); \
+               }
+
+       /* Must directly follow a DBG_MSG_IF() at the call site: expands to an
+        * 'else if' branch on that statement. */
+       #define DBG_MSG_ELSE(level,args) /* args should be in brackets */ \
+               else if((level) <=  ump_debug_level) { \
+               UMP_DEBUG_PRINT(("UMP<" #level ">: ")); \
+               UMP_DEBUG_PRINT(args); \
+               }
+
+       /* Non-fatal asserts: they only log via MSG_ERR; execution continues. */
+       #define DEBUG_ASSERT_POINTER(pointer) do  {if( (pointer)== NULL) MSG_ERR(("NULL pointer " #pointer)); } while(0)
+       #define DEBUG_ASSERT(condition) do  {if(!(condition)) MSG_ERR(("ASSERT failed: " #condition)); } while(0)
+#else /* DEBUG */
+       #define UMP_DEBUG_PRINT(args) do {} while(0)
+       #define UMP_DEBUG_CODE(args)
+       #define DBG_MSG(level,args) do {} while(0)
+       #define DBG_MSG_IF(level,condition,args) do {} while(0)
+       #define DBG_MSG_ELSE(level,args) do {} while(0)
+       #define DEBUG_ASSERT(condition) do {} while(0)
+       #define DEBUG_ASSERT_POINTER(pointer) do  {} while(0)
+#endif /* DEBUG */
+
+/* Error message with file/function/line prefix; compiled in regardless of DEBUG. */
+#define MSG_ERR(args) do{ /* args should be in brackets */ \
+        _mali_osk_dbgmsg("UMP: ERR: %s\n" ,__FILE__); \
+        _mali_osk_dbgmsg( "           %s()%4d\n", __FUNCTION__, __LINE__) ; \
+        _mali_osk_dbgmsg args ; \
+        _mali_osk_dbgmsg("\n"); \
+       } while(0)
+
+/* Unconditional informational message with "UMP: " prefix. */
+#define MSG(args) do{ /* args should be in brackets */ \
+                _mali_osk_dbgmsg("UMP: "); \
+                _mali_osk_dbgmsg args; \
+               } while (0)
+
+
+
+/*
+ * This struct is used to store per session data.
+ * A session is created when someone open() the device, and
+ * closed when someone close() it or the user space application terminates.
+ */
+typedef struct ump_session_data
+{
+       _mali_osk_list_t list_head_session_memory_list;  /**< List of ump allocations made by the process (elements are ump_session_memory_list_element) */
+       _mali_osk_list_t list_head_session_memory_mappings_list; /**< List of ump_memory_allocations mapped in */
+       int api_version; /**< U/K API version for this session -- presumably set at open/version negotiation; confirm against the session-open path */
+       _mali_osk_lock_t * lock; /**< Protects both lists above (taken RW around list add/del in map/unmap/allocate) */
+       ump_descriptor_mapping * cookies_map; /**< Secure mapping of cookies from _ump_ukk_map_mem() */
+       int cache_operations_ongoing; /**< NOTE(review): usage not visible in this file -- appears to track nested cache-operation begin/end; confirm */
+       int has_pending_level1_cache_flush; /**< NOTE(review): usage not visible in this file -- appears to defer an L1 cache flush; confirm */
+} ump_session_data;
+
+
+
+/*
+ * This struct is used to track the UMP memory references a session has.
+ * We need to track this in order to be able to clean up after user space processes
+ * which don't do it themself (e.g. due to a crash or premature termination).
+ */
+typedef struct ump_session_memory_list_element
+{
+       struct ump_dd_mem * mem;
+       _mali_osk_list_t list;
+} ump_session_memory_list_element;
+
+
+
+/*
+ * Device specific data, created when device driver is loaded, and then kept as the global variable device.
+ */
+typedef struct ump_dev
+{
+       _mali_osk_lock_t * secure_id_map_lock; /**< Serialises all access to secure_id_map */
+       ump_descriptor_mapping * secure_id_map; /**< Maps secure IDs to ump_dd_mem objects */
+       ump_memory_backend * backend; /**< Active memory backend (allocate/release/stat hooks) */
+} ump_dev;
+
+
+
+extern int ump_debug_level;
+extern struct ump_dev device;
+
+_mali_osk_errcode_t ump_kernel_constructor(void);
+void ump_kernel_destructor(void);
+int map_errcode( _mali_osk_errcode_t err );
+
+/**
+ * variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows the GCC compiler to generate a warning. Other compilers may
+ * not support this so we define it here as an empty macro if the compiler doesn't
+ * define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __UMP_KERNEL_COMMON_H__ */
diff --git a/drivers/gpu/arm/mali400/ump/common/ump_kernel_descriptor_mapping.c b/drivers/gpu/arm/mali400/ump/common/ump_kernel_descriptor_mapping.c
new file mode 100644 (file)
index 0000000..c0cdff6
--- /dev/null
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static ump_descriptor_table * descriptor_table_alloc(int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(ump_descriptor_table * table);
+
+/**
+ * Create a descriptor mapping able to hold init_entries descriptors,
+ * growable up to max_entries. Descriptor 0 is reserved so that a valid
+ * descriptor is never zero. Returns NULL on any allocation failure.
+ */
+ump_descriptor_mapping * ump_descriptor_mapping_create(int init_entries, int max_entries)
+{
+       ump_descriptor_mapping * map;
+
+       /* Round both limits up to a whole number of bitmap words. */
+       init_entries = MALI_PAD_INT(init_entries);
+       max_entries = MALI_PAD_INT(max_entries);
+
+       map = _mali_osk_calloc(1, sizeof(ump_descriptor_mapping) );
+       if (NULL == map) return NULL;
+
+       map->table = descriptor_table_alloc(init_entries);
+       if (NULL == map->table) goto err_free_map;
+
+       map->lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_READERWRITER, 0 , 0);
+       if ( NULL == map->lock ) goto err_free_table;
+
+       _mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic to kick in */
+       map->max_nr_mappings_allowed = max_entries;
+       map->current_nr_mappings = init_entries;
+       return map;
+
+err_free_table:
+       descriptor_table_free(map->table);
+err_free_map:
+       _mali_osk_free(map);
+       return NULL;
+}
+
+/**
+ * Destroy a mapping created by ump_descriptor_mapping_create().
+ * The caller must guarantee no concurrent users remain: the lock is torn
+ * down here, so it cannot protect this teardown itself.
+ */
+void ump_descriptor_mapping_destroy(ump_descriptor_mapping * map)
+{
+       descriptor_table_free(map->table);
+       _mali_osk_lock_term( map->lock );
+       _mali_osk_free(map);
+}
+
+/**
+ * Allocate a descriptor ID and bind it to 'target'.
+ *
+ * Grows the table (doubling, up to max_nr_mappings_allowed) when full.
+ * Because bit 0 is reserved at create time, a successful return is always
+ * >= 1; -1 is returned on exhaustion or allocation failure.
+ */
+int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void * target)
+{
+       int descriptor = -1;/*-EFAULT;*/
+       _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+       descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+       /* find_first_zero_bit returns current_nr_mappings when every bit is set. */
+       if (descriptor == map->current_nr_mappings)
+       {
+               int nr_mappings_new;
+               /* no free descriptor, try to expand the table */
+               ump_descriptor_table * new_table;
+               ump_descriptor_table * old_table = map->table;
+               nr_mappings_new= map->current_nr_mappings *2;
+
+               if (map->current_nr_mappings >= map->max_nr_mappings_allowed)
+               {
+                       descriptor = -1;
+                       goto unlock_and_exit;
+               }
+
+               new_table = descriptor_table_alloc(nr_mappings_new);
+               if (NULL == new_table)
+               {
+                       descriptor = -1;
+                       goto unlock_and_exit;
+               }
+
+               /* usage is a bitmap: current_nr_mappings bits occupy
+                * current_nr_mappings/BITS_PER_LONG longs (counts are padded to a
+                * whole number of words by MALI_PAD_INT at create time). */
+               _mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG);
+               _mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void*));
+               map->table = new_table;
+               map->current_nr_mappings = nr_mappings_new;
+               descriptor_table_free(old_table);
+       }
+
+       /* we have found a valid descriptor, set the value and usage bit */
+       _mali_osk_set_nonatomic_bit(descriptor, map->table->usage);
+       map->table->mappings[descriptor] = target;
+
+unlock_and_exit:
+       _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+       return descriptor;
+}
+
+/**
+ * Look up the pointer bound to a descriptor ID.
+ * On success *target receives the stored pointer and 0 is returned;
+ * otherwise *target is set to NULL and a negative value is returned.
+ */
+int ump_descriptor_mapping_get(ump_descriptor_mapping * map, int descriptor, void** target)
+{
+       int result = -1;/*-EFAULT;*/
+       int valid;
+
+       DEBUG_ASSERT(map);
+       _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+       valid = (descriptor >= 0) && (descriptor < map->current_nr_mappings) &&
+               _mali_osk_test_bit(descriptor, map->table->usage);
+       if (valid)
+       {
+               *target = map->table->mappings[descriptor];
+               result = 0;
+       }
+       else
+       {
+               *target = NULL;
+       }
+       _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+       return result;
+}
+
+/**
+ * Replace the pointer bound to an existing descriptor ID.
+ * Returns 0 on success, negative if the descriptor is not allocated.
+ *
+ * NOTE(review): the slot is written while holding the lock in RO mode,
+ * whereas ump_descriptor_mapping_free() takes RW for the same kind of
+ * update -- confirm this asymmetry is intentional (RO still excludes the
+ * RW-held table growth in allocate_mapping, but not concurrent set calls).
+ */
+int ump_descriptor_mapping_set(ump_descriptor_mapping * map, int descriptor, void * target)
+{
+       int result = -1;/*-EFAULT;*/
+       _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+       if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+       {
+               map->table->mappings[descriptor] = target;
+               result = 0;
+       }
+       _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+       return result;
+}
+
+/**
+ * Release a descriptor ID so it can be reused.
+ * Unknown or already-free descriptors are silently ignored.
+ */
+void ump_descriptor_mapping_free(ump_descriptor_mapping * map, int descriptor)
+{
+       int in_use = 0;
+
+       _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+       if ((descriptor >= 0) && (descriptor < map->current_nr_mappings))
+       {
+               in_use = _mali_osk_test_bit(descriptor, map->table->usage);
+       }
+       if (in_use)
+       {
+               map->table->mappings[descriptor] = NULL;
+               _mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
+       }
+       _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/**
+ * Allocate a descriptor table for 'count' descriptors as ONE contiguous,
+ * zeroed block laid out as: struct header, then the usage bitmap
+ * (count / BITS_PER_LONG longs), then the mappings pointer array.
+ * A single _mali_osk_free() of the table releases everything.
+ */
+static ump_descriptor_table * descriptor_table_alloc(int count)
+{
+       ump_descriptor_table * table;
+
+       table = _mali_osk_calloc(1, sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count) );
+
+       if (NULL != table)
+       {
+               /* Point usage and mappings at their regions inside the block. */
+               table->usage = (u32*)((u8*)table + sizeof(ump_descriptor_table));
+               table->mappings = (void**)((u8*)table + sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG));
+       }
+
+       return table;
+}
+
+/* The table is one contiguous allocation (usage and mappings live inside
+ * it -- see descriptor_table_alloc), so a single free suffices. */
+static void descriptor_table_free(ump_descriptor_table * table)
+{
+       _mali_osk_free(table);
+}
+
diff --git a/drivers/gpu/arm/mali400/ump/common/ump_kernel_descriptor_mapping.h b/drivers/gpu/arm/mali400/ump/common/ump_kernel_descriptor_mapping.h
new file mode 100644 (file)
index 0000000..fed5336
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_descriptor_mapping.h
+ */
+
+#ifndef __UMP_KERNEL_DESCRIPTOR_MAPPING_H__
+#define __UMP_KERNEL_DESCRIPTOR_MAPPING_H__
+
+#include "mali_osk.h"
+
+/**
+ * The actual descriptor mapping table, never directly accessed by clients
+ */
+typedef struct ump_descriptor_table
+{
+       u32 * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+       /* NOTE(review): usage is typed u32* but sized/copied in units of
+        * unsigned long (see descriptor_table_alloc) -- identical on 32-bit
+        * ARM, but confirm before building for a 64-bit target. */
+       void** mappings; /**< Array of the pointers the descriptors map to */
+} ump_descriptor_table;
+
+/**
+ * The descriptor mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct ump_descriptor_mapping
+{
+       _mali_osk_lock_t *lock; /**< Lock protecting access to the mapping object */
+       int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+       int current_nr_mappings; /**< Current number of possible mappings */
+       ump_descriptor_table * table; /**< Pointer to the current mapping table */
+} ump_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object
+ * Create a descriptor mapping capable of holding init_entries growable to max_entries
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Number of entries to max support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+ump_descriptor_mapping * ump_descriptor_mapping_create(int init_entries, int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param map The map to free
+ */
+void ump_descriptor_mapping_destroy(ump_descriptor_mapping * map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param target The value to map to
+ * @return The descriptor allocated, a negative value on error
+ */
+int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void * target);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to a pointer which will receive the stored value
+ * @return 0 on successful lookup, negative on error
+ */
+int ump_descriptor_mapping_get(ump_descriptor_mapping * map, int descriptor, void** target);
+
+/**
+ * Set the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to replace the current value with
+ * @return 0 on successful lookup, negative on error
+ */
+int ump_descriptor_mapping_set(ump_descriptor_mapping * map, int descriptor, void * target);
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ */
+void ump_descriptor_mapping_free(ump_descriptor_mapping * map, int descriptor);
+
+#endif /* __UMP_KERNEL_DESCRIPTOR_MAPPING_H__ */
diff --git a/drivers/gpu/arm/mali400/ump/common/ump_kernel_memory_backend.h b/drivers/gpu/arm/mali400/ump/common/ump_kernel_memory_backend.h
new file mode 100644 (file)
index 0000000..c3cc078
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_mapping.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_H__
+
+#include "ump_kernel_interface.h"
+#include "ump_kernel_types.h"
+
+
+/* Describes one user-space mapping of a UMP allocation (created by
+ * _ump_ukk_map_mem, destroyed by _ump_ukk_unmap_mem). */
+typedef struct ump_memory_allocation
+{
+       void                    * phys_addr;            /**< Physical address supplied by the caller at map time */
+       void                    * mapping;              /**< User-space virtual address of the mapping (returned to the caller) */
+       unsigned long             size;                 /**< Size of the mapping in bytes (equals the allocation size) */
+       ump_dd_handle             handle;               /**< UMP handle (really a ump_dd_mem*) this mapping refers to */
+       void                    * process_mapping_info; /**< Opaque OS-layer data (_ukk_private) -- presumably the vma; confirm in ump_osk */
+       u32                       cookie;               /**< necessary on some U/K interface implementations */
+       struct ump_session_data * ump_session;          /**< Session that this allocation belongs to */
+       _mali_osk_list_t          list;                 /**< List for linking together memory allocations into the session's memory head */
+       u32 is_cached;                                  /**< Non-zero when the mapping is CPU-cached */
+} ump_memory_allocation;
+
+/* Pluggable allocator interface; the active instance is device.backend. */
+typedef struct ump_memory_backend
+{
+       int  (*allocate)(void* ctx, ump_dd_mem * descriptor);        /**< Returns non-zero on success (0 is treated as failure by _ump_ukk_allocate) */
+       void (*release)(void* ctx, ump_dd_mem * descriptor);         /**< Installed as ump_dd_mem::release_func for backend allocations */
+       void (*shutdown)(struct ump_memory_backend * backend);       /**< Tear down the backend itself */
+       u32  (*stat)(struct ump_memory_backend *backend);            /**< Optional; feeds _ump_ukk_report_memory_usage() (may be NULL) */
+       int  (*pre_allocate_physical_check)(void *ctx, u32 size);    /**< NOTE(review): usage not visible here -- presumably a pre-flight size check */
+       u32  (*adjust_to_mali_phys)(void *ctx, u32 cpu_phys);        /**< NOTE(review): presumably CPU-to-Mali physical address translation; confirm */
+       void *(*get)(ump_dd_mem *mem, void *args); /* MALI_SEC */
+       void (*set)(ump_dd_mem *mem, void *args);  /* MALI_SEC */
+       void * ctx;                                                  /**< Backend-private context passed to the hooks above */
+} ump_memory_backend;
+
+ump_memory_backend * ump_memory_backend_create ( void );
+void ump_memory_backend_destroy( void );
+
+#endif /*__UMP_KERNEL_MEMORY_BACKEND_H__ */
+
diff --git a/drivers/gpu/arm/mali400/ump/common/ump_kernel_ref_drv.c b/drivers/gpu/arm/mali400/ump/common/ump_kernel_ref_drv.c
new file mode 100644 (file)
index 0000000..ae278ca
--- /dev/null
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+
+#include "ump_kernel_interface_ref_drv.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+
+/* Minimum allocation granularity (one 4 KiB page). */
+#define UMP_MINIMUM_SIZE         4096
+#define UMP_MINIMUM_SIZE_MASK    (~(UMP_MINIMUM_SIZE-1))
+/* Round a size up to the next multiple of UMP_MINIMUM_SIZE. */
+#define UMP_SIZE_ALIGN(x)        (((x)+UMP_MINIMUM_SIZE-1)&UMP_MINIMUM_SIZE_MASK)
+/* Offset of x within its UMP_MINIMUM_SIZE unit; 0 means aligned. */
+#define UMP_ADDR_ALIGN_OFFSET(x) ((x)&(UMP_MINIMUM_SIZE-1))
+static void phys_blocks_release(void * ctx, struct ump_dd_mem * descriptor);
+
+/**
+ * Create a UMP handle wrapping caller-supplied physical memory blocks.
+ *
+ * Each block's address and size must be UMP_MINIMUM_SIZE aligned. The
+ * block descriptions are copied, a secure ID is allocated, and the new
+ * handle starts with a reference count of 1. The resulting memory is
+ * always treated as uncached.
+ *
+ * @return A ump_dd_handle, or UMP_DD_HANDLE_INVALID on bad input or
+ *         allocation failure.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks)
+{
+       ump_dd_mem * mem;
+       unsigned long size_total = 0;
+       int map_id;
+       u32 i;
+
+       /* Go through the input blocks and verify that they are sane */
+       for (i=0; i < num_blocks; i++)
+       {
+               unsigned long addr = blocks[i].addr;
+               unsigned long size = blocks[i].size;
+
+               DBG_MSG(5, ("Adding physical memory to new handle. Address: 0x%08lx, size: %lu\n", addr, size));
+               size_total += blocks[i].size;
+
+               if (0 != UMP_ADDR_ALIGN_OFFSET(addr))
+               {
+                       MSG_ERR(("Trying to create UMP memory from unaligned physical address. Address: 0x%08lx\n", addr));
+                       return UMP_DD_HANDLE_INVALID;
+               }
+
+               if (0 != UMP_ADDR_ALIGN_OFFSET(size))
+               {
+                       MSG_ERR(("Trying to create UMP memory with unaligned size. Size: %lu\n", size));
+                       return UMP_DD_HANDLE_INVALID;
+               }
+       }
+
+       /* Allocate the ump_dd_mem struct for this allocation */
+       mem = _mali_osk_malloc(sizeof(*mem));
+       if (NULL == mem)
+       {
+               DBG_MSG(1, ("Could not allocate ump_dd_mem in ump_dd_handle_create_from_phys_blocks()\n"));
+               return UMP_DD_HANDLE_INVALID;
+       }
+
+       /* Find a secure ID for this allocation. The lock is held until the
+        * struct is fully initialised, so the new ID cannot be looked up
+        * half-initialised. */
+       _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+       map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*) mem);
+
+       if (map_id < 0)
+       {
+               _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+               _mali_osk_free(mem);
+               DBG_MSG(1, ("Failed to allocate secure ID in ump_dd_handle_create_from_phys_blocks()\n"));
+               return UMP_DD_HANDLE_INVALID;
+       }
+
+       /* Now, make a copy of the block information supplied by the user */
+       mem->block_array = _mali_osk_malloc(sizeof(ump_dd_physical_block)* num_blocks);
+       if (NULL == mem->block_array)
+       {
+               ump_descriptor_mapping_free(device.secure_id_map, map_id);
+               _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+               _mali_osk_free(mem);
+               DBG_MSG(1, ("Could not allocate a mem handle for function ump_dd_handle_create_from_phys_blocks().\n"));
+               return UMP_DD_HANDLE_INVALID;
+       }
+
+       _mali_osk_memcpy(mem->block_array, blocks, sizeof(ump_dd_physical_block) * num_blocks);
+
+       /* And setup the rest of the ump_dd_mem struct */
+       _mali_osk_atomic_init(&mem->ref_count, 1);
+       mem->secure_id = (ump_secure_id)map_id;
+       mem->size_bytes = size_total;
+       mem->nr_blocks = num_blocks;
+       mem->backend_info = NULL;
+       mem->ctx = NULL;
+       mem->release_func = phys_blocks_release;
+       /* For now UMP handles created by ump_dd_handle_create_from_phys_blocks() is forced to be Uncached */
+       mem->is_cached = 0;
+       mem->hw_device = _UMP_UK_USED_BY_CPU;
+       mem->lock_usage = UMP_NOT_LOCKED;
+
+       _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+       DBG_MSG(3, ("UMP memory created. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
+
+       return (ump_dd_handle)mem;
+}
+
+/* Release callback for handles created by
+ * ump_dd_handle_create_from_phys_blocks(): frees only our copy of the
+ * block descriptions -- the underlying physical memory is owned by the
+ * original caller and is not freed here. */
+static void phys_blocks_release(void * ctx, struct ump_dd_mem * descriptor)
+{
+       _mali_osk_free(descriptor->block_array);
+       descriptor->block_array = NULL;
+}
+
+/**
+ * _ump_ukk_allocate() - allocate new UMP memory on behalf of a session.
+ *
+ * Creates a secure ID, asks the active backend to allocate the
+ * (page-aligned) memory, and records the allocation on the session's
+ * memory list so it is released if the process exits without freeing it.
+ *
+ * @param user_interaction In:  ctx (session), size, constraints.
+ *                         Out: secure_id and the actual (aligned) size.
+ * @return _MALI_OSK_ERR_OK on success, or a _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction )
+{
+       ump_session_data * session_data = NULL;
+       ump_dd_mem *new_allocation = NULL;
+       ump_session_memory_list_element * session_memory_element = NULL;
+       int map_id;
+
+       DEBUG_ASSERT_POINTER( user_interaction );
+       DEBUG_ASSERT_POINTER( user_interaction->ctx );
+
+       session_data = (ump_session_data *) user_interaction->ctx;
+
+       session_memory_element = _mali_osk_calloc( 1, sizeof(ump_session_memory_list_element));
+       if (NULL == session_memory_element)
+       {
+               DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+
+       new_allocation = _mali_osk_calloc( 1, sizeof(ump_dd_mem));
+       if (NULL==new_allocation)
+       {
+               _mali_osk_free(session_memory_element);
+               DBG_MSG(1, ("Failed to allocate ump_dd_mem in _ump_ukk_allocate()\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       /* Create a secure ID for this allocation */
+       _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+       map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*)new_allocation);
+
+       if (map_id < 0)
+       {
+               _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+               _mali_osk_free(session_memory_element);
+               _mali_osk_free(new_allocation);
+               DBG_MSG(1, ("Failed to allocate secure ID in ump_ioctl_allocate()\n"));
+               /* Fixed: was "return - _MALI_OSK_ERR_INVALID_FUNC;", which negates
+                * the error code -- inconsistent with the backend-allocation
+                * failure path below and every other return in this file. */
+               return _MALI_OSK_ERR_INVALID_FUNC;
+       }
+
+       /* Initialize the part of the new_allocation that we know so far */
+       new_allocation->secure_id = (ump_secure_id)map_id;
+       _mali_osk_atomic_init(&new_allocation->ref_count,1);
+       if ( 0==(UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE & user_interaction->constraints) )
+                new_allocation->is_cached = 0;
+       else new_allocation->is_cached = 1;
+
+       /* special case a size of 0, we should try to emulate what malloc does in this case, which is to return a valid pointer that must be freed, but can't be dereferenced */
+       if (0 == user_interaction->size)
+       {
+               user_interaction->size = 1; /* emulate by actually allocating the minimum block size */
+       }
+
+       new_allocation->size_bytes = UMP_SIZE_ALIGN(user_interaction->size); /* Page align the size */
+       new_allocation->lock_usage = UMP_NOT_LOCKED;
+
+       /* Now, ask the active memory backend to do the actual memory allocation */
+       if (!device.backend->allocate( device.backend->ctx, new_allocation ) )
+       {
+               DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n", new_allocation->size_bytes, (unsigned long)user_interaction->size));
+               ump_descriptor_mapping_free(device.secure_id_map, map_id);
+               _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+               _mali_osk_free(new_allocation);
+               _mali_osk_free(session_memory_element);
+               return _MALI_OSK_ERR_INVALID_FUNC;
+       }
+       new_allocation->hw_device = _UMP_UK_USED_BY_CPU;
+       new_allocation->ctx = device.backend->ctx;
+       new_allocation->release_func = device.backend->release;
+
+       _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+       /* Initialize the session_memory_element, and add it to the session object */
+       session_memory_element->mem = new_allocation;
+       _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+       _mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
+       _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+       user_interaction->secure_id = new_allocation->secure_id;
+       user_interaction->size = new_allocation->size_bytes;
+       DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n", new_allocation->secure_id, new_allocation->size_bytes));
+
+       return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/arm/mali400/ump/common/ump_kernel_types.h b/drivers/gpu/arm/mali400/ump/common/ump_kernel_types.h
new file mode 100644 (file)
index 0000000..10b70f9
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_TYPES_H__
+#define __UMP_KERNEL_TYPES_H__
+
+#include "ump_kernel_interface.h"
+#include "mali_osk.h"
+
+
+typedef enum
+{
+       UMP_USED_BY_CPU = 0,
+       UMP_USED_BY_MALI = 1,
+       UMP_USED_BY_UNKNOWN_DEVICE= 100,
+} ump_hw_usage;
+
+typedef enum
+{
+       UMP_NOT_LOCKED = 0,
+       UMP_READ = 1,
+       UMP_READ_WRITE = 3,
+} ump_lock_usage;
+
+
+/*
+ * This struct is what is "behind" a ump_dd_handle
+ */
+typedef struct ump_dd_mem
+{
+       ump_secure_id secure_id;
+       _mali_osk_atomic_t ref_count;
+       unsigned long size_bytes;
+       unsigned long nr_blocks;
+       ump_dd_physical_block * block_array;
+       void (*release_func)(void * ctx, struct ump_dd_mem * descriptor);
+       void * ctx;
+       void * backend_info;
+       int is_cached;
+       ump_hw_usage hw_device;
+       ump_lock_usage lock_usage;
+/* MALI_SEC */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+       struct dma_buf_attachment *import_attach;
+       struct sg_table *sgt;
+#endif
+} ump_dd_mem;
+
+
+
+#endif /* __UMP_KERNEL_TYPES_H__ */
diff --git a/drivers/gpu/arm/mali400/ump/common/ump_osk.h b/drivers/gpu/arm/mali400/ump/common/ump_osk.h
new file mode 100644 (file)
index 0000000..3da77d9
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk.h
+ * Defines the OS abstraction layer for the UMP kernel device driver (OSK)
+ */
+
+#ifndef __UMP_OSK_H__
+#define __UMP_OSK_H__
+
+#include <mali_osk.h>
+#include <ump_kernel_memory_backend.h>
+#include "ump_uk_types.h"
+#include "ump_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+_mali_osk_errcode_t _ump_osk_init( void );
+
+_mali_osk_errcode_t _ump_osk_term( void );
+
+int _ump_osk_atomic_inc_and_read( _mali_osk_atomic_t *atom );
+
+int _ump_osk_atomic_dec_and_read( _mali_osk_atomic_t *atom );
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation *descriptor );
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size );
+
+void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor );
+
+void _ump_osk_msync( ump_dd_mem * mem, void * virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data * session_data );
+/* MALI_SEC */
+void _ump_osk_mem_mapregion_get( ump_dd_mem ** mem, unsigned long vaddr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/arm/mali400/ump/common/ump_ukk.h b/drivers/gpu/arm/mali400/ump/common/ump_ukk.h
new file mode 100644 (file)
index 0000000..3cd4dec
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __UMP_UKK_H__
+#define __UMP_UKK_H__
+
+#include "mali_osk.h"
+#include "ump_uk_types.h"
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+_mali_osk_errcode_t _ump_ukk_open( void** context );
+
+_mali_osk_errcode_t _ump_ukk_close( void** context );
+
+_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction );
+
+_mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info );
+
+_mali_osk_errcode_t _ump_ukk_size_get( _ump_uk_size_get_s *user_interaction );
+
+_mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args );
+
+_mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args );
+
+void _ump_ukk_unmap_mem( _ump_uk_unmap_mem_s *args );
+
+void _ump_ukk_msync( _ump_uk_msync_s *args );
+
+void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s* args);
+
+void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args );
+
+void _ump_ukk_lock(_ump_uk_lock_s *args );
+
+void _ump_ukk_unlock(_ump_uk_unlock_s *args );
+
+u32 _ump_ukk_report_memory_usage( void );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UKK_H__ */
diff --git a/drivers/gpu/arm/mali400/ump/include/ump_kernel_interface.h b/drivers/gpu/arm/mali400/ump/include/ump_kernel_interface.h
new file mode 100644 (file)
index 0000000..c14922d
--- /dev/null
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_interface.h
+ *
+ * This file contains the kernel space part of the UMP API.
+ */
+
+#ifndef __UMP_KERNEL_INTERFACE_H__
+#define __UMP_KERNEL_INTERFACE_H__
+
+
+/** @defgroup ump_kernel_space_api UMP Kernel Space API
+ * @{ */
+
+
+#include "ump_kernel_platform.h"
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/**
+ * External representation of a UMP handle in kernel space.
+ */
+typedef void * ump_dd_handle;
+
+/**
+ * Typedef for a secure ID, a system wide identifier for UMP memory buffers.
+ */
+typedef unsigned int ump_secure_id;
+
+
+/**
+ * Value to indicate an invalid UMP memory handle.
+ */
+#define UMP_DD_HANDLE_INVALID ((ump_dd_handle)0)
+
+
+/**
+ * Value to indicate an invalid secure Id.
+ */
+#define UMP_INVALID_SECURE_ID ((ump_secure_id)-1)
+
+
+/**
+ * UMP error codes for kernel space.
+ */
+typedef enum
+{
+       UMP_DD_SUCCESS, /**< indicates success */
+       UMP_DD_INVALID, /**< indicates failure */
+} ump_dd_status_code;
+
+
+/**
+ * Struct used to describe a physical block used by UMP memory
+ */
+typedef struct ump_dd_physical_block
+{
+       unsigned long addr; /**< The physical address of the block */
+       unsigned long size; /**< The length of the block, typically page aligned */
+} ump_dd_physical_block;
+
+
+/**
+ * Retrieves the secure ID for the specified UMP memory.
+ *
+ * This identifier is unique across the entire system, and uniquely identifies
+ * the specified UMP memory. This identifier can later be used through the
+ * @ref ump_dd_handle_create_from_secure_id "ump_dd_handle_create_from_secure_id" or
+ * @ref ump_handle_create_from_secure_id "ump_handle_create_from_secure_id"
+ * functions in order to access this UMP memory, for instance from another process.
+ *
+ * @note There is a user space equivalent function called @ref ump_secure_id_get "ump_secure_id_get"
+ *
+ * @see ump_dd_handle_create_from_secure_id
+ * @see ump_handle_create_from_secure_id
+ * @see ump_secure_id_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return Returns the secure ID for the specified UMP memory.
+ */
+UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle mem);
+
+
+/**
+ * Retrieves a handle to allocated UMP memory.
+ *
+ * The usage of UMP memory is reference counted, so this will increment the reference
+ * count by one for the specified UMP memory.
+ * Use @ref ump_dd_reference_release "ump_dd_reference_release" when there is no longer any
+ * use for the retrieved handle.
+ *
+ * @note There is a user space equivalent function called @ref ump_handle_create_from_secure_id "ump_handle_create_from_secure_id"
+ *
+ * @see ump_dd_reference_release
+ * @see ump_handle_create_from_secure_id
+ *
+ * @param secure_id The secure ID of the UMP memory to open, that can be retrieved using the @ref ump_secure_id_get "ump_secure_id_get " function.
+ *
+ * @return UMP_DD_HANDLE_INVALID indicates failure, otherwise a valid handle is returned.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id);
+
+
+/**
+ * Retrieves the number of physical blocks used by the specified UMP memory.
+ *
+ * This function retrieves the number of @ref ump_dd_physical_block "ump_dd_physical_block" structs needed
+ * to describe the physical memory layout of the given UMP memory. This can later be used when calling
+ * the functions @ref ump_dd_phys_blocks_get "ump_dd_phys_blocks_get" and
+ * @ref ump_dd_phys_block_get "ump_dd_phys_block_get".
+ *
+ * @see ump_dd_phys_blocks_get
+ * @see ump_dd_phys_block_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return The number of ump_dd_physical_block structs required to describe the physical memory layout of the specified UMP memory.
+ */
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle mem);
+
+
+/**
+ * Retrieves all physical memory block information for specified UMP memory.
+ *
+ * This function can be used by other device drivers in order to create MMU tables.
+ *
+ * @note This function will fail if the num_blocks parameter is either too large or too small.
+ *
+ * @see ump_dd_phys_block_get
+ *
+ * @param mem Handle to UMP memory.
+ * @param blocks An array of @ref ump_dd_physical_block "ump_dd_physical_block" structs that will receive the physical description.
+ * @param num_blocks The number of blocks to return in the blocks array. Use the function
+ *                   @ref ump_dd_phys_block_count_get "ump_dd_phys_block_count_get" first to determine the number of blocks required.
+ *
+ * @return UMP_DD_SUCCESS indicates success, UMP_DD_INVALID indicates failure.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle mem, ump_dd_physical_block * blocks, unsigned long num_blocks);
+
+
+/**
+ * Retrieves the physical memory block information for specified block for the specified UMP memory.
+ *
+ * This function can be used by other device drivers in order to create MMU tables.
+ *
+ * @note This function will return UMP_DD_INVALID if the specified index is out of range.
+ *
+ * @see ump_dd_phys_blocks_get
+ *
+ * @param mem Handle to UMP memory.
+ * @param index Which physical info block to retrieve.
+ * @param block Pointer to a @ref ump_dd_physical_block "ump_dd_physical_block" struct which will receive the requested information.
+ *
+ * @return UMP_DD_SUCCESS indicates success, UMP_DD_INVALID indicates failure.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle mem, unsigned long index, ump_dd_physical_block * block);
+
+
+/**
+ * Retrieves the actual size of the specified UMP memory.
+ *
+ * The size is reported in bytes, and is typically page aligned.
+ *
+ * @note There is a user space equivalent function called @ref ump_size_get "ump_size_get"
+ *
+ * @see ump_size_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return Returns the allocated size of the specified UMP memory, in bytes.
+ */
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle mem);
+
+
+/**
+ * Adds an extra reference to the specified UMP memory.
+ *
+ * This function adds an extra reference to the specified UMP memory. This function should
+ * be used every time a UMP memory handle is duplicated, that is, assigned to another ump_dd_handle
+ * variable. The function @ref ump_dd_reference_release "ump_dd_reference_release" must then be used
+ * to release each copy of the UMP memory handle.
+ *
+ * @note You are not required to call @ref ump_dd_reference_add "ump_dd_reference_add"
+ * for UMP handles returned from
+ * @ref ump_dd_handle_create_from_secure_id "ump_dd_handle_create_from_secure_id",
+ * because these handles are already reference counted by this function.
+ *
+ * @note There is a user space equivalent function called @ref ump_reference_add "ump_reference_add"
+ *
+ * @see ump_reference_add
+ *
+ * @param mem Handle to UMP memory.
+ */
+UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle mem);
+
+
+/**
+ * Releases a reference from the specified UMP memory.
+ *
+ * This function should be called once for every reference to the UMP memory handle.
+ * When the last reference is released, all resources associated with this UMP memory
+ * handle are freed.
+ *
+ * @note There is a user space equivalent function called @ref ump_reference_release "ump_reference_release"
+ *
+ * @see ump_reference_release
+ *
+ * @param mem Handle to UMP memory.
+ */
+UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle mem);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+/** @} */ /* end group ump_kernel_space_api */
+
+
+#endif  /* __UMP_KERNEL_INTERFACE_H__ */
diff --git a/drivers/gpu/arm/mali400/ump/include/ump_kernel_interface_ref_drv.h b/drivers/gpu/arm/mali400/ump/include/ump_kernel_interface_ref_drv.h
new file mode 100644 (file)
index 0000000..0247e76
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_interface_ref_drv.h
+ */
+
+#ifndef __UMP_KERNEL_INTERFACE_REF_DRV_H__
+#define __UMP_KERNEL_INTERFACE_REF_DRV_H__
+
+#include "ump_kernel_interface.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Turn specified physical memory into UMP memory. */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks);
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_get(ump_secure_id secure_id);
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_meminfo_set(ump_dd_handle memh, void* args);
+UMP_KERNEL_API_EXPORT void *ump_dd_meminfo_get(ump_secure_id secure_id, void* args);
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_get_from_vaddr(unsigned long vaddr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  /* __UMP_KERNEL_INTERFACE_REF_DRV_H__ */
diff --git a/drivers/gpu/arm/mali400/ump/include/ump_kernel_platform.h b/drivers/gpu/arm/mali400/ump/include/ump_kernel_platform.h
new file mode 100644 (file)
index 0000000..dfefa2a
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_platform.h
+ *
+ * This file should define UMP_KERNEL_API_EXPORT,
+ * which dictates how the UMP kernel API should be exported/imported.
+ * Modify this file, if needed, to match your platform setup.
+ */
+
+#ifndef __UMP_KERNEL_PLATFORM_H__
+#define __UMP_KERNEL_PLATFORM_H__
+
+/** @addtogroup ump_kernel_space_api
+ * @{ */
+
+/**
+ * A define which controls how UMP kernel space API functions are imported and exported.
+ * This define should be set by the implementor of the UMP API.
+ */
+
+#if defined(_WIN32)
+
+#if defined(UMP_BUILDING_UMP_LIBRARY)
+#define UMP_KERNEL_API_EXPORT __declspec(dllexport)
+#else
+#define UMP_KERNEL_API_EXPORT __declspec(dllimport)
+#endif
+
+#else
+
+#define UMP_KERNEL_API_EXPORT
+
+#endif
+
+
+/** @} */ /* end group ump_kernel_space_api */
+
+
+#endif /* __UMP_KERNEL_PLATFORM_H__ */
diff --git a/drivers/gpu/arm/mali400/ump/include/ump_uk_types.h b/drivers/gpu/arm/mali400/ump/include/ump_uk_types.h
new file mode 100644 (file)
index 0000000..c587d83
--- /dev/null
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file ump_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __UMP_UK_TYPES_H__
+#define __UMP_UK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* Helpers for API version handling */
+#define MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+#define IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+#define GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+#define IS_API_MATCH(x, y) (IS_VERSION_ID((x)) && IS_VERSION_ID((y)) && (GET_VERSION((x)) == GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API
+ * The version is a 16bit integer incremented on each API change.
+ * The 16bit integer is stored twice in a 32bit integer
+ * So for version 1 the value would be 0x00010001
+ */
+#define UMP_IOCTL_API_VERSION MAKE_VERSION_ID(2)
+
+typedef enum
+{
+       _UMP_IOC_QUERY_API_VERSION = 1,
+       _UMP_IOC_ALLOCATE,
+       _UMP_IOC_RELEASE,
+       _UMP_IOC_SIZE_GET,
+       _UMP_IOC_MAP_MEM,    /* not used in Linux */
+       _UMP_IOC_UNMAP_MEM,  /* not used in Linux */
+       _UMP_IOC_MSYNC,
+/* MALI_SEC */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+       _UMP_IOC_DMABUF_IMPORT,
+#endif
+       _UMP_IOC_CACHE_OPERATIONS_CONTROL,
+       _UMP_IOC_SWITCH_HW_USAGE,
+       _UMP_IOC_LOCK,
+       _UMP_IOC_UNLOCK,
+       _UMP_IOC_ION_IMPORT,
+}_ump_uk_functions;
+
+typedef enum
+{
+       UMP_REF_DRV_UK_CONSTRAINT_NONE = 0,
+       UMP_REF_DRV_UK_CONSTRAINT_PHYSICALLY_LINEAR = 1,
+       UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE = 128,
+} ump_uk_alloc_constraints;
+
+typedef enum
+{
+       _UMP_UK_MSYNC_CLEAN = 0,
+       _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE = 1,
+       _UMP_UK_MSYNC_INVALIDATE = 2,
+       _UMP_UK_MSYNC_FLUSH_L1   = 3,
+       _UMP_UK_MSYNC_READOUT_CACHE_ENABLED = 128,
+} ump_uk_msync_op;
+
+typedef enum
+{
+       _UMP_UK_CACHE_OP_START = 0,
+       _UMP_UK_CACHE_OP_FINISH  = 1,
+} ump_uk_cache_op_control;
+
+typedef enum
+{
+       _UMP_UK_READ = 1,
+       _UMP_UK_READ_WRITE = 3,
+} ump_uk_lock_usage;
+
+typedef enum
+{
+       _UMP_UK_USED_BY_CPU = 0,
+       _UMP_UK_USED_BY_MALI = 1,
+       _UMP_UK_USED_BY_UNKNOWN_DEVICE= 100,
+} ump_uk_user;
+
+/**
+ * Get API version ([in,out] u32 api_version, [out] u32 compatible)
+ */
+typedef struct _ump_uk_api_version_s
+{
+       void *ctx;      /**< [in,out] user-kernel context (trashed on output) */
+       u32 version;    /**< Set to the user space version on entry, stores the device driver version on exit */
+       u32 compatible; /**< Non-null if the device is compatible with the client */
+} _ump_uk_api_version_s;
+
+/**
+ * ALLOCATE ([out] u32 secure_id, [in,out] u32 size,  [in] constraints)
+ */
+typedef struct _ump_uk_allocate_s
+{
+       void *ctx;                              /**< [in,out] user-kernel context (trashed on output) */
+       u32 secure_id;                          /**< Return value from DD to Userdriver */
+       u32 size;                               /**< Input and output. Requested size; input. Returned size; output */
+       ump_uk_alloc_constraints constraints;   /**< Only input to Devicedriver */
+} _ump_uk_allocate_s;
+
+typedef struct _ump_uk_ion_import_s
+{
+       void *ctx;                              /**< [in,out] user-kernel context (trashed on output) */
+       int ion_fd;                             /**< ion_fd */
+       u32 secure_id;                          /**< Return value from DD to Userdriver */
+       u32 size;                               /**< Input and output. Requested size; input. Returned size; output */
+       ump_uk_alloc_constraints constraints;   /**< Only input to Devicedriver */
+} _ump_uk_ion_import_s;
+
+/* MALI_SEC */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+struct ump_uk_dmabuf {
+       void* ctx;
+       int fd;
+       size_t size;
+       uint32_t secure_id;
+};
+#endif
+
+/**
+ * SIZE_GET ([in] u32 secure_id, [out]size )
+ */
+typedef struct _ump_uk_size_get_s
+{
+       void *ctx;                              /**< [in,out] user-kernel context (trashed on output) */
+       u32 secure_id;                          /**< Input to DD */
+       u32 size;                               /**< Returned size; output */
+} _ump_uk_size_get_s;
+
+/**
+ * Release ([in] u32 secure_id)
+ */
+typedef struct _ump_uk_release_s
+{
+       void *ctx;                              /**< [in,out] user-kernel context (trashed on output) */
+       u32 secure_id;                          /**< Input to DD */
+} _ump_uk_release_s;
+
+typedef struct _ump_uk_map_mem_s
+{
+       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       void *mapping;                  /**< [out] Returns user-space virtual address for the mapping */
+       void *phys_addr;                /**< [in] physical address */
+       unsigned long size;             /**< [in] size */
+       u32 secure_id;                  /**< [in] secure_id to assign to mapping */
+       void * _ukk_private;            /**< Only used inside linux port between kernel frontend and common part to store vma */
+       u32 cookie;
+       u32 is_cached;            /**< [in,out] caching of CPU mappings */
+} _ump_uk_map_mem_s;
+
+typedef struct _ump_uk_unmap_mem_s
+{
+       void *ctx;            /**< [in,out] user-kernel context (trashed on output) */
+       void *mapping;
+       u32 size;
+       void * _ukk_private;
+       u32 cookie;
+} _ump_uk_unmap_mem_s;
+
+typedef struct _ump_uk_msync_s
+{
+       void *ctx;            /**< [in,out] user-kernel context (trashed on output) */
+       void *mapping;        /**< [in] mapping addr */
+       void *address;        /**< [in] flush start addr */
+       u32 size;             /**< [in] size to flush */
+       ump_uk_msync_op op;   /**< [in] flush operation */
+       u32 cookie;           /**< [in] cookie stored with reference to the kernel mapping internals */
+       u32 secure_id;        /**< [in] secure_id that identifies the ump buffer */
+       u32 is_cached;        /**< [out] caching of CPU mappings */
+} _ump_uk_msync_s;
+
+typedef struct _ump_uk_cache_operations_control_s
+{
+       void *ctx;                   /**< [in,out] user-kernel context (trashed on output) */
+       ump_uk_cache_op_control op;  /**< [in] cache operations start/stop */
+} _ump_uk_cache_operations_control_s;
+
+
+typedef struct _ump_uk_switch_hw_usage_s
+{
+       void *ctx;            /**< [in,out] user-kernel context (trashed on output) */
+       u32 secure_id;        /**< [in] secure_id that identifies the ump buffer */
+       ump_uk_user new_user;         /**< [in] cookie stored with reference to the kernel mapping internals */
+
+} _ump_uk_switch_hw_usage_s;
+
+typedef struct _ump_uk_lock_s
+{
+       void *ctx;            /**< [in,out] user-kernel context (trashed on output) */
+       u32 secure_id;        /**< [in] secure_id that identifies the ump buffer */
+       ump_uk_lock_usage lock_usage;
+} _ump_uk_lock_s;
+
+typedef struct _ump_uk_unlock_s
+{
+       void *ctx;            /**< [in,out] user-kernel context (trashed on output) */
+       u32 secure_id;        /**< [in] secure_id that identifies the ump buffer */
+} _ump_uk_unlock_s;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UK_TYPES_H__ */
diff --git a/drivers/gpu/arm/mali400/ump/linux/license/gpl/ump_kernel_license.h b/drivers/gpu/arm/mali400/ump/linux/license/gpl/ump_kernel_license.h
new file mode 100644 (file)
index 0000000..698e206
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __UMP_KERNEL_LICENSE_H__
+#define __UMP_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define UMP_KERNEL_LINUX_LICENSE     "GPL"
+#define UMP_LICENSE_IS_GPL 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_KERNEL_LICENSE_H__ */
diff --git a/drivers/gpu/arm/mali400/ump/linux/ump_ioctl.h b/drivers/gpu/arm/mali400/ump/linux/ump_ioctl.h
new file mode 100644 (file)
index 0000000..300f303
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __UMP_IOCTL_H__
+#define __UMP_IOCTL_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#include <ump_uk_types.h>
+
+#ifndef __user
+#define __user
+#endif
+
+
+/**
+ * @file UMP_ioctl.h
+ * This file describes the interface needed to use the Linux device driver.
+ * The interface is used by the userpace UMP driver.
+ */
+
+#define UMP_IOCTL_NR 0x90
+
+
+#define UMP_IOC_QUERY_API_VERSION _IOR(UMP_IOCTL_NR, _UMP_IOC_QUERY_API_VERSION, _ump_uk_api_version_s)
+#define UMP_IOC_ALLOCATE  _IOWR(UMP_IOCTL_NR,  _UMP_IOC_ALLOCATE,  _ump_uk_allocate_s)
+#define UMP_IOC_RELEASE  _IOR(UMP_IOCTL_NR,  _UMP_IOC_RELEASE,  _ump_uk_release_s)
+#define UMP_IOC_SIZE_GET  _IOWR(UMP_IOCTL_NR,  _UMP_IOC_SIZE_GET, _ump_uk_size_get_s)
+#define UMP_IOC_MSYNC     _IOW(UMP_IOCTL_NR,  _UMP_IOC_MSYNC, _ump_uk_msync_s)
+/* MALI_SEC */
+#ifdef CONFIG_ION_EXYNOS
+#define UMP_IOC_ION_IMPORT  _IOW(UMP_IOCTL_NR,  _UMP_IOC_ION_IMPORT, _ump_uk_ion_import_s)
+#endif
+#ifdef CONFIG_DMA_SHARED_BUFFER
+#define UMP_IOC_DMABUF_IMPORT  _IOW(UMP_IOCTL_NR,  _UMP_IOC_DMABUF_IMPORT,\
+                                       struct ump_uk_dmabuf)
+#endif
+
+#define UMP_IOC_CACHE_OPERATIONS_CONTROL _IOW(UMP_IOCTL_NR,  _UMP_IOC_CACHE_OPERATIONS_CONTROL, _ump_uk_cache_operations_control_s)
+#define UMP_IOC_SWITCH_HW_USAGE   _IOW(UMP_IOCTL_NR,  _UMP_IOC_SWITCH_HW_USAGE, _ump_uk_switch_hw_usage_s)
+#define UMP_IOC_LOCK          _IOW(UMP_IOCTL_NR,  _UMP_IOC_LOCK, _ump_uk_lock_s)
+#define UMP_IOC_UNLOCK        _IOW(UMP_IOCTL_NR,  _UMP_IOC_UNLOCK, _ump_uk_unlock_s)
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_IOCTL_H__ */
diff --git a/drivers/gpu/arm/mali400/ump/linux/ump_kernel_linux.c b/drivers/gpu/arm/mali400/ump/linux/ump_kernel_linux.c
new file mode 100644 (file)
index 0000000..9c903cf
--- /dev/null
@@ -0,0 +1,454 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/module.h>            /* kernel module definitions */
+#include <linux/fs.h>                /* file system operations */
+#include <linux/cdev.h>              /* character device definitions */
+#include <linux/ioport.h>            /* request_mem_region */
+#include <linux/mm.h>                /* memory management functions and types */
+#include <asm/uaccess.h>             /* user space access */
+#include <asm/atomic.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+
+#include "arch/config.h"             /* Configuration for current platform. The symlinc for arch is set by Makefile */
+#include "ump_ioctl.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_interface_ref_drv.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+#include "ump_kernel_memory_backend_os.h"
+#include "ump_kernel_memory_backend_dedicated.h"
+#include "ump_kernel_license.h"
+
+#include "ump_osk.h"
+#include "ump_ukk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk_wrappers.h"
+#include "ump_ukk_ref_wrappers.h"
+
+
+/* Module parameter to control log level */
+int ump_debug_level = 2;
+module_param(ump_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(ump_debug_level, "Higher number, more dmesg output");
+
+/* By default the module uses any available major, but it's possible to set it at load time to a specific number */
+int ump_major = 0;
+module_param(ump_major, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_major, "Device major number");
+
+/* Name of the UMP device driver */
+static char ump_dev_name[] = "ump"; /* should be const, but the functions we call require non-const */
+
+
+#if UMP_LICENSE_IS_GPL
+static struct dentry *ump_debugfs_dir = NULL;
+#endif
+
+/*
+ * The data which we attached to each virtual memory mapping request we get.
+ * Each memory mapping has a reference to the UMP memory it maps.
+ * We release this reference when the last memory mapping is unmapped.
+ */
+typedef struct ump_vma_usage_tracker
+{
+       int references;
+       ump_dd_handle handle;
+} ump_vma_usage_tracker;
+
+struct ump_device
+{
+       struct cdev cdev;
+#if UMP_LICENSE_IS_GPL
+       struct class * ump_class;
+#endif
+};
+
+/* The global variable containing the global device data */
+static struct ump_device ump_device;
+
+
+/* Forward declare static functions */
+static int ump_file_open(struct inode *inode, struct file *filp);
+static int ump_file_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long ump_file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+static int ump_file_mmap(struct file * filp, struct vm_area_struct * vma);
+
+
+/* This variable defines the file operations this UMP device driver offer */
+static struct file_operations ump_fops =
+{
+       .owner   = THIS_MODULE,
+       .open    = ump_file_open,
+       .release = ump_file_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+       .unlocked_ioctl   = ump_file_ioctl,
+#else
+       .ioctl   = ump_file_ioctl,
+#endif
+       .mmap    = ump_file_mmap
+};
+
+
+/* This function is called by Linux to initialize this module.
+ * All we do is initialize the UMP device driver.
+ * Returns 0 on success or a negative errno from map_errcode() on failure.
+ */
+static int ump_initialize_module(void)
+{
+	_mali_osk_errcode_t err;
+
+	/* NOTE(review): __DATE__/__TIME__ make the build non-reproducible;
+	 * consider dropping them from the banner. */
+	DBG_MSG(2, ("Inserting UMP device driver. Compiled: %s, time: %s\n", __DATE__, __TIME__));
+
+	err = ump_kernel_constructor();
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		MSG_ERR(("UMP device driver init failed\n"));
+		/* Translate the internal OSK error code into a Linux errno. */
+		return map_errcode(err);
+	}
+
+	MSG(("UMP device driver %s loaded\n", SVN_REV_STRING));
+	return 0;
+}
+
+
+
+/*
+ * This function is called by Linux to unload/terminate/exit/cleanup this module.
+ * All we do is terminate the UMP device driver.
+ */
+static void ump_cleanup_module(void)
+{
+	DBG_MSG(2, ("Unloading UMP device driver\n"));
+	/* Tear down the OS-independent UMP core state. */
+	ump_kernel_destructor();
+	DBG_MSG(2, ("Module unloaded\n"));
+}
+
+
+
+/*
+ * debugfs read handler for "memory_usage": reports the number of bytes
+ * currently allocated through UMP as a decimal string.
+ */
+static ssize_t ump_memory_used_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	size_t r;
+	u32 mem = _ump_ukk_report_memory_usage();
+
+	/* Use sizeof(buf) instead of repeating the magic size 64. */
+	r = snprintf(buf, sizeof(buf), "%u\n", mem);
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static const struct file_operations ump_memory_usage_fops = {
+        .owner = THIS_MODULE,
+        .read = ump_memory_used_read,
+};
+
+/*
+ * Initialize the UMP device driver.
+ *
+ * Registers a character device (major number auto-selected unless the
+ * ump_major module parameter was set) and, on GPL builds, creates the
+ * debugfs "memory_usage" entry plus the device class/node for /dev/ump.
+ * Returns 0 on success or a negative errno; all partially-acquired
+ * resources are released on the failure paths below.
+ */
+int ump_kernel_device_initialize(void)
+{
+	int err;
+	dev_t dev = 0;
+#if UMP_LICENSE_IS_GPL
+	ump_debugfs_dir = debugfs_create_dir(ump_dev_name, NULL);
+	/* NOTE(review): debugfs_create_dir() can also return NULL or other
+	 * ERR_PTR values; only ERR_PTR(-ENODEV) (debugfs disabled) is
+	 * handled here -- confirm against the targeted kernel version. */
+	if (ERR_PTR(-ENODEV) == ump_debugfs_dir)
+	{
+			ump_debugfs_dir = NULL;
+	}
+	else
+	{
+		debugfs_create_file("memory_usage", 0400, ump_debugfs_dir, NULL, &ump_memory_usage_fops);
+	}
+#endif
+
+	if (0 == ump_major)
+	{
+		/* auto select a major */
+		err = alloc_chrdev_region(&dev, 0, 1, ump_dev_name);
+		ump_major = MAJOR(dev);
+	}
+	else
+	{
+		/* use load time defined major number */
+		dev = MKDEV(ump_major, 0);
+		err = register_chrdev_region(dev, 1, ump_dev_name);
+	}
+
+	if (0 == err)
+	{
+		memset(&ump_device, 0, sizeof(ump_device));
+
+		/* initialize our char dev data */
+		cdev_init(&ump_device.cdev, &ump_fops);
+		ump_device.cdev.owner = THIS_MODULE;
+		ump_device.cdev.ops = &ump_fops;
+
+		/* register char dev with the kernel */
+		err = cdev_add(&ump_device.cdev, dev, 1/*count*/);
+		if (0 == err)
+		{
+
+#if UMP_LICENSE_IS_GPL
+			ump_device.ump_class = class_create(THIS_MODULE, ump_dev_name);
+			if (IS_ERR(ump_device.ump_class))
+			{
+				err = PTR_ERR(ump_device.ump_class);
+			}
+			else
+			{
+				struct device * mdev;
+				mdev = device_create(ump_device.ump_class, NULL, dev, NULL, ump_dev_name);
+				if (!IS_ERR(mdev))
+				{
+					/* Fully initialized: class, node and cdev all live. */
+					return 0;
+				}
+
+				err = PTR_ERR(mdev);
+			}
+			/* Unwind: class or device creation failed, drop the cdev. */
+			cdev_del(&ump_device.cdev);
+#else
+			return 0;
+#endif
+		}
+
+		/* Unwind: give the chrdev region back on any failure. */
+		unregister_chrdev_region(dev, 1);
+	}
+
+	return err;
+}
+
+
+
+/*
+ * Terminate the UMP device driver
+ *
+ * Reverses ump_kernel_device_initialize(): destroys the /dev node and
+ * class (GPL builds), removes the character device, releases the major
+ * number and removes the debugfs tree.
+ */
+void ump_kernel_device_terminate(void)
+{
+	dev_t dev = MKDEV(ump_major, 0);
+
+#if UMP_LICENSE_IS_GPL
+	device_destroy(ump_device.ump_class, dev);
+	class_destroy(ump_device.ump_class);
+#endif
+
+	/* unregister char device */
+	cdev_del(&ump_device.cdev);
+
+	/* free major */
+	unregister_chrdev_region(dev, 1);
+
+#if UMP_LICENSE_IS_GPL
+	/* debugfs_remove_recursive(NULL) is a no-op, so the guard is
+	 * redundant but harmless. */
+	if(ump_debugfs_dir)
+		debugfs_remove_recursive(ump_debugfs_dir);
+#endif
+}
+
+/*
+ * Open a new session. User space has called open() on us.
+ *
+ * Allocates per-file session state through the OS-independent core and
+ * stashes it in filp->private_data for the ioctl/mmap/release handlers.
+ * Returns 0 on success, -ENODEV for a non-zero minor, or a mapped errno.
+ */
+static int ump_file_open(struct inode *inode, struct file *filp)
+{
+	struct ump_session_data * session_data;
+	_mali_osk_errcode_t err;
+
+	/* input validation: only minor 0 is a valid UMP device */
+	if (0 != MINOR(inode->i_rdev))
+	{
+		MSG_ERR(("Minor not zero in ump_file_open()\n"));
+		return -ENODEV;
+	}
+
+	/* Call the OS-Independent UMP Open function */
+	err = _ump_ukk_open((void**) &session_data );
+	if( _MALI_OSK_ERR_OK != err )
+	{
+		MSG_ERR(("Ump failed to open a new session\n"));
+		return map_errcode( err );
+	}
+
+	filp->private_data = (void*)session_data;
+	filp->f_pos = 0;
+
+	return 0; /* success */
+}
+
+
+
+/*
+ * Close a session. User space has called close() or crashed/terminated.
+ *
+ * Hands the session state stored in filp->private_data back to the
+ * OS-independent core; returns 0 on success or a mapped errno.
+ */
+static int ump_file_release(struct inode *inode, struct file *filp)
+{
+	_mali_osk_errcode_t err = _ump_ukk_close((void**) &filp->private_data );
+
+	return (_MALI_OSK_ERR_OK == err) ? 0 : map_errcode( err );
+}
+
+
+
+/*
+ * Handle IOCTL requests.
+ *
+ * Decodes the command number and dispatches to the matching UKK wrapper.
+ * 'arg' is always interpreted as a user-space pointer to the per-command
+ * argument struct. Returns 0 on success or a negative errno.
+ */
+#ifdef HAVE_UNLOCKED_IOCTL
+static long ump_file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+	int err = -ENOTTY;
+	void __user * argument;
+	struct ump_session_data * session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+	(void)inode; /* inode not used */
+#endif
+
+	/* Session state was attached by ump_file_open(). */
+	session_data = (struct ump_session_data *)filp->private_data;
+	if (NULL == session_data)
+	{
+		MSG_ERR(("No session data attached to file object\n"));
+		return -ENOTTY;
+	}
+
+	/* interpret the argument as a user pointer to something */
+	argument = (void __user *)arg;
+
+	switch (cmd)
+	{
+		case UMP_IOC_QUERY_API_VERSION:
+			err = ump_get_api_version_wrapper((u32 __user *)argument, session_data);
+			break;
+
+		case UMP_IOC_ALLOCATE :
+			err = ump_allocate_wrapper((u32 __user *)argument, session_data);
+			break;
+/* MALI_SEC */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+		case UMP_IOC_DMABUF_IMPORT:
+			err = ump_dmabuf_import_wrapper((u32 __user *)argument,
+							session_data);
+			break;
+#endif
+
+		case UMP_IOC_RELEASE:
+			err = ump_release_wrapper((u32 __user *)argument, session_data);
+			break;
+
+		case UMP_IOC_SIZE_GET:
+			err = ump_size_get_wrapper((u32 __user *)argument, session_data);
+			break;
+
+		case UMP_IOC_MSYNC:
+			err = ump_msync_wrapper((u32 __user *)argument, session_data);
+			break;
+
+		case UMP_IOC_CACHE_OPERATIONS_CONTROL:
+			err = ump_cache_operations_control_wrapper((u32 __user *)argument, session_data);
+			break;
+
+		case UMP_IOC_SWITCH_HW_USAGE:
+			err = ump_switch_hw_usage_wrapper((u32 __user *)argument, session_data);
+			break;
+
+		case UMP_IOC_LOCK:
+			err = ump_lock_wrapper((u32 __user *)argument, session_data);
+			break;
+
+		case UMP_IOC_UNLOCK:
+			err = ump_unlock_wrapper((u32 __user *)argument, session_data);
+			break;
+
+		default:
+			DBG_MSG(1, ("No handler for IOCTL. cmd: 0x%08x, arg: 0x%08lx\n", cmd, arg));
+			/* NOTE(review): -ENOTTY is the conventional errno for an
+			 * unknown ioctl; -EFAULT is returned here -- confirm user
+			 * space does not depend on it before changing. */
+			err = -EFAULT;
+			break;
+	}
+
+	return err;
+}
+
+/*
+ * Handle from OS to map specified virtual memory to specified UMP memory.
+ *
+ * The UMP secure id is passed in via vma->vm_pgoff (the mmap offset).
+ * Private (non-MAP_SHARED) mappings are treated as a request for cached
+ * CPU access; the mapping is then forced shared anyway so the underlying
+ * pages are the same for all mappers.
+ */
+static int ump_file_mmap(struct file * filp, struct vm_area_struct * vma)
+{
+	_ump_uk_map_mem_s args;
+	_mali_osk_errcode_t err;
+	struct ump_session_data * session_data;
+
+	/* Validate the session data */
+	/* NOTE(review): the cookies_map deref assumes the session's mapping
+	 * table is always populated at mmap time -- confirm with
+	 * _ump_ukk_open(). */
+	if (NULL == session_data || NULL == session_data->cookies_map->table->mappings) /* MALI_SEC */
+	{
+		MSG_ERR(("mmap() called without any session data available\n"));
+		return -EFAULT;
+	}
+
+	/* Re-pack the arguments that mmap() packed for us */
+	args.ctx = session_data;
+	args.phys_addr = 0;
+	args.size = vma->vm_end - vma->vm_start;
+	args._ukk_private = vma;
+	args.secure_id = vma->vm_pgoff;
+	args.is_cached = 0;
+
+	if (!(vma->vm_flags & VM_SHARED))
+	{
+		args.is_cached = 1;
+		vma->vm_flags = vma->vm_flags | VM_SHARED | VM_MAYSHARE  ;
+		DBG_MSG(3, ("UMP Map function: Forcing the CPU to use cache\n"));
+	}
+	/* By setting this flag, during a process fork; the child process will not have the parent UMP mappings */
+	vma->vm_flags |= VM_DONTCOPY;
+
+	DBG_MSG(4, ("UMP vma->flags: %x\n", vma->vm_flags ));
+
+	/* Call the common mmap handler */
+	err = _ump_ukk_map_mem( &args );
+	if ( _MALI_OSK_ERR_OK != err)
+	{
+		MSG_ERR(("_ump_ukk_map_mem() failed in function ump_file_mmap()"));
+		return map_errcode( err );
+	}
+
+	return 0; /* success */
+}
+
+/* Export UMP kernel space API functions */
+EXPORT_SYMBOL(ump_dd_secure_id_get);
+EXPORT_SYMBOL(ump_dd_handle_create_from_secure_id);
+EXPORT_SYMBOL(ump_dd_phys_block_count_get);
+EXPORT_SYMBOL(ump_dd_phys_block_get);
+EXPORT_SYMBOL(ump_dd_phys_blocks_get);
+EXPORT_SYMBOL(ump_dd_size_get);
+EXPORT_SYMBOL(ump_dd_reference_add);
+EXPORT_SYMBOL(ump_dd_reference_release);
+
+/* Export our own extended kernel space allocator */
+EXPORT_SYMBOL(ump_dd_handle_create_from_phys_blocks);
+
+/* Setup init and exit functions for this module */
+module_init(ump_initialize_module);
+module_exit(ump_cleanup_module);
+
+/* And some module information */
+MODULE_LICENSE(UMP_KERNEL_LINUX_LICENSE);
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
diff --git a/drivers/gpu/arm/mali400/ump/linux/ump_kernel_linux.h b/drivers/gpu/arm/mali400/ump/linux/ump_kernel_linux.h
new file mode 100644 (file)
index 0000000..a23a705
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_LINUX_H__
+#define __UMP_KERNEL_LINUX_H__
+
+int ump_kernel_device_initialize(void);
+void ump_kernel_device_terminate(void);
+
+
+#endif /* __UMP_KERNEL_LINUX_H__ */
diff --git a/drivers/gpu/arm/mali400/ump/linux/ump_kernel_memory_backend_dedicated.c b/drivers/gpu/arm/mali400/ump/linux/ump_kernel_memory_backend_dedicated.c
new file mode 100644 (file)
index 0000000..c309d63
--- /dev/null
@@ -0,0 +1,288 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <linux/vmalloc.h>
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+#define UMP_BLOCK_SIZE (256UL * 1024UL)  /* 256kB, remember to keep the ()s */
+
+
+
+typedef struct block_info
+{
+       struct block_info * next;
+} block_info;
+
+
+
+typedef struct block_allocator
+{
+       struct semaphore mutex;
+       block_info * all_blocks;
+       block_info * first_free;
+       u32 base;
+       u32 num_blocks;
+       u32 num_free;
+} block_allocator;
+
+
+static void block_allocator_shutdown(ump_memory_backend * backend);
+static int block_allocator_allocate(void* ctx, ump_dd_mem * mem);
+static void block_allocator_release(void * ctx, ump_dd_mem * handle);
+static inline u32 get_phys(block_allocator * allocator, block_info * block);
+static u32 block_allocator_stat(struct ump_memory_backend *backend);
+
+
+
+/*
+ * Create dedicated memory backend
+ *
+ * Carves the physically-contiguous range [base_address, base_address+size)
+ * into UMP_BLOCK_SIZE blocks managed through a free list. Returns the new
+ * backend, or NULL if the range is too small or an allocation fails.
+ */
+ump_memory_backend * ump_block_allocator_create(u32 base_address, u32 size)
+{
+	ump_memory_backend * backend;
+	block_allocator * allocator;
+	u32 usable_size;
+	u32 num_blocks;
+
+	/* Round the managed range up to whole UMP_BLOCK_SIZE blocks. */
+	usable_size = (size + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1);
+	num_blocks = usable_size / UMP_BLOCK_SIZE;
+
+	if (0 == usable_size)
+	{
+		DBG_MSG(1, ("Memory block of size %u is unusable\n", size));
+		return NULL;
+	}
+
+	DBG_MSG(5, ("Creating dedicated UMP memory backend. Base address: 0x%08x, size: 0x%08x\n", base_address, size));
+	DBG_MSG(6, ("%u usable bytes which becomes %u blocks\n", usable_size, num_blocks));
+
+	backend = kzalloc(sizeof(ump_memory_backend), GFP_KERNEL);
+	if (NULL != backend)
+	{
+		allocator = kmalloc(sizeof(block_allocator), GFP_KERNEL);
+		if (NULL != allocator)
+		{
+			/* BUGFIX: the element type is block_info, not
+			 * block_allocator; the old sizeof over-allocated the
+			 * table several times over. */
+			allocator->all_blocks = kmalloc(sizeof(block_info) * num_blocks, GFP_KERNEL);
+			if (NULL != allocator->all_blocks)
+			{
+				u32 i; /* unsigned to match num_blocks */
+
+				allocator->first_free = NULL;
+				allocator->num_blocks = num_blocks;
+				allocator->num_free = num_blocks;
+				allocator->base = base_address;
+				sema_init(&allocator->mutex, 1);
+
+				/* Thread every block onto the free list. */
+				for (i = 0; i < num_blocks; i++)
+				{
+					allocator->all_blocks[i].next = allocator->first_free;
+					allocator->first_free = &allocator->all_blocks[i];
+				}
+
+				backend->ctx = allocator;
+				backend->allocate = block_allocator_allocate;
+				backend->release = block_allocator_release;
+				backend->shutdown = block_allocator_shutdown;
+				backend->stat = block_allocator_stat;
+				backend->pre_allocate_physical_check = NULL;
+				backend->adjust_to_mali_phys = NULL;
+				/* MALI_SEC */
+				backend->get = NULL;
+				backend->set = NULL;
+
+				return backend;
+			}
+			kfree(allocator);
+		}
+		kfree(backend);
+	}
+
+	return NULL;
+}
+
+
+
+/*
+ * Destroy specified dedicated memory backend
+ *
+ * Frees the block table, the allocator bookkeeping and the backend
+ * struct itself. Emits a debug warning if blocks are still in use.
+ */
+static void block_allocator_shutdown(ump_memory_backend * backend)
+{
+	block_allocator * allocator;
+
+	BUG_ON(!backend);
+	BUG_ON(!backend->ctx);
+
+	allocator = (block_allocator*)backend->ctx;
+
+	DBG_MSG_IF(1, allocator->num_free != allocator->num_blocks, ("%u blocks still in use during shutdown\n", allocator->num_blocks - allocator->num_free));
+
+	kfree(allocator->all_blocks);
+	kfree(allocator);
+	kfree(backend);
+}
+
+
+
+/*
+ * Satisfy an allocation request from the dedicated block pool.
+ *
+ * Rounds mem->size_bytes up to whole UMP_BLOCK_SIZE blocks, pops blocks
+ * off the free list and records them in mem->block_array. On success
+ * returns 1 with mem->backend_info holding the chain of blocks for
+ * block_allocator_release(); on failure returns 0 with all state rolled
+ * back.
+ */
+static int block_allocator_allocate(void* ctx, ump_dd_mem * mem)
+{
+	block_allocator * allocator;
+	u32 left;
+	block_info * last_allocated = NULL;
+	int i = 0;
+
+	BUG_ON(!ctx);
+	BUG_ON(!mem);
+
+	allocator = (block_allocator*)ctx;
+	left = mem->size_bytes;
+
+	BUG_ON(!left);
+	/* (Removed BUG_ON(!&allocator->mutex): the address of a struct
+	 * member can never be NULL, so the check was a no-op.) */
+
+	/* Round the request up to whole blocks and allocate the table that
+	 * describes each physical block handed to the caller. */
+	mem->nr_blocks = ((left + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1)) / UMP_BLOCK_SIZE;
+	mem->block_array = (ump_dd_physical_block*)vmalloc(sizeof(ump_dd_physical_block) * mem->nr_blocks);
+	if (NULL == mem->block_array)
+	{
+		MSG_ERR(("Failed to allocate block array\n"));
+		return 0;
+	}
+
+	if (down_interruptible(&allocator->mutex))
+	{
+		MSG_ERR(("Could not get mutex to do block_allocate\n"));
+		/* BUGFIX: the block array allocated above used to leak on
+		 * this path. */
+		vfree(mem->block_array);
+		mem->block_array = NULL;
+		return 0;
+	}
+
+	mem->size_bytes = 0;
+
+	/* Pop blocks off the free list; chain them via last_allocated so a
+	 * partial allocation can be rolled back below. */
+	while ((left > 0) && (allocator->first_free))
+	{
+		block_info * block;
+
+		block = allocator->first_free;
+		allocator->first_free = allocator->first_free->next;
+		block->next = last_allocated;
+		last_allocated = block;
+		allocator->num_free--;
+
+		mem->block_array[i].addr = get_phys(allocator, block);
+		mem->block_array[i].size = UMP_BLOCK_SIZE;
+		mem->size_bytes += UMP_BLOCK_SIZE;
+
+		i++;
+
+		if (left < UMP_BLOCK_SIZE) left = 0;
+		else left -= UMP_BLOCK_SIZE;
+	}
+
+	if (left)
+	{
+		block_info * block;
+		/* release all memory back to the pool */
+		while (last_allocated)
+		{
+			block = last_allocated->next;
+			last_allocated->next = allocator->first_free;
+			allocator->first_free = last_allocated;
+			last_allocated = block;
+			allocator->num_free++;
+		}
+
+		vfree(mem->block_array);
+		mem->backend_info = NULL;
+		mem->block_array = NULL;
+
+		DBG_MSG(4, ("Could not find a mem-block for the allocation.\n"));
+		up(&allocator->mutex);
+
+		return 0;
+	}
+
+	mem->backend_info = last_allocated;
+
+	up(&allocator->mutex);
+	mem->is_cached=0;
+
+	return 1;
+}
+
+
+
+/*
+ * Return an allocation made by block_allocator_allocate() to the pool.
+ *
+ * Walks the block chain stored in handle->backend_info, pushing each
+ * block back onto the free list, then frees the physical-block table.
+ */
+static void block_allocator_release(void * ctx, ump_dd_mem * handle)
+{
+	block_allocator * allocator;
+	block_info * block, * next;
+
+	BUG_ON(!ctx);
+	BUG_ON(!handle);
+
+	allocator = (block_allocator*)ctx;
+	block = (block_info*)handle->backend_info;
+	BUG_ON(!block);
+
+	if (down_interruptible(&allocator->mutex))
+	{
+		MSG_ERR(("Allocator release: Failed to get mutex - memory leak\n"));
+		return;
+	}
+
+	while (block)
+	{
+		next = block->next;
+
+		/* BUGFIX: the last valid entry is all_blocks[num_blocks - 1],
+		 * so a pointer equal to all_blocks + num_blocks is already out
+		 * of bounds (was '>', an off-by-one in the sanity check). */
+		BUG_ON( (block < allocator->all_blocks) || (block >= (allocator->all_blocks + allocator->num_blocks)));
+
+		block->next = allocator->first_free;
+		allocator->first_free = block;
+		allocator->num_free++;
+
+		block = next;
+	}
+	DBG_MSG(3, ("%d blocks free after release call\n", allocator->num_free));
+	up(&allocator->mutex);
+
+	vfree(handle->block_array);
+	handle->block_array = NULL;
+}
+
+
+
+/*
+ * Helper function for calculating the physical base address of a memory
+ * block: the block's index in the all_blocks table times the block size,
+ * offset from the allocator's base address.
+ */
+static inline u32 get_phys(block_allocator * allocator, block_info * block)
+{
+	u32 index = block - allocator->all_blocks;
+
+	return allocator->base + index * UMP_BLOCK_SIZE;
+}
+
+/*
+ * Report the number of bytes currently handed out by this backend:
+ * blocks in use multiplied by the block size.
+ */
+static u32 block_allocator_stat(struct ump_memory_backend *backend)
+{
+	const block_allocator *pool;
+
+	BUG_ON(!backend);
+	pool = (const block_allocator *)backend->ctx;
+	BUG_ON(!pool);
+
+	return (pool->num_blocks - pool->num_free) * UMP_BLOCK_SIZE;
+}
diff --git a/drivers/gpu/arm/mali400/ump/linux/ump_kernel_memory_backend_dedicated.h b/drivers/gpu/arm/mali400/ump/linux/ump_kernel_memory_backend_dedicated.h
new file mode 100644 (file)
index 0000000..85432a4
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend_dedicated.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__
+
+#include "ump_kernel_memory_backend.h"
+
+ump_memory_backend * ump_block_allocator_create(u32 base_address, u32 size);
+
+#endif /* __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__ */
+
diff --git a/drivers/gpu/arm/mali400/ump/linux/ump_kernel_memory_backend_os.c b/drivers/gpu/arm/mali400/ump/linux/ump_kernel_memory_backend_os.c
new file mode 100644 (file)
index 0000000..4e635ea
--- /dev/null
@@ -0,0 +1,261 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+typedef struct os_allocator
+{
+       struct semaphore mutex;
+       u32 num_pages_max;       /**< Maximum number of pages to allocate from the OS */
+       u32 num_pages_allocated; /**< Number of pages allocated from the OS */
+} os_allocator;
+
+
+
+static void os_free(void* ctx, ump_dd_mem * descriptor);
+static int os_allocate(void* ctx, ump_dd_mem * descriptor);
+static void os_memory_backend_destroy(ump_memory_backend * backend);
+static u32 os_stat(struct ump_memory_backend *backend);
+
+
+
+/*
+ * Create OS memory backend
+ *
+ * max_allocation is the memory budget in bytes; it is converted to a
+ * page count that os_allocate() never exceeds. Returns the new backend,
+ * or NULL on allocation failure.
+ */
+ump_memory_backend * ump_os_memory_backend_create(const int max_allocation)
+{
+	ump_memory_backend * backend;
+	os_allocator * info;
+
+	info = kmalloc(sizeof(os_allocator), GFP_KERNEL);
+	if (NULL == info)
+	{
+		return NULL;
+	}
+
+	info->num_pages_max = max_allocation >> PAGE_SHIFT;
+	info->num_pages_allocated = 0;
+
+	sema_init(&info->mutex, 1);
+
+	backend = kmalloc(sizeof(ump_memory_backend), GFP_KERNEL);
+	if (NULL == backend)
+	{
+		kfree(info);
+		return NULL;
+	}
+
+	/* Wire up the backend vtable; unused hooks are explicitly NULL. */
+	backend->ctx = info;
+	backend->allocate = os_allocate;
+	backend->release = os_free;
+	backend->shutdown = os_memory_backend_destroy;
+	backend->stat = os_stat;
+	backend->pre_allocate_physical_check = NULL;
+	backend->adjust_to_mali_phys = NULL;
+	/* MALI_SEC */
+	backend->get = NULL;
+	backend->set = NULL;
+
+	return backend;
+}
+
+
+
+/*
+ * Tear down an OS memory backend created by ump_os_memory_backend_create().
+ * Warns (debug builds) if pages are still outstanding at shutdown.
+ */
+static void os_memory_backend_destroy(ump_memory_backend * backend)
+{
+	os_allocator * state = (os_allocator*)backend->ctx;
+
+	DBG_MSG_IF(1, 0 != state->num_pages_allocated, ("%d pages still in use during shutdown\n", state->num_pages_allocated));
+
+	kfree(state);
+	kfree(backend);
+}
+
+
+
+/*
+ * Allocate UMP memory
+ *
+ * Grabs pages one at a time from the OS page allocator, up to the
+ * configured budget, and records their physical (or DMA) addresses in
+ * descriptor->block_array. Non-cached allocations are DMA-mapped.
+ * Returns 1 on success, 0 on failure with everything rolled back.
+ */
+static int os_allocate(void* ctx, ump_dd_mem * descriptor)
+{
+	u32 left;
+	os_allocator * info;
+	int pages_allocated = 0;
+	int is_cached;
+
+	BUG_ON(!descriptor);
+	BUG_ON(!ctx);
+
+	info = (os_allocator*)ctx;
+	left = descriptor->size_bytes;
+	is_cached = descriptor->is_cached;
+
+	if (down_interruptible(&info->mutex))
+	{
+		/* BUGFIX: message used to say "os_free" (copy/paste). */
+		DBG_MSG(1, ("Failed to get mutex in os_allocate\n"));
+		return 0; /* failure */
+	}
+
+	descriptor->backend_info = NULL;
+	descriptor->nr_blocks = ((left + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+	DBG_MSG(5, ("Allocating page array. Size: %lu\n", descriptor->nr_blocks * sizeof(ump_dd_physical_block)));
+
+	descriptor->block_array = (ump_dd_physical_block *)vmalloc(sizeof(ump_dd_physical_block) * descriptor->nr_blocks);
+	if (NULL == descriptor->block_array)
+	{
+		up(&info->mutex);
+		DBG_MSG(1, ("Block array could not be allocated\n"));
+		return 0; /* failure */
+	}
+
+	while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max))
+	{
+		struct page * new_page;
+
+		if (is_cached)
+		{
+			new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN);
+		} else
+		{
+			new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
+		}
+		if (NULL == new_page)
+		{
+			MSG_ERR(("UMP memory allocated: Out of Memory !!\n"));
+			break;
+		}
+
+		/* Ensure page caches are flushed. */
+		if ( is_cached )
+		{
+			descriptor->block_array[pages_allocated].addr = page_to_phys(new_page);
+			descriptor->block_array[pages_allocated].size = PAGE_SIZE;
+		} else
+		{
+			descriptor->block_array[pages_allocated].addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL );
+			descriptor->block_array[pages_allocated].size = PAGE_SIZE;
+		}
+
+		DBG_MSG(5, ("Allocated page 0x%08lx cached: %d\n", descriptor->block_array[pages_allocated].addr, is_cached));
+
+		if (left < PAGE_SIZE)
+		{
+			left = 0;
+		}
+		else
+		{
+			left -= PAGE_SIZE;
+		}
+
+		pages_allocated++;
+	}
+
+	/* BUGFIX: the format string has three conversions but only two
+	 * arguments were passed; is_cached was missing (undefined
+	 * behavior / garbage in the log). */
+	DBG_MSG(5, ("Alloce for ID:%2d got %d pages, cached: %d\n", descriptor->secure_id,  pages_allocated, is_cached));
+
+	if (left)
+	{
+		DBG_MSG(1, ("Failed to allocate needed pages\n"));
+		DBG_MSG(1, ("UMP memory allocated: %d kB  Configured maximum OS memory usage: %d kB\n",
+				 (pages_allocated * _MALI_OSK_CPU_PAGE_SIZE)/1024, (info->num_pages_max* _MALI_OSK_CPU_PAGE_SIZE)/1024));
+
+		/* Roll back: unmap and free every page acquired so far. */
+		while(pages_allocated)
+		{
+			pages_allocated--;
+			if ( !is_cached )
+			{
+				dma_unmap_page(NULL, descriptor->block_array[pages_allocated].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+			}
+			__free_page(pfn_to_page(descriptor->block_array[pages_allocated].addr >> PAGE_SHIFT) );
+		}
+
+		/* BUGFIX: the block array itself used to leak on this failure
+		 * path (the dedicated backend frees it, so callers do not). */
+		vfree(descriptor->block_array);
+		descriptor->block_array = NULL;
+
+		up(&info->mutex);
+
+		return 0; /* failure */
+	}
+
+	info->num_pages_allocated += pages_allocated;
+
+	DBG_MSG(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));
+
+	up(&info->mutex);
+
+	return 1; /* success*/
+}
+
+
+/*
+ * Free specified UMP memory
+ *
+ * Undoes os_allocate(): adjusts the page accounting under the mutex,
+ * then unmaps (non-cached case) and frees every page, and finally frees
+ * the physical-block table.
+ */
+static void os_free(void* ctx, ump_dd_mem * descriptor)
+{
+	os_allocator * info;
+	/* NOTE(review): 'i' is int while nr_blocks appears unsigned
+	 * (printed with %lu) -- confirm nr_blocks fits in int. */
+	int i;
+
+	BUG_ON(!ctx);
+	BUG_ON(!descriptor);
+
+	info = (os_allocator*)ctx;
+
+	BUG_ON(descriptor->nr_blocks > info->num_pages_allocated);
+
+	if (down_interruptible(&info->mutex))
+	{
+		/* Pages leak here by design: without the mutex the accounting
+		 * cannot be updated safely. */
+		DBG_MSG(1, ("Failed to get mutex in os_free\n"));
+		return;
+	}
+
+	DBG_MSG(5, ("Releasing %lu OS pages\n", descriptor->nr_blocks));
+
+	info->num_pages_allocated -= descriptor->nr_blocks;
+
+	up(&info->mutex);
+
+	for ( i = 0; i < descriptor->nr_blocks; i++)
+	{
+		DBG_MSG(6, ("Freeing physical page. Address: 0x%08lx\n", descriptor->block_array[i].addr));
+		if ( ! descriptor->is_cached)
+		{
+			/* NOTE(review): passing a NULL device to dma_unmap_page
+			 * matches the dma_map_page call in os_allocate(). */
+			dma_unmap_page(NULL, descriptor->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+		}
+		__free_page(pfn_to_page(descriptor->block_array[i].addr>>PAGE_SHIFT) );
+	}
+
+	vfree(descriptor->block_array);
+}
+
+
+/*
+ * Report the number of bytes currently allocated through this backend.
+ */
+static u32 os_stat(struct ump_memory_backend *backend)
+{
+	const os_allocator *state = (const os_allocator *)backend->ctx;
+
+	return state->num_pages_allocated * _MALI_OSK_MALI_PAGE_SIZE;
+}
diff --git a/drivers/gpu/arm/mali400/ump/linux/ump_kernel_memory_backend_os.h b/drivers/gpu/arm/mali400/ump/linux/ump_kernel_memory_backend_os.h
new file mode 100644 (file)
index 0000000..82e708f
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend_os.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_OS_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_OS_H__
+
+#include "ump_kernel_memory_backend.h"
+
+ump_memory_backend * ump_os_memory_backend_create(const int max_allocation);
+
+#endif /* __UMP_KERNEL_MEMORY_BACKEND_OS_H__ */
+
diff --git a/drivers/gpu/arm/mali400/ump/linux/ump_memory_backend.c b/drivers/gpu/arm/mali400/ump/linux/ump_memory_backend.c
new file mode 100644 (file)
index 0000000..a05c416
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/module.h>            /* kernel module definitions */
+#include <linux/ioport.h>            /* request_mem_region */
+
+#include "arch/config.h"             /* Configuration for current platform. The symlink for arch is set by Makefile */
+
+#include "ump_osk.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend_os.h"
+#include "ump_kernel_memory_backend_dedicated.h"
+
+/* Configure which dynamic memory allocator to use */
+int ump_backend = ARCH_UMP_BACKEND_DEFAULT;
+module_param(ump_backend, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_backend, "0 = dedicated memory backend (default), 1 = OS memory backend");
+
+/* The base address of the memory block for the dedicated memory backend */
+unsigned int ump_memory_address = ARCH_UMP_MEMORY_ADDRESS_DEFAULT;
+module_param(ump_memory_address, uint, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_memory_address, "The physical address to map for the dedicated memory backend");
+
+/* The size of the memory block for the dedicated memory backend */
+unsigned int ump_memory_size = ARCH_UMP_MEMORY_SIZE_DEFAULT;
+module_param(ump_memory_size, uint, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_memory_size, "The size of fixed memory to map in the dedicated memory backend");
+
+ump_memory_backend* ump_memory_backend_create ( void )
+{
+       ump_memory_backend * backend = NULL;
+
+       /* Create the dynamic memory allocator backend */
+       if (0 == ump_backend)
+       {
+               DBG_MSG(2, ("Using dedicated memory backend\n"));
+
+               DBG_MSG(2, ("Requesting dedicated memory: 0x%08x, size: %u\n", ump_memory_address, ump_memory_size));
+               /* Ask the OS if we can use the specified physical memory */
+               if (NULL == request_mem_region(ump_memory_address, ump_memory_size, "UMP Memory"))
+               {
+                       MSG_ERR(("Failed to request memory region (0x%08X - 0x%08X). Is Mali DD already loaded?\n", ump_memory_address, ump_memory_address + ump_memory_size - 1));
+                       return NULL;
+               }
+               backend = ump_block_allocator_create(ump_memory_address, ump_memory_size);
+       }
+       else if (1 == ump_backend)
+       {
+               DBG_MSG(2, ("Using OS memory backend, allocation limit: %d\n", ump_memory_size));
+               backend = ump_os_memory_backend_create(ump_memory_size);
+       }
+/* MALI_SEC */
+#ifdef CONFIG_UMP_VCM_ALLOC
+       else if (2 == ump_backend)
+       {
+               DBG_MSG(2, ("Using VCM memory backend, allocation limit: %d\n", ump_memory_size));
+               backend = ump_vcm_memory_backend_create(ump_memory_size);
+       }
+#endif
+
+       return backend;
+}
+
+/*
+ * Tear down backend-global resources. Only the dedicated backend holds a
+ * file-scope resource (the reserved physical region); the other backends
+ * are destroyed through their own shutdown callbacks.
+ */
+void ump_memory_backend_destroy( void )
+{
+       if (0 == ump_backend)
+       {
+               DBG_MSG(2, ("Releasing dedicated memory: 0x%08x\n", ump_memory_address));
+               release_mem_region(ump_memory_address, ump_memory_size);
+       }
+}
diff --git a/drivers/gpu/arm/mali400/ump/linux/ump_osk_atomics.c b/drivers/gpu/arm/mali400/ump/linux/ump_osk_atomics.c
new file mode 100644 (file)
index 0000000..6ebaa0a
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_atomics.c
+ * Implementation of the OS abstraction layer for the UMP kernel device driver
+ */
+
+#include "ump_osk.h"
+#include <asm/atomic.h>
+
+/* Atomically decrement *atom and return the new value.
+ * The cast relies on _mali_osk_atomic_t wrapping a plain counter whose
+ * storage is layout-compatible with atomic_t. */
+int _ump_osk_atomic_dec_and_read( _mali_osk_atomic_t *atom )
+{
+       return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+/* Atomically increment *atom and return the new value. */
+int _ump_osk_atomic_inc_and_read( _mali_osk_atomic_t *atom )
+{
+       return atomic_inc_return((atomic_t *)&atom->u.val);
+}
diff --git a/drivers/gpu/arm/mali400/ump/linux/ump_osk_low_level_mem.c b/drivers/gpu/arm/mali400/ump/linux/ump_osk_low_level_mem.c
new file mode 100644 (file)
index 0000000..3a01220
--- /dev/null
@@ -0,0 +1,349 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_memory.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+#include <linux/module.h>            /* kernel module definitions */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h> /* MALI_SEC */
+#include <linux/slab.h>
+
+#include <asm/memory.h>
+#include <asm/uaccess.h>                       /* to verify pointers from user space */
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+
+typedef struct ump_vma_usage_tracker
+{
+       atomic_t references;
+       ump_memory_allocation *descriptor;
+} ump_vma_usage_tracker;
+
+static void ump_vma_open(struct vm_area_struct * vma);
+static void ump_vma_close(struct vm_area_struct * vma);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
+#else
+static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
+#endif
+
+static struct vm_operations_struct ump_vm_ops =
+{
+       .open = ump_vma_open,
+       .close = ump_vma_close,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+       .fault = ump_cpu_page_fault_handler
+#else
+       .nopfn = ump_cpu_page_fault_handler
+#endif
+};
+
+/*
+ * Page fault for VMA region
+ * This should never happen since we always map in the entire virtual memory range.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
+#else
+static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
+#endif
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+       void __user * address;
+       address = vmf->virtual_address;
+#endif
+       MSG_ERR(("Page-fault in UMP memory region caused by the CPU\n"));
+       /* Cast so the argument matches the %08lx specifier on both kernel
+        * variants ('address' is a void __user * on >= 2.6.26, where an
+        * uncast pointer would mismatch the format string). */
+       MSG_ERR(("VMA: 0x%08lx, virtual address: 0x%08lx\n", (unsigned long)vma, (unsigned long)address));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+       return VM_FAULT_SIGBUS;
+#else
+       return NOPFN_SIGBUS;
+#endif
+}
+
+/* VMA open callback: a process inherited the mapping (e.g. via fork),
+ * so take another reference on the usage tracker. */
+static void ump_vma_open(struct vm_area_struct * vma)
+{
+       ump_vma_usage_tracker *tracker = (ump_vma_usage_tracker *)vma->vm_private_data;
+       int refs;
+
+       BUG_ON(NULL == tracker);
+
+       refs = atomic_inc_return(&tracker->references);
+       DBG_MSG(4, ("VMA open, VMA reference count incremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, refs));
+}
+
+/* VMA close callback: drop one reference on the usage tracker and, when
+ * the last reference goes away, unmap the UMP memory behind the VMA. */
+static void ump_vma_close(struct vm_area_struct * vma)
+{
+       ump_vma_usage_tracker * vma_usage_tracker;
+       _ump_uk_unmap_mem_s args;
+       int new_val;
+
+       vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
+       BUG_ON(NULL == vma_usage_tracker);
+
+       new_val = atomic_dec_return(&vma_usage_tracker->references);
+
+       DBG_MSG(4, ("VMA close, VMA reference count decremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
+
+       if (0 == new_val)
+       {
+               ump_memory_allocation * descriptor;
+
+               descriptor = vma_usage_tracker->descriptor;
+
+               /* Marshal the descriptor into the user-kernel call structure
+                * expected by the common-layer unmap entry point. */
+               args.ctx = descriptor->ump_session;
+               args.cookie = descriptor->cookie;
+               args.mapping = descriptor->mapping;
+               args.size = descriptor->size;
+
+               args._ukk_private = NULL; /** @note unused */
+
+               DBG_MSG(4, ("No more VMA references left, releasing UMP memory\n"));
+               _ump_ukk_unmap_mem( & args );
+
+               /* vma_usage_tracker is free()d by _ump_osk_mem_mapregion_term() */
+       }
+}
+
+/*
+ * Prepare the VMA stored in descriptor->process_mapping_info for a UMP
+ * mapping: attach a reference-counted usage tracker, mark the VMA as an
+ * I/O region, select write-combined page protection for uncached buffers,
+ * and install the UMP vm_operations. Returns _MALI_OSK_ERR_OK on success.
+ */
+_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation * descriptor )
+{
+       ump_vma_usage_tracker * vma_usage_tracker;
+       struct vm_area_struct *vma;
+
+       if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+       vma_usage_tracker = kmalloc(sizeof(ump_vma_usage_tracker), GFP_KERNEL);
+       if (NULL == vma_usage_tracker)
+       {
+               DBG_MSG(1, ("Failed to allocate memory for ump_vma_usage_tracker in _mali_osk_mem_mapregion_init\n"));
+               /* Was "-_MALI_OSK_ERR_FAULT"; negating the error code made
+                * this path return a different value than every other failure
+                * return in this function. */
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+       if (NULL == vma )
+       {
+               kfree(vma_usage_tracker);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       vma->vm_private_data = vma_usage_tracker;
+       vma->vm_flags |= VM_IO;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+
+       if (0==descriptor->is_cached)
+       {
+               /* Uncached UMP buffers are mapped write-combined. */
+               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+       }
+       DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", vma->vm_page_prot ));
+
+       /* Setup the functions which handle further VMA handling */
+       vma->vm_ops = &ump_vm_ops;
+
+       /* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
+       descriptor->mapping = (void __user*)vma->vm_start;
+
+       atomic_set(&vma_usage_tracker->references, 1); /*this can later be increased if process is forked, see ump_vma_open() */
+       vma_usage_tracker->descriptor = descriptor;
+
+       return _MALI_OSK_ERR_OK;
+}
+
+/*
+ * Undo _ump_osk_mem_mapregion_init(): release the usage tracker attached
+ * to the VMA. The mapping itself is removed by Linux as part of munmap.
+ */
+void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor )
+{
+       struct vm_area_struct* vma;
+       ump_vma_usage_tracker * vma_usage_tracker;
+
+       if (NULL == descriptor) return;
+
+       /* Linux does the right thing as part of munmap to remove the mapping
+        * All that remains is that we remove the vma_usage_tracker setup in init() */
+       vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+
+       vma_usage_tracker = vma->vm_private_data;
+
+       /* We only get called if mem_mapregion_init succeeded */
+       kfree(vma_usage_tracker);
+       return;
+}
+
+/*
+ * Map 'size' bytes of physical memory at *phys_addr into the descriptor's
+ * VMA at the given byte offset, using remap_pfn_range(). Returns
+ * _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT on any failure.
+ */
+_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size )
+{
+       struct vm_area_struct *vma;
+       _mali_osk_errcode_t retval;
+
+       if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+       vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+
+       if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+       /* remap_pfn_range() returns 0 on success. (Dropped the stray double
+        * semicolon that previously ended this statement.) */
+       retval = remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, (*phys_addr) >> PAGE_SHIFT, size, vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+       DBG_MSG(4, ("Mapping virtual to physical memory. ID: %u, vma: 0x%08lx, virtual addr:0x%08lx, physical addr: 0x%08lx, size:%lu, prot:0x%x, vm_flags:0x%x RETVAL: 0x%x\n",
+               ump_dd_secure_id_get(descriptor->handle),
+               (unsigned long)vma,
+               (unsigned long)(vma->vm_start + offset),
+               (unsigned long)*phys_addr,
+               size,
+               (unsigned int)vma->vm_page_prot, vma->vm_flags, retval));
+
+       return retval;
+}
+
+/* Flush the entire L1 cache of the current CPU (ARM-specific helper). */
+static void level1_cache_flush_all(void)
+{
+       DBG_MSG(4, ("UMP[xx] Flushing complete L1 cache\n"));
+       __cpuc_flush_kern_all();
+}
+
+/*
+ * Synchronize CPU caches for (part of) a UMP allocation.
+ *
+ * L1 is maintained by virtual address in one go when the caller supplied a
+ * valid writable user mapping; otherwise a full L1 flush is performed (or
+ * deferred while a batched cache-operation sequence is ongoing). L2 (outer
+ * cache) is then maintained block by block using physical addresses,
+ * honouring 'offset' into the allocation and clamping to 'size' bytes.
+ *
+ * @param mem          UMP allocation to operate on (may be NULL: L1 only)
+ * @param virt         user-space virtual address of the range, or NULL
+ * @param offset       byte offset into the allocation where the range starts
+ * @param size         number of bytes to maintain
+ * @param op           clean / invalidate / clean+invalidate selector
+ * @param session_data per-session state used for deferred L1 flushes
+ */
+void _ump_osk_msync( ump_dd_mem * mem, void * virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data * session_data )
+{
+       int i;
+       const void *start_v, *end_v;
+
+       /* Flush L1 using virtual address, the entire range in one go.
+        * Only flush if user space process has a valid write mapping on given address. */
+       if( (mem) && (virt!=NULL) && (access_ok(VERIFY_WRITE, virt, size)) )
+       {
+               start_v = (void *)virt;
+               end_v   = (void *)(start_v + size - 1);
+               /*  There is no dmac_clean_range, so the L1 is always flushed,
+                *  also for UMP_MSYNC_CLEAN. */
+               /* MALI_SEC */
+               dmac_flush_range(start_v, end_v);
+
+               DBG_MSG(3, ("UMP[%02u] Flushing CPU L1 Cache. Cpu address: %x-%x\n", mem->secure_id, start_v,end_v));
+       }
+       else
+       {
+               if (session_data)
+               {
+                       /* No usable virtual range: fall back to whole-L1
+                        * maintenance, possibly deferred/batched. */
+                       if (op == _UMP_UK_MSYNC_FLUSH_L1  )
+                       {
+                               DBG_MSG(4, ("UMP Pending L1 cache flushes: %d\n", session_data->has_pending_level1_cache_flush));
+                               session_data->has_pending_level1_cache_flush = 0;
+                               level1_cache_flush_all();
+                               return;
+                       }
+                       else
+                       {
+                               if (session_data->cache_operations_ongoing)
+                               {
+                                       /* Inside a START..END batch: record the
+                                        * flush and do it once at the end. */
+                                       session_data->has_pending_level1_cache_flush++;
+                                       DBG_MSG(4, ("UMP[%02u] Defering the L1 flush. Nr pending:%d\n", mem->secure_id, session_data->has_pending_level1_cache_flush) );
+                               }
+                               else
+                               {
+                                       /* Flushing the L1 cache for each switch_user() if ump_cache_operations_control(START) is not called */
+                                       level1_cache_flush_all();
+                               }
+                       }
+               }
+               else
+               {
+                       DBG_MSG(4, ("Unkown state %s %d\n", __FUNCTION__, __LINE__));
+                       level1_cache_flush_all();
+               }
+       }
+
+       if ( NULL == mem ) return;
+
+       if ( mem->size_bytes==size)
+       {
+               DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache\n",mem->secure_id));
+       }
+       else
+       {
+               DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache. Blocks:%u, TotalSize:%u. FlushSize:%u Offset:0x%x FirstPaddr:0x%08x\n",
+                   mem->secure_id, mem->nr_blocks, mem->size_bytes, size, offset, mem->block_array[0].addr));
+       }
+
+
+       /* Flush L2 using physical addresses, block for block.
+        * 'offset' is consumed while skipping leading blocks; 'size' is
+        * consumed as each block's contribution is maintained. */
+       for (i=0 ; i < mem->nr_blocks; i++)
+       {
+               u32 start_p, end_p;
+               ump_dd_physical_block *block;
+               block = &mem->block_array[i];
+
+               /* Entire block lies before the requested range: skip it. */
+               if(offset >= block->size)
+               {
+                       offset -= block->size;
+                       continue;
+               }
+
+               if(offset)
+               {
+                       start_p = (u32)block->addr + offset;
+                       /* We'll zero the offset later, after using it to calculate end_p. */
+               }
+               else
+               {
+                       start_p = (u32)block->addr;
+               }
+
+               if(size < block->size - offset)
+               {
+                       /* Range ends inside this block. */
+                       end_p = start_p + size - 1;
+                       size = 0;
+               }
+               else
+               {
+                       /* Range covers the rest of this block; continue into
+                        * the next block with offset reset to zero. */
+                       if(offset)
+                       {
+                               end_p = start_p + (block->size - offset - 1);
+                               size -= block->size - offset;
+                               offset = 0;
+                       }
+                       else
+                       {
+                               end_p = start_p + block->size - 1;
+                               size -= block->size;
+                       }
+               }
+
+               switch(op)
+               {
+                               case _UMP_UK_MSYNC_CLEAN:
+                                               outer_clean_range(start_p, end_p);
+                                               break;
+                               case _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE:
+                                               outer_flush_range(start_p, end_p);
+                                               break;
+                               case _UMP_UK_MSYNC_INVALIDATE:
+                                               outer_inv_range(start_p, end_p);
+                                               break;
+                               default:
+                                               break;
+               }
+
+               if(0 == size)
+               {
+                       /* Nothing left to flush. */
+                       break;
+               }
+       }
+
+       return;
+}
diff --git a/drivers/gpu/arm/mali400/ump/linux/ump_osk_misc.c b/drivers/gpu/arm/mali400/ump/linux/ump_osk_misc.c
new file mode 100644 (file)
index 0000000..dcfee7d
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_misc.c
+ * Implementation of the OS abstraction layer for the UMP kernel device driver
+ */
+
+
+#include "ump_osk.h"
+
+#include <linux/kernel.h>
+#include "ump_kernel_linux.h"
+
+/* is called from ump_kernel_constructor in common code */
+_mali_osk_errcode_t _ump_osk_init( void )
+{
+       /* Register the Linux device node; a non-zero result is fatal. */
+       return (0 != ump_kernel_device_initialize()) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+}
+
+/* Unregister the Linux device node; termination cannot fail. */
+_mali_osk_errcode_t _ump_osk_term( void )
+{
+       ump_kernel_device_terminate();
+       return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/arm/mali400/ump/linux/ump_ukk_ref_wrappers.c b/drivers/gpu/arm/mali400/ump/linux/ump_ukk_ref_wrappers.c
new file mode 100644 (file)
index 0000000..e252a9b
--- /dev/null
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_wrappers.c
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls for the reference implementation
+ */
+
+
+#include <asm/uaccess.h>             /* user space access */
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+/* MALI_SEC */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+#include <linux/scatterlist.h>
+#include "ump_kernel_interface_ref_drv.h"
+#include "mali_osk_list.h"
+#include <linux/dma-buf.h>
+#endif
+
+/* FIXME */
+struct device tmp_dev;
+
+/*
+ * IOCTL operation; Allocate UMP memory
+ */
+/* Allocate UMP memory on behalf of an ioctl caller.
+ *
+ * Copies the request from user space, performs the allocation through the
+ * common layer, and copies the result (including the new secure id) back.
+ * If the final copy_to_user fails, the freshly allocated memory is released
+ * again so nothing leaks. Returns 0 on success or a negative errno.
+ */
+int ump_allocate_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
+{
+       _ump_uk_allocate_s user_interaction;
+       _mali_osk_errcode_t err;
+
+       /* Sanity check input parameters */
+       if (NULL == argument || NULL == session_data)
+       {
+               MSG_ERR(("NULL parameter in ump_ioctl_allocate()\n"));
+               return -ENOTTY;
+       }
+
+       /* Copy the user space memory to kernel space (so we safely can read it) */
+       if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+       {
+               MSG_ERR(("copy_from_user() in ump_ioctl_allocate()\n"));
+               return -EFAULT;
+       }
+
+       user_interaction.ctx = (void *) session_data;
+
+       err = _ump_ukk_allocate( &user_interaction );
+       if( _MALI_OSK_ERR_OK != err )
+       {
+               DBG_MSG(1, ("_ump_ukk_allocate() failed in ump_ioctl_allocate()\n"));
+               return map_errcode(err);
+       }
+       /* Never leak the kernel-side session pointer back to user space. */
+       user_interaction.ctx = NULL;
+
+       if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+       {
+               /* If the copy fails then we should release the memory. We can use the IOCTL release to accomplish this */
+               _ump_uk_release_s release_args;
+
+               MSG_ERR(("copy_to_user() failed in ump_ioctl_allocate()\n"));
+
+               release_args.ctx = (void *) session_data;
+               release_args.secure_id = user_interaction.secure_id;
+
+               err = _ump_ukk_release( &release_args );
+               if(_MALI_OSK_ERR_OK != err)
+               {
+                       MSG_ERR(("_ump_ukk_release() also failed when trying to release newly allocated memory in ump_ioctl_allocate()\n"));
+               }
+
+               return -EFAULT;
+       }
+
+       return 0; /* success */
+}
+
+/* MALI_SEC */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+/* Look up a previously imported dma-buf in this session's memory list.
+ *
+ * Walks the session memory list under the session lock and returns the
+ * UMP handle whose import attachment references 'dmabuf', or NULL if the
+ * buffer has not been imported into this session yet.
+ */
+static ump_dd_handle
+       get_ump_handle_from_dmabuf(struct ump_session_data *session_data,
+                                       struct dma_buf *dmabuf)
+{
+       ump_session_memory_list_element *session_mem, *tmp;
+       struct dma_buf_attachment *attach;
+       ump_dd_handle ump_handle;
+
+       _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+       _MALI_OSK_LIST_FOREACHENTRY(session_mem, tmp,
+                               &session_data->list_head_session_memory_list,
+                               ump_session_memory_list_element, list) {
+               if (session_mem->mem->import_attach) {
+                       attach = session_mem->mem->import_attach;
+                       if (attach->dmabuf == dmabuf) {
+                               /* Found it: drop the lock before returning. */
+                               _mali_osk_lock_signal(session_data->lock,
+                                                       _MALI_OSK_LOCKMODE_RW);
+                               ump_handle = (ump_dd_handle)session_mem->mem;
+                               return ump_handle;
+                       }
+               }
+       }
+
+       _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+       return NULL;
+}
+
+/* Import a dma-buf fd as a UMP allocation for an ioctl caller.
+ *
+ * If the dma-buf was already imported into this session, the existing UMP
+ * handle is reused; otherwise the buffer is attached and mapped, its
+ * scatterlist converted into UMP physical blocks, and a new UMP handle is
+ * created and linked into the session's memory list. The resulting secure
+ * id and size are copied back to user space. Returns 0 on success or a
+ * negative errno.
+ */
+int ump_dmabuf_import_wrapper(u32 __user *argument,
+                               struct ump_session_data  *session_data)
+{
+       ump_session_memory_list_element *session = NULL;
+       struct ump_uk_dmabuf ump_dmabuf;
+       ump_dd_handle ump_handle;
+       ump_dd_physical_block *blocks = NULL;
+       struct dma_buf_attachment *attach = NULL;
+       struct dma_buf *dma_buf;
+       struct sg_table *sgt = NULL;
+       struct scatterlist *sgl;
+       unsigned long block_size;
+       unsigned int i = 0, npages;
+       int ret;
+
+       /* FIXME */
+       memset(&tmp_dev, 0x0, sizeof(struct device));
+
+       /* Sanity check input parameters */
+       if (!argument || !session_data) {
+               MSG_ERR(("NULL parameter.\n"));
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&ump_dmabuf, argument,
+                               sizeof(struct ump_uk_dmabuf))) {
+               MSG_ERR(("copy_from_user() failed.\n"));
+               return -EFAULT;
+       }
+
+       dma_buf = dma_buf_get(ump_dmabuf.fd);
+       if (IS_ERR(dma_buf))
+               return PTR_ERR(dma_buf);
+
+       /*
+        * if already imported then increase a refcount to the ump descriptor
+        * and call dma_buf_put() and then go to found to return previous
+        * ump secure id.
+        */
+       ump_handle = get_ump_handle_from_dmabuf(session_data, dma_buf);
+       if (ump_handle) {
+               dma_buf_put(dma_buf);
+               goto found;
+       }
+
+       attach = dma_buf_attach(dma_buf, &tmp_dev);
+       if (IS_ERR(attach)) {
+               ret = PTR_ERR(attach);
+               goto err_dma_buf_put;
+       }
+
+       sgt = dma_buf_map_attachment(attach, DMA_NONE);
+       if (IS_ERR(sgt)) {
+               ret = PTR_ERR(sgt);
+               goto err_dma_buf_detach;
+       }
+
+       npages = sgt->nents;
+
+       /* really need? */
+       ump_dmabuf.ctx = (void *)session_data;
+
+       block_size = sizeof(ump_dd_physical_block) * npages;
+
+       blocks = (ump_dd_physical_block *)_mali_osk_malloc(block_size);
+       if (!blocks) {
+               /* Previously unchecked: a failed allocation would have been
+                * dereferenced in the scatterlist loop below. */
+               MSG_ERR(("Failed to allocate block array.\n"));
+               ret = -ENOMEM;
+               dma_buf_unmap_attachment(attach, sgt, DMA_NONE);
+               goto err_dma_buf_detach;
+       }
+       sgl = sgt->sgl;
+
+       /* Translate each scatterlist entry into a UMP physical block. */
+       while (i < npages) {
+               blocks[i].addr = sg_phys(sgl);
+               blocks[i].size = sgl->length;
+               sgl = sg_next(sgl);
+               i++;
+       }
+
+       /*
+        * Initialize the session memory list element, and add it
+        * to the session object
+        */
+       session = _mali_osk_calloc(1, sizeof(*session));
+       if (!session) {
+               DBG_MSG(1, ("Failed to allocate session.\n"));
+               ret = -EFAULT;
+               goto err_free_block;
+       }
+
+       ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, i);
+       if (UMP_DD_HANDLE_INVALID == ump_handle) {
+               DBG_MSG(1, ("Failed to create ump handle.\n"));
+               ret = -EFAULT;
+               goto err_free_session;
+       }
+
+       session->mem = (ump_dd_mem *)ump_handle;
+       session->mem->import_attach = attach;
+       session->mem->sgt = sgt;
+
+       _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+       _mali_osk_list_add(&(session->list),
+                       &(session_data->list_head_session_memory_list));
+       _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+       /* The handle copied the block data; the temporary array can go. */
+       _mali_osk_free(blocks);
+
+found:
+       ump_dmabuf.secure_id = ump_dd_secure_id_get(ump_handle);
+       ump_dmabuf.size = ump_dd_size_get(ump_handle);
+
+       if (copy_to_user(argument, &ump_dmabuf,
+                               sizeof(struct ump_uk_dmabuf))) {
+               MSG_ERR(("copy_to_user() failed.\n"));
+               /* Roll back the freshly created import (session is NULL when
+                * we arrived here via the 'found' fast path). */
+               if (session) {
+                       _mali_osk_lock_wait(session_data->lock,
+                                                       _MALI_OSK_LOCKMODE_RW);
+                       _mali_osk_list_del(&session->list);
+                       _mali_osk_lock_signal(session_data->lock,
+                                                       _MALI_OSK_LOCKMODE_RW);
+                       ump_dd_reference_release(ump_handle);
+                       _mali_osk_free(session);
+               }
+               return -EFAULT;
+       }
+
+       return 0;
+
+err_free_session:
+       _mali_osk_free(session);
+err_free_block:
+       _mali_osk_free(blocks);
+       dma_buf_unmap_attachment(attach, sgt, DMA_NONE);
+err_dma_buf_detach:
+       dma_buf_detach(dma_buf, attach);
+err_dma_buf_put:
+       dma_buf_put(dma_buf);
+       return ret;
+}
+#endif
diff --git a/drivers/gpu/arm/mali400/ump/linux/ump_ukk_ref_wrappers.h b/drivers/gpu/arm/mali400/ump/linux/ump_ukk_ref_wrappers.h
new file mode 100644 (file)
index 0000000..d9d444b
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_wrappers.h
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls for the reference implementation
+ */
+
+#ifndef __UMP_UKK_REF_WRAPPERS_H__
+#define __UMP_UKK_REF_WRAPPERS_H__
+
+#include <linux/kernel.h>
+#include "ump_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+int ump_allocate_wrapper(u32 __user * argument, struct ump_session_data  * session_data);
+/* MALI_SEC */
+#ifdef CONFIG_ION_EXYNOS
+int ump_ion_import_wrapper(u32 __user * argument, struct ump_session_data  * session_data);
+#endif
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+int ump_dmabuf_import_wrapper(u32 __user *argument,
+                               struct ump_session_data  *session_data);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UKK_REF_WRAPPERS_H__ */
diff --git a/drivers/gpu/arm/mali400/ump/linux/ump_ukk_wrappers.c b/drivers/gpu/arm/mali400/ump/linux/ump_ukk_wrappers.c
new file mode 100644 (file)
index 0000000..9cfa5e9
--- /dev/null
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_wrappers.c
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls
+ */
+
+#include <asm/uaccess.h>             /* user space access */
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+
+/*
+ * IOCTL operation; Negotiate version of IOCTL API
+ */
+int ump_get_api_version_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+       _ump_uk_api_version_s version_info;
+       _mali_osk_errcode_t err;
+
+       /* Sanity check input parameters */
+       if (NULL == argument || NULL == session_data)
+       {
+               MSG_ERR(("NULL parameter in ump_ioctl_get_api_version()\n"));
+               return -ENOTTY;
+       }
+
+       /* Copy the user space memory to kernel space (so we safely can read it) */
+       if (0 != copy_from_user(&version_info, argument, sizeof(version_info)))
+       {
+               MSG_ERR(("copy_from_user() in ump_ioctl_get_api_version()\n"));
+               return -EFAULT;
+       }
+
+       version_info.ctx = (void*) session_data;
+       err = _ump_uku_get_api_version( &version_info );
+       if( _MALI_OSK_ERR_OK != err )
+       {
+               MSG_ERR(("_ump_uku_get_api_version() failed in ump_ioctl_get_api_version()\n"));
+               return map_errcode(err);
+       }
+
+       version_info.ctx = NULL;
+
+       /* Copy ouput data back to user space */
+       if (0 != copy_to_user(argument, &version_info, sizeof(version_info)))
+       {
+               MSG_ERR(("copy_to_user() failed in ump_ioctl_get_api_version()\n"));
+               return -EFAULT;
+       }
+
+       return 0; /* success */
+}
+
+
+/*
+ * IOCTL operation; Release reference to specified UMP memory.
+ */
+int ump_release_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
+{
+       _ump_uk_release_s release_args;
+       _mali_osk_errcode_t err;
+
+       /* Sanity check input parameters */
+       if (NULL == session_data)
+       {
+               MSG_ERR(("NULL parameter in ump_ioctl_release()\n"));
+               return -ENOTTY;
+       }
+
+       /* Copy the user space memory to kernel space (so we safely can read it) */
+       if (0 != copy_from_user(&release_args, argument, sizeof(release_args)))
+       {
+               MSG_ERR(("copy_from_user() in ump_ioctl_get_api_version()\n"));
+               return -EFAULT;
+       }
+
+       release_args.ctx = (void*) session_data;
+       err = _ump_ukk_release( &release_args );
+       if( _MALI_OSK_ERR_OK != err )
+       {
+               MSG_ERR(("_ump_ukk_release() failed in ump_ioctl_release()\n"));
+               return map_errcode(err);
+       }
+
+
+       return 0; /* success */
+}
+
+/*
+ * IOCTL operation; Return size for specified UMP memory.
+ */
+int ump_size_get_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
+{
+       _ump_uk_size_get_s user_interaction;
+       _mali_osk_errcode_t err;
+
+       /* Sanity check input parameters */
+       if (NULL == argument || NULL == session_data)
+       {
+               MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n"));
+               return -ENOTTY;
+       }
+
+       if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+       {
+               MSG_ERR(("copy_from_user() in ump_ioctl_size_get()\n"));
+               return -EFAULT;
+       }
+
+       user_interaction.ctx = (void *) session_data;
+       err = _ump_ukk_size_get( &user_interaction );
+       if( _MALI_OSK_ERR_OK != err )
+       {
+               MSG_ERR(("_ump_ukk_size_get() failed in ump_ioctl_size_get()\n"));
+               return map_errcode(err);
+       }
+
+       user_interaction.ctx = NULL;
+
+       if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+       {
+               MSG_ERR(("copy_to_user() failed in ump_ioctl_size_get()\n"));
+               return -EFAULT;
+       }
+
+       return 0; /* success */
+}
+
+/*
+ * IOCTL operation; Do cache maintenance on specified UMP memory.
+ */
+int ump_msync_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
+{
+       _ump_uk_msync_s user_interaction;
+
+       /* Sanity check input parameters */
+       if (NULL == argument || NULL == session_data)
+       {
+               MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n"));
+               return -ENOTTY;
+       }
+
+       if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+       {
+               MSG_ERR(("copy_from_user() in ump_ioctl_msync()\n"));
+               return -EFAULT;
+       }
+
+       user_interaction.ctx = (void *) session_data;
+
+       _ump_ukk_msync( &user_interaction );
+
+       user_interaction.ctx = NULL;
+
+       if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+       {
+               MSG_ERR(("copy_to_user() failed in ump_ioctl_msync()\n"));
+               return -EFAULT;
+       }
+
+       return 0; /* success */
+}
+int ump_cache_operations_control_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
+{
+       _ump_uk_cache_operations_control_s user_interaction;
+
+       /* Sanity check input parameters */
+       if (NULL == argument || NULL == session_data)
+       {
+               MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n"));
+               return -ENOTTY;
+       }
+
+       if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+       {
+               MSG_ERR(("copy_from_user() in ump_ioctl_cache_operations_control()\n"));
+               return -EFAULT;
+       }
+
+       user_interaction.ctx = (void *) session_data;
+
+       _ump_ukk_cache_operations_control((_ump_uk_cache_operations_control_s*) &user_interaction );
+
+       user_interaction.ctx = NULL;
+
+#if 0  /* No data to copy back */
+       if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+       {
+               MSG_ERR(("copy_to_user() failed in ump_ioctl_cache_operations_control()\n"));
+               return -EFAULT;
+       }
+#endif
+       return 0; /* success */
+}
+
+int ump_switch_hw_usage_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
+{
+       _ump_uk_switch_hw_usage_s user_interaction;
+
+       /* Sanity check input parameters */
+       if (NULL == argument || NULL == session_data)
+       {
+               MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n"));
+               return -ENOTTY;
+       }
+
+       if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+       {
+               MSG_ERR(("copy_from_user() in ump_ioctl_switch_hw_usage()\n"));
+               return -EFAULT;
+       }
+
+       user_interaction.ctx = (void *) session_data;
+
+       _ump_ukk_switch_hw_usage( &user_interaction );
+
+       user_interaction.ctx = NULL;
+
+#if 0  /* No data to copy back */
+       if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+       {
+               MSG_ERR(("copy_to_user() failed in ump_ioctl_switch_hw_usage()\n"));
+               return -EFAULT;
+       }
+#endif
+       return 0; /* success */
+}
+
+int ump_lock_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
+{
+       _ump_uk_lock_s user_interaction;
+
+       /* Sanity check input parameters */
+       if (NULL == argument || NULL == session_data)
+       {
+               MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n"));
+               return -ENOTTY;
+       }
+
+       if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+       {
+               MSG_ERR(("copy_from_user() in ump_ioctl_switch_hw_usage()\n"));
+               return -EFAULT;
+       }
+
+       user_interaction.ctx = (void *) session_data;
+
+       _ump_ukk_lock( &user_interaction );
+
+       user_interaction.ctx = NULL;
+
+#if 0  /* No data to copy back */
+       if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+       {
+               MSG_ERR(("copy_to_user() failed in ump_ioctl_switch_hw_usage()\n"));
+               return -EFAULT;
+       }
+#endif
+
+       return 0; /* success */
+}
+
+int ump_unlock_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
+{
+       _ump_uk_unlock_s user_interaction;
+
+       /* Sanity check input parameters */
+       if (NULL == argument || NULL == session_data)
+       {
+               MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n"));
+               return -ENOTTY;
+       }
+
+       if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+       {
+               MSG_ERR(("copy_from_user() in ump_ioctl_switch_hw_usage()\n"));
+               return -EFAULT;
+       }
+
+       user_interaction.ctx = (void *) session_data;
+
+       _ump_ukk_unlock( &user_interaction );
+
+       user_interaction.ctx = NULL;
+
+#if 0  /* No data to copy back */
+       if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+       {
+               MSG_ERR(("copy_to_user() failed in ump_ioctl_switch_hw_usage()\n"));
+               return -EFAULT;
+       }
+#endif
+
+       return 0; /* success */
+}
diff --git a/drivers/gpu/arm/mali400/ump/linux/ump_ukk_wrappers.h b/drivers/gpu/arm/mali400/ump/linux/ump_ukk_wrappers.h
new file mode 100644 (file)
index 0000000..0d7abb1
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_wrappers.h
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls
+ */
+
+#ifndef __UMP_UKK_WRAPPERS_H__
+#define __UMP_UKK_WRAPPERS_H__
+
+#include <linux/kernel.h>
+#include "ump_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+
+int ump_get_api_version_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_release_wrapper(u32 __user * argument, struct ump_session_data  * session_data);
+int ump_size_get_wrapper(u32 __user * argument, struct ump_session_data  * session_data);
+int ump_msync_wrapper(u32 __user * argument, struct ump_session_data  * session_data);
+int ump_cache_operations_control_wrapper(u32 __user * argument, struct ump_session_data  * session_data);
+int ump_switch_hw_usage_wrapper(u32 __user * argument, struct ump_session_data  * session_data);
+int ump_lock_wrapper(u32 __user * argument, struct ump_session_data  * session_data);
+int ump_unlock_wrapper(u32 __user * argument, struct ump_session_data  * session_data);
+
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+
+#endif /* __UMP_UKK_WRAPPERS_H__ */
index 4ac7199..f15ed97 100644 (file)
@@ -155,14 +155,14 @@ static struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 };
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 };
index 83114b5..dcea8b3 100644 (file)
@@ -201,6 +201,19 @@ free:
 }
 EXPORT_SYMBOL(drm_gem_object_alloc);
 
 }
 EXPORT_SYMBOL(drm_gem_object_alloc);
 
+static void
+drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
+{
+       if (obj->import_attach) {
+               drm_prime_remove_buf_handle(&filp->prime,
+                               obj->import_attach->dmabuf);
+       }
+       if (obj->export_dma_buf) {
+               drm_prime_remove_buf_handle(&filp->prime,
+                               obj->export_dma_buf);
+       }
+}
+
 /**
  * Removes the mapping from handle to filp for this object.
  */
 /**
  * Removes the mapping from handle to filp for this object.
  */
@@ -233,9 +246,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);
 
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);
 
-       if (obj->import_attach)
-               drm_prime_remove_imported_buf_handle(&filp->prime,
-                               obj->import_attach->dmabuf);
+       drm_gem_remove_prime_handles(obj, filp);
 
        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, filp);
 
        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, filp);
@@ -532,9 +543,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;
 
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;
 
-       if (obj->import_attach)
-               drm_prime_remove_imported_buf_handle(&file_priv->prime,
-                               obj->import_attach->dmabuf);
+       drm_gem_remove_prime_handles(obj, file_priv);
 
        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);
 
        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);
index 033a60b..0e9784a 100644 (file)
@@ -282,6 +282,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
        case DRM_CAP_DUMB_PREFER_SHADOW:
                req->value = dev->mode_config.prefer_shadow;
                break;
        case DRM_CAP_DUMB_PREFER_SHADOW:
                req->value = dev->mode_config.prefer_shadow;
                break;
+       case DRM_CAP_TIMESTAMP_MONOTONIC:
+               req->value = drm_timestamp_monotonic;
+               break;
        default:
                return -EINVAL;
        }
        default:
                return -EINVAL;
        }
index 9a41842..5da4064 100644 (file)
@@ -575,7 +575,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
                                          unsigned flags,
                                          struct drm_crtc *refcrtc)
 {
                                          unsigned flags,
                                          struct drm_crtc *refcrtc)
 {
-       struct timeval stime, raw_time;
+       ktime_t stime, etime, mono_time_offset;
+       struct timeval tv_etime;
        struct drm_display_mode *mode;
        int vbl_status, vtotal, vdisplay;
        int vpos, hpos, i;
        struct drm_display_mode *mode;
        int vbl_status, vtotal, vdisplay;
        int vpos, hpos, i;
@@ -624,13 +625,15 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
                preempt_disable();
 
                /* Get system timestamp before query. */
                preempt_disable();
 
                /* Get system timestamp before query. */
-               do_gettimeofday(&stime);
+               stime = ktime_get();
 
                /* Get vertical and horizontal scanout pos. vpos, hpos. */
                vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
 
                /* Get system timestamp after query. */
 
                /* Get vertical and horizontal scanout pos. vpos, hpos. */
                vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
 
                /* Get system timestamp after query. */
-               do_gettimeofday(&raw_time);
+               etime = ktime_get();
+               if (!drm_timestamp_monotonic)
+                       mono_time_offset = ktime_get_monotonic_offset();
 
                preempt_enable();
 
 
                preempt_enable();
 
@@ -641,7 +644,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
                        return -EIO;
                }
 
                        return -EIO;
                }
 
-               duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
+               duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
 
                /* Accept result with <  max_error nsecs timing uncertainty. */
                if (duration_ns <= (s64) *max_error)
 
                /* Accept result with <  max_error nsecs timing uncertainty. */
                if (duration_ns <= (s64) *max_error)
@@ -688,14 +691,20 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
                vbl_status |= 0x8;
        }
 
                vbl_status |= 0x8;
        }
 
+       if (!drm_timestamp_monotonic)
+               etime = ktime_sub(etime, mono_time_offset);
+
+       /* save this only for debugging purposes */
+       tv_etime = ktime_to_timeval(etime);
        /* Subtract time delta from raw timestamp to get final
         * vblank_time timestamp for end of vblank.
         */
        /* Subtract time delta from raw timestamp to get final
         * vblank_time timestamp for end of vblank.
         */
-       *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
+       etime = ktime_sub_ns(etime, delta_ns);
+       *vblank_time = ktime_to_timeval(etime);
 
        DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
                  crtc, (int)vbl_status, hpos, vpos,
 
        DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
                  crtc, (int)vbl_status, hpos, vpos,
-                 (long)raw_time.tv_sec, (long)raw_time.tv_usec,
+                 (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
                  (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
                  (int)duration_ns/1000, i);
 
                  (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
                  (int)duration_ns/1000, i);
 
@@ -707,6 +716,17 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 }
 EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
 
 }
 EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
 
+static struct timeval get_drm_timestamp(void)
+{
+       ktime_t now;
+
+       now = ktime_get();
+       if (!drm_timestamp_monotonic)
+               now = ktime_sub(now, ktime_get_monotonic_offset());
+
+       return ktime_to_timeval(now);
+}
+
 /**
  * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
  * vblank interval.
 /**
  * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
  * vblank interval.
@@ -744,9 +764,9 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
        }
 
        /* GPU high precision timestamp query unsupported or failed.
        }
 
        /* GPU high precision timestamp query unsupported or failed.
-        * Return gettimeofday timestamp as best estimate.
+        * Return current monotonic/gettimeofday timestamp as best estimate.
         */
         */
-       do_gettimeofday(tvblank);
+       *tvblank = get_drm_timestamp();
 
        return 0;
 }
 
        return 0;
 }
@@ -801,6 +821,47 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
 }
 EXPORT_SYMBOL(drm_vblank_count_and_time);
 
 }
 EXPORT_SYMBOL(drm_vblank_count_and_time);
 
+static void send_vblank_event(struct drm_device *dev,
+               struct drm_pending_vblank_event *e,
+               unsigned long seq, struct timeval *now)
+{
+       WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
+       e->event.sequence = seq;
+       e->event.tv_sec = now->tv_sec;
+       e->event.tv_usec = now->tv_usec;
+
+       list_add_tail(&e->base.link,
+                     &e->base.file_priv->event_list);
+       wake_up_interruptible(&e->base.file_priv->event_wait);
+       trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+                                        e->event.sequence);
+}
+
+/**
+ * drm_send_vblank_event - helper to send vblank event after pageflip
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ */
+void drm_send_vblank_event(struct drm_device *dev, int crtc,
+               struct drm_pending_vblank_event *e)
+{
+       struct timeval now;
+       unsigned int seq;
+       if (crtc >= 0) {
+               seq = drm_vblank_count_and_time(dev, crtc, &now);
+       } else {
+               seq = 0;
+
+               now = get_drm_timestamp();
+       }
+       send_vblank_event(dev, e, seq, &now);
+}
+EXPORT_SYMBOL(drm_send_vblank_event);
+
 /**
  * drm_update_vblank_count - update the master vblank counter
  * @dev: DRM device
 /**
  * drm_update_vblank_count - update the master vblank counter
  * @dev: DRM device
@@ -935,6 +996,13 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
 }
 EXPORT_SYMBOL(drm_vblank_put);
 
 }
 EXPORT_SYMBOL(drm_vblank_put);
 
+/**
+ * drm_vblank_off - disable vblank events on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Caller must hold event lock.
+ */
 void drm_vblank_off(struct drm_device *dev, int crtc)
 {
        struct drm_pending_vblank_event *e, *t;
 void drm_vblank_off(struct drm_device *dev, int crtc)
 {
        struct drm_pending_vblank_event *e, *t;
@@ -954,15 +1022,9 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
                DRM_DEBUG("Sending premature vblank event on disable: \
                          wanted %d, current %d\n",
                          e->event.sequence, seq);
                DRM_DEBUG("Sending premature vblank event on disable: \
                          wanted %d, current %d\n",
                          e->event.sequence, seq);
-
-               e->event.sequence = seq;
-               e->event.tv_sec = now.tv_sec;
-               e->event.tv_usec = now.tv_usec;
+               list_del(&e->base.link);
                drm_vblank_put(dev, e->pipe);
                drm_vblank_put(dev, e->pipe);
-               list_move_tail(&e->base.link, &e->base.file_priv->event_list);
-               wake_up_interruptible(&e->base.file_priv->event_wait);
-               trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
-                                                e->event.sequence);
+               send_vblank_event(dev, e, seq, &now);
        }
 
        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
        }
 
        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -1112,15 +1174,9 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
 
        e->event.sequence = vblwait->request.sequence;
        if ((seq - vblwait->request.sequence) <= (1 << 23)) {
 
        e->event.sequence = vblwait->request.sequence;
        if ((seq - vblwait->request.sequence) <= (1 << 23)) {
-               e->event.sequence = seq;
-               e->event.tv_sec = now.tv_sec;
-               e->event.tv_usec = now.tv_usec;
                drm_vblank_put(dev, pipe);
                drm_vblank_put(dev, pipe);
-               list_add_tail(&e->base.link, &e->base.file_priv->event_list);
-               wake_up_interruptible(&e->base.file_priv->event_wait);
+               send_vblank_event(dev, e, seq, &now);
                vblwait->reply.sequence = seq;
                vblwait->reply.sequence = seq;
-               trace_drm_vblank_event_delivered(current->pid, pipe,
-                                                vblwait->request.sequence);
        } else {
                /* drm_handle_vblank_events will call drm_vblank_put */
                list_add_tail(&e->base.link, &dev->vblank_event_list);
        } else {
                /* drm_handle_vblank_events will call drm_vblank_put */
                list_add_tail(&e->base.link, &dev->vblank_event_list);
@@ -1261,14 +1317,9 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
                DRM_DEBUG("vblank event on %d, current %d\n",
                          e->event.sequence, seq);
 
                DRM_DEBUG("vblank event on %d, current %d\n",
                          e->event.sequence, seq);
 
-               e->event.sequence = seq;
-               e->event.tv_sec = now.tv_sec;
-               e->event.tv_usec = now.tv_usec;
+               list_del(&e->base.link);
                drm_vblank_put(dev, e->pipe);
                drm_vblank_put(dev, e->pipe);
-               list_move_tail(&e->base.link, &e->base.file_priv->event_list);
-               wake_up_interruptible(&e->base.file_priv->event_wait);
-               trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
-                                                e->event.sequence);
+               send_vblank_event(dev, e, seq, &now);
        }
 
        spin_unlock_irqrestore(&dev->event_lock, flags);
        }
 
        spin_unlock_irqrestore(&dev->event_lock, flags);
index 35123db..f3a65c1 100644 (file)
@@ -60,6 +60,7 @@ struct drm_prime_member {
        struct dma_buf *dma_buf;
        uint32_t handle;
 };
        struct dma_buf *dma_buf;
        uint32_t handle;
 };
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
 
 int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                struct drm_file *file_priv, uint32_t handle, uint32_t flags,
 
 int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                struct drm_file *file_priv, uint32_t handle, uint32_t flags,
@@ -67,6 +68,8 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 {
        struct drm_gem_object *obj;
        void *buf;
 {
        struct drm_gem_object *obj;
        void *buf;
+       int ret = 0;
+       struct dma_buf *dmabuf;
 
        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj)
 
        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj)
@@ -75,32 +78,47 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
        mutex_lock(&file_priv->prime.lock);
        /* re-export the original imported object */
        if (obj->import_attach) {
        mutex_lock(&file_priv->prime.lock);
        /* re-export the original imported object */
        if (obj->import_attach) {
-               get_dma_buf(obj->import_attach->dmabuf);
-               *prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags);
-               drm_gem_object_unreference_unlocked(obj);
-               mutex_unlock(&file_priv->prime.lock);
-               return 0;
+               dmabuf = obj->import_attach->dmabuf;
+               goto out_have_obj;
        }
 
        if (obj->export_dma_buf) {
        }
 
        if (obj->export_dma_buf) {
-               get_dma_buf(obj->export_dma_buf);
-               *prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
-               drm_gem_object_unreference_unlocked(obj);
-       } else {
-               buf = dev->driver->gem_prime_export(dev, obj, flags);
-               if (IS_ERR(buf)) {
-                       /* normally the created dma-buf takes ownership of the ref,
-                        * but if that fails then drop the ref
-                        */
-                       drm_gem_object_unreference_unlocked(obj);
-                       mutex_unlock(&file_priv->prime.lock);
-                       return PTR_ERR(buf);
-               }
-               obj->export_dma_buf = buf;
-               *prime_fd = dma_buf_fd(buf, flags);
+               dmabuf = obj->export_dma_buf;
+
+               ret = drm_prime_add_buf_handle(&file_priv->prime,
+                                              dmabuf, handle);
+               goto out_have_obj;
+       }
+
+       buf = dev->driver->gem_prime_export(dev, obj, flags);
+       if (IS_ERR(buf)) {
+               /* normally the created dma-buf takes ownership of the ref,
+                * but if that fails then drop the ref
+                */
+               ret = PTR_ERR(buf);
+               goto out;
        }
        }
+       obj->export_dma_buf = buf;
+
+       /* if we've exported this buffer the cheat and add it to the import list
+        * so we get the correct handle back
+        */
+       ret = drm_prime_add_buf_handle(&file_priv->prime,
+                                      obj->export_dma_buf, handle);
+       if (ret)
+               goto out;
+
+       *prime_fd = dma_buf_fd(buf, flags);
        mutex_unlock(&file_priv->prime.lock);
        return 0;
        mutex_unlock(&file_priv->prime.lock);
        return 0;
+
+out_have_obj:
+       get_dma_buf(dmabuf);
+       *prime_fd = dma_buf_fd(dmabuf, flags);
+out:
+       drm_gem_object_unreference_unlocked(obj);
+       mutex_unlock(&file_priv->prime.lock);
+       return ret;
 }
 EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
 
 }
 EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
 
@@ -117,7 +135,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
 
        mutex_lock(&file_priv->prime.lock);
 
 
        mutex_lock(&file_priv->prime.lock);
 
-       ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
+       ret = drm_prime_lookup_buf_handle(&file_priv->prime,
                        dma_buf, handle);
        if (!ret) {
                ret = 0;
                        dma_buf, handle);
        if (!ret) {
                ret = 0;
@@ -136,7 +154,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
        if (ret)
                goto out_put;
 
        if (ret)
                goto out_put;
 
-       ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
+       ret = drm_prime_add_buf_handle(&file_priv->prime,
                        dma_buf, *handle);
        if (ret)
                goto fail;
                        dma_buf, *handle);
        if (ret)
                goto fail;
@@ -258,7 +276,7 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
 }
 EXPORT_SYMBOL(drm_prime_destroy_file_private);
 
 }
 EXPORT_SYMBOL(drm_prime_destroy_file_private);
 
-int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
 {
        struct drm_prime_member *member;
 
 {
        struct drm_prime_member *member;
 
@@ -266,14 +284,14 @@ int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv
        if (!member)
                return -ENOMEM;
 
        if (!member)
                return -ENOMEM;
 
+       get_dma_buf(dma_buf);
        member->dma_buf = dma_buf;
        member->handle = handle;
        list_add(&member->entry, &prime_fpriv->head);
        return 0;
 }
        member->dma_buf = dma_buf;
        member->handle = handle;
        list_add(&member->entry, &prime_fpriv->head);
        return 0;
 }
-EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);
 
 
-int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
+int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
 {
        struct drm_prime_member *member;
 
 {
        struct drm_prime_member *member;
 
@@ -285,19 +303,20 @@ int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fp
        }
        return -ENOENT;
 }
        }
        return -ENOENT;
 }
-EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle);
+EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
 
 
-void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
 {
        struct drm_prime_member *member, *safe;
 
        mutex_lock(&prime_fpriv->lock);
        list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
                if (member->dma_buf == dma_buf) {
 {
        struct drm_prime_member *member, *safe;
 
        mutex_lock(&prime_fpriv->lock);
        list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
                if (member->dma_buf == dma_buf) {
+                       dma_buf_put(dma_buf);
                        list_del(&member->entry);
                        kfree(member);
                }
        }
        mutex_unlock(&prime_fpriv->lock);
 }
                        list_del(&member->entry);
                        kfree(member);
                }
        }
        mutex_unlock(&prime_fpriv->lock);
 }
-EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle);
+EXPORT_SYMBOL(drm_prime_remove_buf_handle);
index aa454f8..fc7b777 100644 (file)
@@ -46,16 +46,24 @@ EXPORT_SYMBOL(drm_vblank_offdelay);
 unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
 EXPORT_SYMBOL(drm_timestamp_precision);
 
 unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
 EXPORT_SYMBOL(drm_timestamp_precision);
 
+/*
+ * Default to use monotonic timestamps for wait-for-vblank and page-flip
+ * complete events.
+ */
+unsigned int drm_timestamp_monotonic = 1;
+
 MODULE_AUTHOR(CORE_AUTHOR);
 MODULE_DESCRIPTION(CORE_DESC);
 MODULE_LICENSE("GPL and additional rights");
 MODULE_PARM_DESC(debug, "Enable debug output");
 MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
 MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
 MODULE_AUTHOR(CORE_AUTHOR);
 MODULE_DESCRIPTION(CORE_DESC);
 MODULE_LICENSE("GPL and additional rights");
 MODULE_PARM_DESC(debug, "Enable debug output");
 MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
 MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
 
 module_param_named(debug, drm_debug, int, 0600);
 module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
 module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
 
 module_param_named(debug, drm_debug, int, 0600);
 module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
 module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
+module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
 
 struct idr drm_minors_idr;
 
 
 struct idr drm_minors_idr;
 
index fe11704..71086f5 100644 (file)
@@ -2,11 +2,10 @@
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos \
-               -Idrivers/media/video/samsung/ump/include
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
 exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
                exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
 exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
                exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
-               exynos_drm_buf.o exynos_drm_ump.o exynos_drm_gem.o \
+               exynos_drm_buf.o exynos_drm_gem.o \
                exynos_drm_core.o exynos_drm_plane.o
 
 exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
                exynos_drm_core.o exynos_drm_plane.o
 
 exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
index 3ecb30d..88276b2 100644 (file)
@@ -31,6 +31,7 @@
 static inline int exynos_drm_format_num_buffers(uint32_t format)
 {
        switch (format) {
 static inline int exynos_drm_format_num_buffers(uint32_t format)
 {
        switch (format) {
+       case DRM_FORMAT_NV12:
        case DRM_FORMAT_NV12M:
        case DRM_FORMAT_NV12MT:
                return 2;
        case DRM_FORMAT_NV12M:
        case DRM_FORMAT_NV12MT:
                return 2;
index b2fcc48..f16d43e 100644 (file)
@@ -422,7 +422,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
         * this callback will release a ump object only if user requested
         * ump export otherwise just return.
         */
         * this callback will release a ump object only if user requested
         * ump export otherwise just return.
         */
-       if (private_cb->release_buffer)
+       if (private_cb && private_cb->release_buffer)
                private_cb->release_buffer(exynos_gem_obj->priv_handle);
 
        if (!buf->pages)
                private_cb->release_buffer(exynos_gem_obj->priv_handle);
 
        if (!buf->pages)
index 8f3707d..eb6da24 100644 (file)
@@ -329,9 +329,9 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
                pos = &config->pos;
                sz = &config->sz;
 
                pos = &config->pos;
                sz = &config->sz;
 
-               DRM_INFO("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
+               DRM_INFO("%s:prop_id[%d]ops[%s]fmt[%.4s]\n",
                        __func__, property->prop_id,
                        __func__, property->prop_id,
-                       i ? "dst" : "src", config->fmt);
+                       i ? "dst" : "src", (char *)&config->fmt);
 
                DRM_INFO("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
                        __func__, pos->x, pos->y, pos->w, pos->h,
 
                DRM_INFO("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
                        __func__, pos->x, pos->y, pos->w, pos->h,
@@ -438,9 +438,17 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
        list_splice_init(&priv->event_list, &c_node->event_list);
        list_add_tail(&c_node->list, &ippdrv->cmd_list);
 
        list_splice_init(&priv->event_list, &c_node->event_list);
        list_add_tail(&c_node->list, &ippdrv->cmd_list);
 
+       /*
+        * Previously, in case of WB and direct output, the ipp driver was not
+        * become dedicated. By suggestion of Inke Dae, make all ipp driver in
+        * use dedicated even in the case of M2M to avoid concurrent problem in
+        * TLB. We don't know whether there are cases which use more than 2 ipp
+        * driver at the same time. If it is not, this change solves the
+        * problems clearly without any side effect.
+        */
        /* make dedicated state without m2m */
        /* make dedicated state without m2m */
-       if (property->cmd != IPP_CMD_M2M)
-               ippdrv->dedicated = true;
+       /* if (property->cmd != IPP_CMD_M2M) */
+       ippdrv->dedicated = true;
 
        return 0;
 
 
        return 0;
 
index 17f881f..2c70464 100644 (file)
@@ -351,6 +351,18 @@ static void mixer_run(struct mixer_context *ctx)
        mixer_regs_dump(ctx);
 }
 
        mixer_regs_dump(ctx);
 }
 
+static void mixer_wait_for_vblank(void *ctx)
+{
+       struct mixer_context *mixer_ctx = ctx;
+       struct mixer_resources *res = &mixer_ctx->mixer_res;
+       int ret;
+
+       ret = wait_for((mixer_reg_read(res, MXR_INT_STATUS) &
+                               MXR_INT_STATUS_VSYNC), 50);
+       if (ret < 0)
+               DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
 static void vp_video_buffer(struct mixer_context *ctx, int win)
 {
        struct mixer_resources *res = &ctx->mixer_res;
 static void vp_video_buffer(struct mixer_context *ctx, int win)
 {
        struct mixer_resources *res = &ctx->mixer_res;
@@ -368,6 +380,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
        switch (win_data->pixel_format) {
        case DRM_FORMAT_NV12MT:
                tiled_mode = true;
        switch (win_data->pixel_format) {
        case DRM_FORMAT_NV12MT:
                tiled_mode = true;
+       case DRM_FORMAT_NV12:
        case DRM_FORMAT_NV12M:
                crcb_mode = false;
                buf_num = 2;
        case DRM_FORMAT_NV12M:
                crcb_mode = false;
                buf_num = 2;
@@ -465,6 +478,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
        mixer_vsync_set_update(ctx, true);
        spin_unlock_irqrestore(&res->reg_slock, flags);
 
        mixer_vsync_set_update(ctx, true);
        spin_unlock_irqrestore(&res->reg_slock, flags);
 
+       mixer_wait_for_vblank(ctx);
+
        vp_regs_dump(ctx);
 }
 
        vp_regs_dump(ctx);
 }
 
@@ -791,18 +806,6 @@ static void mixer_dpms(void *ctx, int mode)
        }
 }
 
        }
 }
 
-static void mixer_wait_for_vblank(void *ctx)
-{
-       struct mixer_context *mixer_ctx = ctx;
-       struct mixer_resources *res = &mixer_ctx->mixer_res;
-       int ret;
-
-       ret = wait_for((mixer_reg_read(res, MXR_INT_STATUS) &
-                               MXR_INT_STATUS_VSYNC), 50);
-       if (ret < 0)
-               DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
 static void mixer_win_mode_set(void *ctx,
                              struct exynos_drm_overlay *overlay)
 {
 static void mixer_win_mode_set(void *ctx,
                              struct exynos_drm_overlay *overlay)
 {
index 8f8728b..864390b 100644 (file)
@@ -1552,6 +1552,10 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SAMSUNG_ELECTRONICS, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SAMSUNG_ELECTRONICS, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SAMSUNG_ELECTRONICS, USB_DEVICE_ID_SAMSUNG_WIRELESS_GAMEPAD) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SAMSUNG_ELECTRONICS, USB_DEVICE_ID_SAMSUNG_WIRELESS_ACTIONMOUSE) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SAMSUNG_ELECTRONICS, USB_DEVICE_ID_SAMSUNG_WIRELESS_BOOKCOVER) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SAMSUNG_ELECTRONICS, USB_DEVICE_ID_SAMSUNG_WIRELESS_UNIVERSAL_KBD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
index 4886aea..88f57e9 100644 (file)
 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE        0x0001
 #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD     0x7021
 #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE       0x0600
 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE        0x0001
 #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD     0x7021
 #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE       0x0600
+#define USB_DEVICE_ID_SAMSUNG_WIRELESS_GAMEPAD         0xa000
+#define USB_DEVICE_ID_SAMSUNG_WIRELESS_ACTIONMOUSE     0xa004
+#define USB_DEVICE_ID_SAMSUNG_WIRELESS_BOOKCOVER       0xa005
+#define USB_DEVICE_ID_SAMSUNG_WIRELESS_UNIVERSAL_KBD   0xa006
 
 #define USB_VENDOR_ID_SIGMA_MICRO      0x1c4f
 #define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD     0x0002
 
 #define USB_VENDOR_ID_SIGMA_MICRO      0x1c4f
 #define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD     0x0002
index a20ae7a..d659f76 100644 (file)
@@ -149,6 +149,8 @@ static int samsung_kbd_input_mapping(struct hid_device *hdev,
                /* key found */
                case 0x32: samsung_kbd_mouse_map_key_clear(KEY_BACKSLASH); break;
                case 0x29: samsung_kbd_mouse_map_key_clear(KEY_BACK); break;
                /* key found */
                case 0x32: samsung_kbd_mouse_map_key_clear(KEY_BACKSLASH); break;
                case 0x29: samsung_kbd_mouse_map_key_clear(KEY_BACK); break;
+               /* Only for BR keyboard */
+               case 0x87: samsung_kbd_mouse_map_key_clear(KEY_RO); break;
                default:
                        return 0;
                }
                default:
                        return 0;
                }
@@ -186,6 +188,147 @@ static int samsung_kbd_input_mapping(struct hid_device *hdev,
        return 1;
 }
 
        return 1;
 }
 
+static int samsung_gamepad_input_mapping(struct hid_device *hdev,
+       struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
+       unsigned long **bit, int *max)
+{
+       if (!(HID_UP_BUTTON == (usage->hid & HID_USAGE_PAGE) ||
+               HID_UP_CONSUMER == (usage->hid & HID_USAGE_PAGE)))
+               return 0;
+
+       dbg_hid("samsung wireless gamepad input mapping event [0x%x], %ld, %ld, [0x%x]\n",
+               usage->hid & HID_USAGE, hi->input->evbit[0], hi->input->absbit[0], usage->hid & HID_USAGE_PAGE);
+
+       if (HID_UP_BUTTON == (usage->hid & HID_USAGE_PAGE)) {
+               switch(usage->hid & HID_USAGE) {
+               case 0x01: samsung_kbd_mouse_map_key_clear(BTN_A); break;
+               case 0x02: samsung_kbd_mouse_map_key_clear(BTN_B); break;
+               case 0x03: samsung_kbd_mouse_map_key_clear(BTN_C); break;
+               case 0x04: samsung_kbd_mouse_map_key_clear(BTN_X); break;
+               case 0x05: samsung_kbd_mouse_map_key_clear(BTN_Y); break;
+               case 0x06: samsung_kbd_mouse_map_key_clear(BTN_Z); break;
+               case 0x07: samsung_kbd_mouse_map_key_clear(BTN_TL); break;
+               case 0x08: samsung_kbd_mouse_map_key_clear(BTN_TR); break;
+               case 0x09: samsung_kbd_mouse_map_key_clear(BTN_TL2); break;
+               case 0x0a: samsung_kbd_mouse_map_key_clear(BTN_TR2); break;
+               case 0x0b: samsung_kbd_mouse_map_key_clear(BTN_SELECT); break;
+               case 0x0c: samsung_kbd_mouse_map_key_clear(BTN_START); break;
+               case 0x0d: samsung_kbd_mouse_map_key_clear(BTN_MODE); break;
+               case 0x0e: samsung_kbd_mouse_map_key_clear(BTN_THUMBL); break;
+               case 0x0f: samsung_kbd_mouse_map_key_clear(BTN_THUMBR); break;
+               case 0x10: samsung_kbd_mouse_map_key_clear(BTN_GAME); break; /* Android code -> BTN_GAME but it is not exist in latest kernel */
+               default:
+                       return 0;
+               }
+       }
+
+       if (HID_UP_CONSUMER == (usage->hid & HID_USAGE_PAGE)) {
+               switch (usage->hid & HID_USAGE) {
+               case 0x040: samsung_kbd_mouse_map_key_clear(KEY_MENU); break;
+               case 0x223: samsung_kbd_mouse_map_key_clear(KEY_HOMEPAGE); break;
+               case 0x224: samsung_kbd_mouse_map_key_clear(KEY_BACK); break;
+
+               /* Screen Capture */
+               case 0x303: samsung_kbd_mouse_map_key_clear(KEY_SYSRQ); break;
+
+               default:
+                       return 0;
+               }
+       }
+
+       return 1;
+}
+
+static int samsung_actionmouse_input_mapping(struct hid_device *hdev,
+       struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
+       unsigned long **bit, int *max)
+{
+
+       dbg_hid("samsung wireless actionmouse input mapping event [0x%x], [0x%x], %ld, %ld, [0x%x]\n",
+                       usage->hid, usage->hid & HID_USAGE, hi->input->evbit[0], hi->input->absbit[0], usage->hid & HID_USAGE_PAGE);
+
+       if(((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER) && ((usage->hid & HID_USAGE_PAGE) != HID_UP_BUTTON))
+               return 0;
+
+       switch (usage->hid & HID_USAGE) {
+               case 0x301: samsung_kbd_mouse_map_key_clear(KEY_RECENT); break;
+               default:
+                       return 0;
+       }
+
+       return 1;
+}
+
+static int samsung_universal_kbd_input_mapping(struct hid_device *hdev,
+       struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
+       unsigned long **bit, int *max)
+{
+       if (!(HID_UP_CONSUMER == (usage->hid & HID_USAGE_PAGE) ||
+                       HID_UP_KEYBOARD == (usage->hid & HID_USAGE_PAGE)))
+               return 0;
+
+       dbg_hid("samsung wireless keyboard input mapping event [0x%x]\n",
+               usage->hid & HID_USAGE);
+
+       if (HID_UP_KEYBOARD == (usage->hid & HID_USAGE_PAGE)) {
+               switch (usage->hid & HID_USAGE) {
+               set_bit(EV_REP, hi->input->evbit);
+               /* Only for UK keyboard */
+               /* key found */
+#ifdef CONFIG_HID_KK_UPGRADE
+               case 0x32: samsung_kbd_mouse_map_key_clear(KEY_KBDILLUMTOGGLE); break;
+               case 0x64: samsung_kbd_mouse_map_key_clear(KEY_BACKSLASH); break;
+#else
+               case 0x32: samsung_kbd_mouse_map_key_clear(KEY_BACKSLASH); break;
+               case 0x64: samsung_kbd_mouse_map_key_clear(KEY_102ND); break;
+#endif
+               /* Only for BR keyboard */
+               case 0x87: samsung_kbd_mouse_map_key_clear(KEY_RO); break;
+               default:
+                       return 0;
+               }
+       }
+
+       if (HID_UP_CONSUMER == (usage->hid & HID_USAGE_PAGE)) {
+               switch (usage->hid & HID_USAGE) {
+               /* report 2 */
+               /* MENU */
+               case 0x040: samsung_kbd_mouse_map_key_clear(KEY_MENU); break;
+               case 0x18a: samsung_kbd_mouse_map_key_clear(KEY_MAIL); break;
+               case 0x196: samsung_kbd_mouse_map_key_clear(KEY_WWW); break;
+               case 0x19e: samsung_kbd_mouse_map_key_clear(KEY_SCREENLOCK); break;
+               case 0x221: samsung_kbd_mouse_map_key_clear(KEY_SEARCH); break;
+               case 0x223: samsung_kbd_mouse_map_key_clear(KEY_HOMEPAGE); break;
+               /* RECENTAPPS */
+               case 0x301: samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY1); break;
+               /* APPLICATION */
+               case 0x302: samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY2); break;
+               /* Voice search */
+               case 0x305: samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY4); break;
+               /* QPANEL on/off */
+               case 0x306: samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY5); break;
+               /* SIP on/off */
+               case 0x307: samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY3); break;
+               /* LANG */
+               case 0x308: samsung_kbd_mouse_map_key_clear(KEY_LANGUAGE); break;
+               case 0x30a: samsung_kbd_mouse_map_key_clear(KEY_BRIGHTNESSDOWN); break;
+               case 0x070: samsung_kbd_mouse_map_key_clear(KEY_BRIGHTNESSDOWN); break;
+               case 0x30b: samsung_kbd_mouse_map_key_clear(KEY_BRIGHTNESSUP); break;
+               case 0x06f: samsung_kbd_mouse_map_key_clear(KEY_BRIGHTNESSUP); break;
+               /* S-Finder */
+               case 0x304: samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY7); break;
+               /* Screen Capture */
+               case 0x303: samsung_kbd_mouse_map_key_clear(KEY_SYSRQ); break;
+               /* Multi Window */
+               case 0x309: samsung_kbd_mouse_map_key_clear(BTN_TRIGGER_HAPPY9); break;
+               default:
+                       return 0;
+               }
+       }
+
+       return 1;
+}
+
 static __u8 *samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
        unsigned int *rsize)
 {
 static __u8 *samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
        unsigned int *rsize)
 {
@@ -206,6 +349,15 @@ static int samsung_input_mapping(struct hid_device *hdev, struct hid_input *hi,
        else if (USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD == hdev->product)
                ret = samsung_kbd_input_mapping(hdev,
                        hi, field, usage, bit, max);
        else if (USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD == hdev->product)
                ret = samsung_kbd_input_mapping(hdev,
                        hi, field, usage, bit, max);
+       else if(USB_DEVICE_ID_SAMSUNG_WIRELESS_GAMEPAD == hdev->product)
+               ret = samsung_gamepad_input_mapping(hdev,
+                       hi, field, usage, bit, max);
+       else if(USB_DEVICE_ID_SAMSUNG_WIRELESS_ACTIONMOUSE == hdev->product)
+               ret = samsung_actionmouse_input_mapping(hdev,
+                       hi, field, usage, bit, max);
+       else if(USB_DEVICE_ID_SAMSUNG_WIRELESS_UNIVERSAL_KBD == hdev->product)
+               ret = samsung_universal_kbd_input_mapping(hdev,
+                       hi, field, usage, bit, max);
 
        return ret;
 }
 
        return ret;
 }
@@ -245,6 +397,9 @@ static const struct hid_device_id samsung_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SAMSUNG_ELECTRONICS, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SAMSUNG_ELECTRONICS, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SAMSUNG_ELECTRONICS, USB_DEVICE_ID_SAMSUNG_WIRELESS_GAMEPAD) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SAMSUNG_ELECTRONICS, USB_DEVICE_ID_SAMSUNG_WIRELESS_ACTIONMOUSE) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SAMSUNG_ELECTRONICS, USB_DEVICE_ID_SAMSUNG_WIRELESS_UNIVERSAL_KBD) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, samsung_devices);
        { }
 };
 MODULE_DEVICE_TABLE(hid, samsung_devices);
index 58103d4..53e5733 100644 (file)
@@ -718,4 +718,9 @@ config SLP_PROCESS_MON
          Providing monitoring important processes. Users can register the process
          with sysfs.
 
          Providing monitoring important processes. Users can register the process
          with sysfs.
 
+config SLP_GLOBAL_LOCK
+       bool "SLP Global Lock"
+       help
+         This supports global lock feature for SLP.
+
 endif # MISC_DEVICES
 endif # MISC_DEVICES
index f2195e2..39669be 100644 (file)
@@ -87,6 +87,7 @@ obj-y                         += 2mic/
 
 obj-$(CONFIG_SLP_PROCESS_MON)  += slp_process_monitor.o
 obj-$(CONFIG_SLP_LOWMEM_NOTIFY)        += slp_lowmem_notify.o
 
 obj-$(CONFIG_SLP_PROCESS_MON)  += slp_process_monitor.o
 obj-$(CONFIG_SLP_LOWMEM_NOTIFY)        += slp_lowmem_notify.o
+obj-$(CONFIG_SLP_GLOBAL_LOCK)  += slp_global_lock.o
 
 obj-$(CONFIG_MACH_M0_CTC)   += cw_tty.o
 
 
 obj-$(CONFIG_MACH_M0_CTC)   += cw_tty.o
 
diff --git a/drivers/misc/slp_global_lock.c b/drivers/misc/slp_global_lock.c
new file mode 100644 (file)
index 0000000..01f2d38
--- /dev/null
@@ -0,0 +1,903 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/major.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/wait.h>
+#include <linux/jiffies.h>
+#include <asm/current.h>
+#include <linux/sched.h>
+
+#include "slp_global_lock.h"
+
+#if MALI_INTERNAL_TIMELINE_PROFILING_ENABLED
+#include "mali_osk.h"
+#include "mali_osk_profiling.h"
+#endif
+
+#define SGL_WARN(x, y...)      printk(KERN_INFO "[SGL_WARN(%d,%d)](%s):%d " x " \n" , current->tgid, current->pid, __FUNCTION__, __LINE__, ##y)
+#define SGL_INFO(x, y...)      printk(KERN_INFO "[SGL_INFO] " x , ##y)
+
+#if 0 /* LOG */
+
+#define SGL_LOG(x, y...)       printk(KERN_INFO "[SGL_LOG(%d,%d)](%s):%d " x " \n" , current->tgid, current->pid, __FUNCTION__, __LINE__, ##y);
+#define SGL_DEBUG(x, y...)     printk(KERN_INFO "[SGL_DEBUG(%d,%d)](%s):%d " x " \n" , current->tgid, current->pid, __FUNCTION__, __LINE__, ##y);
+
+#else
+
+#define SGL_LOG(x, y...)
+#define SGL_DEBUG(x, y...)
+
+#endif
+
+static struct sgl_global {
+       int major;
+       struct class            *class;
+       struct device           *device;
+       void                            *locks;                 /* global lock table */
+       int                                     refcnt;                 /* ref count of sgl_global */
+       struct mutex            mutex;
+} sgl_global;
+
+struct sgl_session_data {
+       void                            *inited_locks;  /* per session initialized locks */
+       void                            *locked_locks;  /* per session locked locks */
+};
+
+struct sgl_lock {
+       unsigned int            key;                    /* key of this lock */
+       unsigned int            timeout_ms;             /* timeout in ms */
+       unsigned int            refcnt;                 /* ref count of initialization */
+       wait_queue_head_t       waiting_queue;  /* waiting queue */
+       struct list_head        waiting_list;   /* waiting list */
+       struct mutex            waiting_list_mutex;
+       unsigned int            locked;                 /* flag if this lock is locked */
+       unsigned int            owner;                  /* session data */
+
+       struct mutex            data_mutex;
+       unsigned int            user_data1;
+       unsigned int            user_data2;
+
+       pid_t                   owner_pid;
+       pid_t                   owner_tid;
+};
+
+/**************** hash code start ***************/
+#define SGL_HASH_BITS          4
+#define SGL_HASH_ENTRIES       (1 << SGL_HASH_BITS)
+
+struct sgl_hash_head {
+       struct hlist_head       head;                   /* hash_head */
+       struct mutex            mutex;
+};
+
+struct sgl_hash_node {
+       unsigned int            key;                    /* key for lock. must be same as lock->key */
+       struct sgl_lock         *lock;                  /* lock object */
+       struct hlist_node       node;                   /* hash node */
+};
+
+static const char sgl_dev_name[] = "slp_global_lock";
+
+/* find the sgl_lock object with key in the hash table */
+static struct sgl_hash_node *sgl_hash_get_node(struct sgl_hash_head *hash, unsigned int key)
+{
+       struct sgl_hash_head *hash_head = &hash[hash_32(key, SGL_HASH_BITS)];
+       struct sgl_hash_node *hash_node = NULL;
+       struct sgl_hash_node *found = NULL;
+
+       struct hlist_head *head = &hash_head->head;
+       struct hlist_node *pos;
+
+       SGL_LOG("key %d", key);
+
+       mutex_lock(&hash_head->mutex);
+       hlist_for_each_entry(hash_node, pos, head, node) {
+               if (hash_node->key == key) {
+                       found = hash_node;
+                       break;
+               }
+       }
+       mutex_unlock(&hash_head->mutex);
+
+       SGL_LOG("hash_node: %p", hash_node);
+
+       return found;
+}
+
+/* insert the hash entry */
+static struct sgl_hash_node *sgl_hash_insert_node(struct sgl_hash_head *hash, unsigned int key)
+{
+       struct sgl_hash_head *hash_head = &hash[hash_32(key, SGL_HASH_BITS)];
+       struct sgl_hash_node *hash_node;
+
+       struct hlist_head *head = &hash_head->head;
+
+       SGL_LOG("key %d", key);
+
+       hash_node = kzalloc(sizeof(struct sgl_hash_node), GFP_KERNEL);
+       if (hash_node == NULL)
+               return NULL;
+
+       INIT_HLIST_NODE(&hash_node->node);
+       mutex_lock(&hash_head->mutex);
+       hlist_add_head(&hash_node->node, head);
+       mutex_unlock(&hash_head->mutex);
+
+       hash_node->key = key;
+
+       SGL_LOG();
+
+       return hash_node;
+}
+
+/* remove the hash entry */
+/*
+ * sgl_hash_remove_node - unlink and free the entry matching @key.
+ *
+ * Hashes @key to its bucket and scans the chain under the bucket mutex;
+ * the first node whose ->key matches is deleted and kfree()d.
+ *
+ * Returns 0 on success, -ENOENT when no entry with @key exists.
+ * Note: the sgl_lock the node points to is NOT freed here.
+ */
+static int sgl_hash_remove_node(struct sgl_hash_head *hash, unsigned int key)
+{
+       struct sgl_hash_head *hash_head = &hash[hash_32(key, SGL_HASH_BITS)];
+       struct sgl_hash_node *hash_node;
+
+       struct hlist_head *head = &hash_head->head;
+       struct hlist_node *pos;
+
+       int err = -ENOENT;
+
+       SGL_LOG("key %d", key);
+
+       mutex_lock(&hash_head->mutex);
+       hlist_for_each_entry(hash_node, pos, head, node) {
+               if (hash_node->key == key) {
+                       hlist_del(&hash_node->node);
+                       kfree(hash_node);
+                       err = 0;
+                       break;
+               }
+       }
+       mutex_unlock(&hash_head->mutex);
+
+       SGL_LOG();
+
+       return err;
+}
+
+/*
+ * sgl_hash_cleanup_nodes - drain every bucket, invoking
+ * @lock_cleanup_func on each entry's lock before freeing the entry.
+ *
+ * Returns 0, or -EBADRQC if any cleanup callback failed (all entries
+ * are still removed and freed regardless).
+ */
+static int sgl_hash_cleanup_nodes(struct sgl_hash_head *hash, int (*lock_cleanup_func)(struct sgl_lock *))
+{
+       struct sgl_hash_node *hash_node;
+
+       struct hlist_head *head;
+
+       int i;
+       int err = 0;
+
+       SGL_LOG();
+
+       for (i = 0; i < SGL_HASH_ENTRIES; i++) {
+               head = &hash[i].head;
+               /* bug fix: the original always took bucket 0's mutex
+                * (&hash->mutex) while draining bucket i */
+               mutex_lock(&hash[i].mutex);
+               while (!hlist_empty(head)) {
+                       hash_node = hlist_entry(head->first, struct sgl_hash_node, node);
+                       if (lock_cleanup_func(hash_node->lock) < 0)
+                               err = -EBADRQC;
+                       hlist_del(&hash_node->node);
+                       kfree(hash_node);
+               }
+               mutex_unlock(&hash[i].mutex);
+       }
+
+       SGL_LOG();
+
+       return err;
+}
+
+/* allocate the hash table */
+/*
+ * sgl_hash_create_table - allocate the zeroed bucket array
+ * (SGL_HASH_ENTRIES buckets) and initialize each bucket's list head and
+ * mutex.
+ *
+ * Returns the array, or NULL on allocation failure; the counterpart is
+ * sgl_hash_destroy_table().
+ */
+static struct sgl_hash_head *sgl_hash_create_table(void)
+{
+       struct sgl_hash_head *hash;
+
+       int i;
+
+       SGL_LOG();
+
+       hash = kzalloc(sizeof(struct sgl_hash_head) * SGL_HASH_ENTRIES, GFP_KERNEL);
+       if (hash == NULL)
+               return NULL;
+
+       for (i = 0; i < SGL_HASH_ENTRIES; i++) {
+               INIT_HLIST_HEAD(&hash[i].head);
+               mutex_init(&hash[i].mutex);
+       }
+
+       SGL_LOG();
+
+       return hash;
+}
+
+/* release the hash table */
+static void sgl_hash_destroy_table(struct sgl_hash_head *hash)
+{
+       SGL_LOG();
+
+       /* Frees the bucket array only; all entries must already have been
+        * removed (see sgl_hash_cleanup_nodes). */
+       kfree(hash);
+
+       SGL_LOG();
+}
+
+/**************** hash code end ***************/
+
+static struct sgl_lock *sgl_get_lock(void *locks, unsigned int key)
+{
+       /* Look @key up in the hash table; NULL when not present. */
+       struct sgl_hash_node *node =
+               sgl_hash_get_node((struct sgl_hash_head *)locks, key);
+
+       return node ? node->lock : NULL;
+}
+
+static int sgl_insert_lock(void *locks, struct sgl_lock *lock)
+{
+       struct sgl_hash_node *node;
+
+       /* Create a hash entry keyed by the lock's key, then attach it. */
+       node = sgl_hash_insert_node((struct sgl_hash_head *)locks, lock->key);
+       if (!node)
+               return -ENOMEM;
+
+       node->lock = lock;
+       return 0;
+}
+
+static int sgl_remove_lock(void *locks, unsigned int key)
+{
+       /* Drop the hash entry for @key; -ENOENT when it is absent. */
+       return sgl_hash_remove_node((struct sgl_hash_head *)locks, key);
+}
+
+static int sgl_cleanup_locks(void *locks, int (*lock_cleanup_func)(struct sgl_lock *))
+{
+       /* Run the callback over every entry and empty the table. */
+       return sgl_hash_cleanup_nodes((struct sgl_hash_head *)locks,
+                       lock_cleanup_func);
+}
+
+static void *sgl_create_locks(void)
+{
+       /* Opaque handle; internally a hash table of sgl_lock entries. */
+       return sgl_hash_create_table();
+}
+
+static void sgl_destroy_locks(void *locks)
+{
+       /* Frees the table itself; entries must be cleaned up beforehand. */
+       sgl_hash_destroy_table(locks);
+}
+/********** lock - hash glue code end *********/
+
+
+static int sgl_lock_lock(struct sgl_session_data *session_data, unsigned int key)
+{
+       struct sgl_lock *lock;
+
+       struct list_head waiting_entry;
+
+       unsigned long jiffies;
+       long ret = 0;
+
+       SGL_LOG("key: %d", key);
+
+       mutex_lock(&sgl_global.mutex);
+       lock = sgl_get_lock(sgl_global.locks, key);
+       if (lock == NULL) {
+               if (sgl_get_lock(session_data->inited_locks, key))
+                       sgl_remove_lock(session_data->inited_locks, key);
+
+               if (sgl_get_lock(session_data->locked_locks, key))
+                       sgl_remove_lock(session_data->locked_locks, key);
+               mutex_unlock(&sgl_global.mutex);
+               SGL_WARN("lock is not in the global locks");
+               return -ENOENT;
+       }
+
+       lock = sgl_get_lock(session_data->inited_locks, key);
+       if (lock == NULL) {
+               mutex_unlock(&sgl_global.mutex);
+               SGL_WARN("lock is not in the inited locks");
+               return -ENOENT;
+       }
+       mutex_unlock(&sgl_global.mutex);
+
+       INIT_LIST_HEAD(&waiting_entry);
+       mutex_lock(&lock->data_mutex);
+       lock->refcnt++;
+       mutex_unlock(&lock->data_mutex);
+       mutex_lock(&lock->waiting_list_mutex);
+       list_add_tail(&waiting_entry, &lock->waiting_list);
+       mutex_unlock(&lock->waiting_list_mutex);
+
+       jiffies = msecs_to_jiffies(lock->timeout_ms);
+
+#if MALI_INTERNAL_TIMELINE_PROFILING_ENABLED
+    _mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE |
+                               MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                               MALI_PROFILING_EVENT_REASON_SINGLE_SW_GLOBAL_TRY_LOCK,
+                               _mali_osk_get_pid(), _mali_osk_get_tid(), key, 0, 0);
+#endif
+
+       SGL_LOG();
+
+       ret = wait_event_timeout(lock->waiting_queue,
+                       ((lock->locked == 0)
+                       && lock->waiting_list.next == &waiting_entry),
+                       jiffies);
+       if (ret == 0) {
+               SGL_WARN("timed out, key: %d, owner(%d, %d)",
+                               key, lock->owner_pid, lock->owner_tid);
+               mutex_lock(&lock->data_mutex);
+               lock->refcnt--;
+               mutex_unlock(&lock->data_mutex);
+               mutex_lock(&lock->waiting_list_mutex);
+               list_del(&waiting_entry);
+               mutex_unlock(&lock->waiting_list_mutex);
+               return -ETIMEDOUT;
+       }
+
+       SGL_LOG();
+
+       mutex_lock(&lock->data_mutex);
+       lock->owner = (unsigned int)session_data;
+       lock->locked = 1;
+       lock->owner_pid = current->tgid;
+       lock->owner_tid = current->pid;
+       mutex_unlock(&lock->data_mutex);
+
+       mutex_lock(&lock->waiting_list_mutex);
+       list_del(&waiting_entry);
+       mutex_unlock(&lock->waiting_list_mutex);
+
+       /* add to the locked lock */
+       sgl_insert_lock(session_data->locked_locks, lock);
+
+#if MALI_INTERNAL_TIMELINE_PROFILING_ENABLED
+    _mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE |
+                               MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                               MALI_PROFILING_EVENT_REASON_SINGLE_SW_GLOBAL_LOCK,
+                               _mali_osk_get_pid(), _mali_osk_get_tid(), key, 0, 0);
+#endif
+
+       SGL_LOG();
+
+       return 0;
+}
+
+/*
+ * _sgl_unlock_lock - core unlock: clear ownership/locked state, drop one
+ * reference, and wake the waiter queue, all under lock->data_mutex.
+ *
+ * Returns 0 on success, -EBADRQC when @lock is NULL or not locked.
+ * NOTE(review): ownership is NOT verified here -- callers (and the
+ * release-time cleanup path, which force-unlocks) are responsible.
+ */
+static int _sgl_unlock_lock(struct sgl_lock *lock)
+{
+       SGL_LOG();
+
+       if (lock == NULL) {
+               SGL_WARN("lock == NULL");
+               return -EBADRQC;
+       }
+       mutex_lock(&lock->data_mutex);
+
+       if (lock->locked == 0) {
+               mutex_unlock(&lock->data_mutex);
+               SGL_WARN("tried to unlock not-locked lock");
+               return -EBADRQC;
+       }
+
+       lock->owner = 0;
+       lock->locked = 0;
+       lock->owner_pid = 0;
+       lock->owner_tid = 0;
+       lock->refcnt--;
+
+       /* wake all waiters; each re-checks that it is at the head of the
+        * waiting list before taking the lock */
+       if (waitqueue_active(&lock->waiting_queue)) {
+               wake_up(&lock->waiting_queue);
+       }
+       mutex_unlock(&lock->data_mutex);
+
+       SGL_LOG();
+
+       return 0;
+}
+
+static int sgl_unlock_lock(struct sgl_session_data *session_data, unsigned int key)
+{
+       struct sgl_lock *lock;
+
+       int err = -ENOENT;
+
+       SGL_LOG("key: %d", key);
+
+       mutex_lock(&sgl_global.mutex);
+       lock = sgl_get_lock(sgl_global.locks, key);
+       if (lock == NULL) {
+               if (sgl_get_lock(session_data->inited_locks, key))
+                       sgl_remove_lock(session_data->inited_locks, key);
+
+               if (sgl_get_lock(session_data->locked_locks, key))
+                       sgl_remove_lock(session_data->locked_locks, key);
+               mutex_unlock(&sgl_global.mutex);
+               SGL_WARN("lock is not in the global locks");
+               return -ENOENT;
+       }
+
+       lock = sgl_get_lock(session_data->inited_locks, key);
+       if (lock == NULL) {
+               mutex_unlock(&sgl_global.mutex);
+               SGL_WARN("lock is not in the inited locks");
+               return -ENOENT;
+       }
+       mutex_unlock(&sgl_global.mutex);
+
+       mutex_lock(&lock->data_mutex);
+       if (lock->owner != (unsigned int)session_data) {
+               mutex_unlock(&lock->data_mutex);
+               SGL_WARN("tried to unlock the lock not-owned by calling session");
+               return -EBADRQC;
+       }
+       mutex_unlock(&lock->data_mutex);
+       sgl_remove_lock(session_data->locked_locks, key);
+       err = _sgl_unlock_lock(lock);
+       if (err < 0)
+               SGL_WARN("_sgl_unlock_lock() failed");
+
+#if MALI_INTERNAL_TIMELINE_PROFILING_ENABLED
+    _mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE |
+                               MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                               MALI_PROFILING_EVENT_REASON_SINGLE_SW_GLOBAL_UNLOCK,
+                               _mali_osk_get_pid(), _mali_osk_get_tid(), key, 0, 0);
+#endif
+
+
+       if (err < 0)
+               SGL_WARN("sgl_remove_lock() failed");
+
+       SGL_LOG();
+
+       return err;
+}
+
+/*
+ * sgl_init_lock - SGL_IOC_INIT_LOCK: create the global lock for
+ * attr->key on first use, take one reference, and record it in this
+ * session's inited-locks table.
+ *
+ * Returns 0 on success, a negative errno on failure (-ENOMEM on
+ * allocation failure).
+ */
+static int sgl_init_lock(struct sgl_session_data *session_data, struct sgl_attribute *attr)
+{
+       struct sgl_lock *lock;
+
+       int err = 0;
+
+       SGL_LOG("key: %d", attr->key);
+
+       mutex_lock(&sgl_global.mutex);
+
+       lock = sgl_get_lock(sgl_global.locks, attr->key);
+       if (lock == NULL) {
+               /* allocate and add to the global table if this is the first initialization */
+               lock = kzalloc(sizeof(struct sgl_lock), GFP_KERNEL);
+               if (lock == NULL) {
+                       err = -ENOMEM;
+                       goto out_unlock;
+               }
+
+               lock->key = attr->key;
+               /* default timeout value is 16ms */
+               lock->timeout_ms = attr->timeout_ms ? attr->timeout_ms : 16;
+
+               /* finish initializing before the lock becomes reachable
+                * through the global table */
+               init_waitqueue_head(&lock->waiting_queue);
+               INIT_LIST_HEAD(&lock->waiting_list);
+               mutex_init(&lock->waiting_list_mutex);
+               mutex_init(&lock->data_mutex);
+
+               err = sgl_insert_lock(sgl_global.locks, lock);
+               if (err < 0) {
+                       /* bug fix: the original leaked the allocation here */
+                       kfree(lock);
+                       goto out_unlock;
+               }
+       }
+       mutex_lock(&lock->data_mutex);
+       lock->refcnt++;
+       mutex_unlock(&lock->data_mutex);
+
+       /* add to the inited locks */
+       err = sgl_insert_lock(session_data->inited_locks, lock);
+
+out_unlock:
+
+       mutex_unlock(&sgl_global.mutex);
+
+       SGL_LOG();
+
+       return err;
+}
+
+/*
+ * _sgl_destroy_lock - drop one reference on @lock and free it (removing
+ * it from the global table) once the count reaches zero.
+ *
+ * Returns 0 on success, -EBADRQC for a NULL lock, or the error from
+ * sgl_remove_lock().
+ * NOTE(review): when sgl_remove_lock() fails, the lock (refcnt already
+ * 0) is neither freed nor re-referenced -- it leaks; confirm intended.
+ */
+static int _sgl_destroy_lock(struct sgl_lock *lock)
+{
+       int err = 0;
+
+       SGL_LOG();
+
+       if (lock == NULL) {
+               SGL_WARN("lock == NULL");
+               return -EBADRQC;
+       }
+
+       mutex_lock(&lock->data_mutex);
+       lock->refcnt--;
+       if (lock->refcnt == 0) {
+               mutex_unlock(&lock->data_mutex);
+               err = sgl_remove_lock(sgl_global.locks, lock->key);
+               if (err < 0)
+                       return err;
+
+               kfree(lock);
+       } else
+               mutex_unlock(&lock->data_mutex);
+
+       SGL_LOG();
+
+       return err;
+}
+
+/*
+ * sgl_destroy_lock - SGL_IOC_DESTROY_LOCK: drop this session's init
+ * reference on @key and remove it from the session's inited table.
+ *
+ * Returns 0 on success, -ENOENT when the lock is unknown (globally or to
+ * this session), -EBUSY while the session still holds it locked.
+ */
+static int sgl_destroy_lock(struct sgl_session_data *session_data, unsigned int key)
+{
+       struct sgl_lock *lock;
+
+       int err = 0;
+
+       SGL_LOG();
+
+       mutex_lock(&sgl_global.mutex);
+
+       lock = sgl_get_lock(sgl_global.locks, key);
+       if (lock == NULL) {
+               SGL_WARN("lock is not in the global locks");
+               err = -ENOENT;
+               goto out_unlock;
+       }
+       if (!sgl_get_lock(session_data->inited_locks, key)) {
+               SGL_WARN("lock is not in the inited locks");
+               err = -ENOENT;
+               goto out_unlock;
+       }
+
+       /* check if lock is still locked */
+       if (sgl_get_lock(session_data->locked_locks, key)) {
+               SGL_WARN("destroy failed. lock is still locked");
+               err = -EBUSY;
+               goto out_unlock;
+       }
+
+       err = _sgl_destroy_lock(lock);
+       if (err < 0)
+               goto out_unlock;
+
+       /* remove from the inited lock */
+       err = sgl_remove_lock(session_data->inited_locks, key);
+       if (err < 0)
+               goto out_unlock;
+
+out_unlock:
+
+       mutex_unlock(&sgl_global.mutex);
+
+       SGL_LOG();
+
+       return err;
+}
+
+/*
+ * sgl_set_data - store two opaque user words on the lock identified by
+ * user_data->key and report the current locked state back through
+ * user_data->locked.
+ *
+ * Returns 0, or -ENOENT when the key is not in the global table.
+ * NOTE(review): @user_data is the raw ioctl argument and is dereferenced
+ * directly -- there is no copy_from_user()/copy_to_user() on this path;
+ * verify this is acceptable for this driver's threat model.
+ */
+static int sgl_set_data(struct sgl_session_data *session_data, struct sgl_user_data *user_data)
+{
+       struct sgl_lock *lock;
+       int ret = 0;
+       unsigned int key = user_data->key;
+
+       SGL_LOG("key: %d", key);
+
+       mutex_lock(&sgl_global.mutex);
+
+       lock = sgl_get_lock(sgl_global.locks, key);
+       if (lock == NULL) {
+               SGL_WARN("lock is not in the inited locks");
+               mutex_unlock(&sgl_global.mutex);
+               return -ENOENT;
+       }
+       mutex_lock(&lock->data_mutex);
+       lock->user_data1 = user_data->data1;
+       lock->user_data2 = user_data->data2;
+       user_data->locked = lock->locked;
+       mutex_unlock(&lock->data_mutex);
+       mutex_unlock(&sgl_global.mutex);
+
+       SGL_LOG();
+
+       return ret;
+}
+
+/*
+ * sgl_get_data - read back the two opaque words and the locked state of
+ * the lock identified by user_data->key.
+ *
+ * Returns 0, or -ENOENT when the key is not in the global table.
+ * NOTE(review): as with sgl_set_data(), @user_data is the raw ioctl
+ * argument written to directly without copy_to_user(); confirm safety.
+ */
+static int sgl_get_data(struct sgl_session_data *session_data, struct sgl_user_data *user_data)
+{
+       struct sgl_lock *lock;
+       int ret = 0;
+       unsigned int key = user_data->key;
+
+       SGL_LOG("key: %d", key);
+       mutex_lock(&sgl_global.mutex);
+
+       lock = sgl_get_lock(sgl_global.locks, key);
+       if (lock == NULL) {
+               SGL_WARN("lock is not in the inited locks");
+               mutex_unlock(&sgl_global.mutex);
+               return -ENOENT;
+       }
+       mutex_lock(&lock->data_mutex);
+       user_data->data1 = lock->user_data1;
+       user_data->data2 = lock->user_data2;
+       user_data->locked = lock->locked;
+       mutex_unlock(&lock->data_mutex);
+       mutex_unlock(&sgl_global.mutex);
+
+       SGL_LOG();
+
+       return ret;
+}
+
+/*
+ * sgl_dump_locks - log key/refcnt/owner for every lock in the global
+ * table (SGL_IOC_DUMP_LOCKS debugging aid).
+ *
+ * Lock order: sgl_global.mutex -> bucket mutex -> lock->data_mutex.
+ */
+static void sgl_dump_locks(void)
+{
+       int i;
+       SGL_INFO("SLP_GLOBAL_LOCK DUMP START\n");
+       mutex_lock(&sgl_global.mutex);
+       for (i = 0; i < SGL_HASH_ENTRIES; i++) {
+               struct sgl_hash_head *shead;
+               struct sgl_hash_node *snode;
+               struct hlist_head *hhead;
+               struct hlist_node *pos;
+
+               shead = &((struct sgl_hash_head *)sgl_global.locks)[i];
+               /* NOTE(review): the address of an array element is never
+                * NULL, so this check is dead code */
+               if (!shead)
+                       continue;
+               mutex_lock(&shead->mutex);
+               hhead = &shead->head;
+               hlist_for_each_entry(snode, pos, hhead, node) {
+                       struct sgl_lock *lock = snode->lock;
+                       mutex_lock(&lock->data_mutex);
+                       SGL_INFO("lock key: %d, refcnt: %d, owner_pid: %d, owner_tid: %d\n",
+                                       lock->key, lock->refcnt, lock->owner_pid, lock->owner_tid);
+                       mutex_unlock(&lock->data_mutex);
+               }
+               mutex_unlock(&shead->mutex);
+       }
+       mutex_unlock(&sgl_global.mutex);
+       SGL_INFO("SLP_GLOBAL_LOCK DUMP END\n");
+}
+
+/*
+ * sgl_ioctl - dispatch SGL_IOC_* commands for the calling session.
+ *
+ * @arg is either a plain key (DESTROY/LOCK/UNLOCK) or a userspace
+ * pointer to a struct (INIT_LOCK / SET_DATA / GET_DATA).
+ * NOTE(review): pointer-style arguments are cast and dereferenced
+ * directly -- there is no copy_from_user()/copy_to_user() or access_ok()
+ * validation anywhere on this path; confirm whether this device node is
+ * reachable from untrusted userspace.
+ */
+#ifdef HAVE_UNLOCKED_IOCTL
+static long sgl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+#else
+static int sgl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+#endif
+{
+       struct sgl_session_data *session_data = (struct sgl_session_data *)file->private_data;
+
+       int err = 0;
+
+       SGL_LOG();
+
+       switch (cmd) {
+       case SGL_IOC_INIT_LOCK:
+               /* destroy lock with attribute */
+               err = sgl_init_lock(session_data, (struct sgl_attribute *)arg);
+               break;
+       case SGL_IOC_DESTROY_LOCK:
+               /* destroy lock with id(=arg) */
+               err = sgl_destroy_lock(session_data, (unsigned int)arg);
+               break;
+       case SGL_IOC_LOCK_LOCK:
+               /* lock lock with id(=arg) */
+               err = sgl_lock_lock(session_data, (unsigned int)arg);
+               break;
+       case SGL_IOC_UNLOCK_LOCK:
+               /* unlock lock with id(=arg) */
+               err = sgl_unlock_lock(session_data, (unsigned int)arg);
+               break;
+       case SGL_IOC_SET_DATA:
+               err = sgl_set_data(session_data, (struct sgl_user_data *)arg);
+               break;
+       case SGL_IOC_GET_DATA:
+               err = sgl_get_data(session_data, (struct sgl_user_data *)arg);
+               break;
+       case SGL_IOC_DUMP_LOCKS:
+               sgl_dump_locks();
+               break;
+       default:
+                       SGL_WARN("unknown type of ioctl command");
+               break;
+       }
+
+       SGL_LOG();
+
+    return err;
+}
+
+/*
+ * sgl_open - start a session: allocate the per-session tables that track
+ * which locks this session has initialized and currently holds.
+ *
+ * Returns 0 on success, -ENOMEM when any allocation fails.
+ */
+static int sgl_open(struct inode *inode, struct file *file)
+{
+       struct sgl_session_data *session_data;
+
+       SGL_LOG();
+
+       /* init per thread data using file->private_data*/
+       session_data = kzalloc(sizeof(struct sgl_session_data), GFP_KERNEL);
+       if (session_data == NULL)
+               goto err_session_data;
+
+       session_data->inited_locks = sgl_create_locks();
+       if (session_data->inited_locks == NULL) {
+               goto err_inited_locks;
+       }
+
+       session_data->locked_locks = sgl_create_locks();
+       if (session_data->locked_locks == NULL) {
+               goto err_locked_locks;
+       }
+
+       file->private_data = (void *)session_data;
+
+       /* bug fix: refcnt was updated without sgl_global.mutex, racing
+        * with concurrent open()/release() */
+       mutex_lock(&sgl_global.mutex);
+       sgl_global.refcnt++;
+       mutex_unlock(&sgl_global.mutex);
+
+       SGL_LOG();
+
+       return 0;
+
+err_locked_locks:
+       sgl_destroy_locks(session_data->inited_locks);
+err_inited_locks:
+       kfree(session_data);
+err_session_data:
+       SGL_WARN();
+       return -ENOMEM;
+}
+
+/*
+ * sgl_release - tear down one session: force-unlock every lock the
+ * session still holds, drop its references on inited locks, and free the
+ * per-session tables.
+ *
+ * NOTE(review): sgl_global.refcnt is decremented outside
+ * sgl_global.mutex, and when it reaches 0 this path destroys the char
+ * device/class -- duplicating the teardown in sgl_exit(); confirm the
+ * refcnt scheme is intentional.
+ */
+static int sgl_release(struct inode *inode, struct file *file)
+{
+       struct sgl_session_data *session_data = file->private_data;
+
+       int err = 0;
+
+       SGL_LOG();
+
+       mutex_lock(&sgl_global.mutex);
+
+       /* clean up the locked locks */
+       if (sgl_cleanup_locks(session_data->locked_locks, _sgl_unlock_lock))
+               SGL_WARN("clean-up locked locks failed");
+
+       /* clean up the inited locks */
+       if (sgl_cleanup_locks(session_data->inited_locks, _sgl_destroy_lock))
+               SGL_WARN("clean-up inited locks failed");
+
+       /* clean up per thread data */
+       file->private_data = NULL;
+
+       sgl_destroy_locks(session_data->locked_locks);
+       sgl_destroy_locks(session_data->inited_locks);
+
+       kfree(session_data);
+
+       mutex_unlock(&sgl_global.mutex);
+       sgl_global.refcnt--;
+       if (sgl_global.refcnt == 0) {
+               /* destroy global lock table */
+               sgl_destroy_locks(sgl_global.locks);
+
+           device_destroy(sgl_global.class, MKDEV(sgl_global.major, 0));
+           class_destroy(sgl_global.class);
+           unregister_chrdev(sgl_global.major, sgl_dev_name);
+       }
+
+       SGL_LOG();
+
+    return err;
+}
+
+/* file operations for the slp_global_lock char device */
+static const struct file_operations sgl_ops = {
+       .owner = THIS_MODULE,
+       .open = sgl_open,
+       .release = sgl_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+       .unlocked_ioctl = sgl_ioctl,
+#else
+       .ioctl = sgl_ioctl,
+#endif
+};
+
+/*
+ * sgl_init - module init: register the char device, create the device
+ * class/node and the global lock table.
+ *
+ * Returns 0 on success or a negative errno; on failure everything
+ * acquired so far is released in reverse order.
+ */
+static int __init sgl_init(void)
+{
+       int err = 0;
+
+       SGL_LOG();
+
+       memset(&sgl_global, 0, sizeof(struct sgl_global));
+
+       sgl_global.major = SGL_MAJOR;
+       err = register_chrdev(sgl_global.major, sgl_dev_name, &sgl_ops);
+       if (err < 0)
+               goto err_register_chrdev;
+
+       sgl_global.class = class_create(THIS_MODULE, sgl_dev_name);
+       if (IS_ERR(sgl_global.class)) {
+               err = PTR_ERR(sgl_global.class);
+               goto err_class_create;
+       }
+
+       sgl_global.device = device_create(sgl_global.class, NULL, MKDEV(sgl_global.major, 0), NULL, sgl_dev_name);
+       if (IS_ERR(sgl_global.device)) {
+               err = PTR_ERR(sgl_global.device);
+               goto err_device_create;
+       }
+
+       /* create the global lock table */
+       sgl_global.locks = sgl_create_locks();
+       if (sgl_global.locks == NULL) {
+               err = -ENOMEM;
+               goto err_create_locks;
+       }
+
+       mutex_init(&sgl_global.mutex);
+
+       sgl_global.refcnt++;
+
+       SGL_LOG();
+
+       return 0;
+
+err_create_locks:
+       /* bug fix: this path previously fell through without undoing
+        * device_create() */
+       device_destroy(sgl_global.class, MKDEV(sgl_global.major, 0));
+err_device_create:
+       /* bug fix: class_create() is paired with class_destroy(), not
+        * class_unregister() */
+       class_destroy(sgl_global.class);
+err_class_create:
+       unregister_chrdev(sgl_global.major, sgl_dev_name);
+err_register_chrdev:
+       SGL_WARN();
+       return err;
+}
+
+/*
+ * sgl_exit - module unload: drop the reference taken in sgl_init() and,
+ * when no sessions remain, destroy the global table, device node, class
+ * and char-device registration.
+ *
+ * NOTE(review): not marked static/__exit, and if refcnt > 0 at unload
+ * the device/class stay registered -- presumably open files pin the
+ * module so this cannot happen; confirm.
+ */
+void sgl_exit(void)
+{
+       SGL_LOG();
+
+       sgl_global.refcnt--;
+       if (sgl_global.refcnt == 0) {
+               mutex_destroy(&sgl_global.mutex);
+
+               /* destroy global lock table */
+               sgl_destroy_locks(sgl_global.locks);
+
+           device_destroy(sgl_global.class, MKDEV(sgl_global.major, 0));
+           class_destroy(sgl_global.class);
+           unregister_chrdev(sgl_global.major, sgl_dev_name);
+       }
+
+       SGL_LOG();
+
+    return;
+}
+
+/* register init/exit entry points and license for this driver */
+module_init(sgl_init);
+module_exit(sgl_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/slp_global_lock.h b/drivers/misc/slp_global_lock.h
new file mode 100644 (file)
index 0000000..db4d585
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __SLP_GLOBAL_LOCK_H__
+#define __SLP_GLOBAL_LOCK_H__
+
+#include <linux/ioctl.h>
+
+/* ioctl magic number and fixed char-device major for the lock device */
+#define SGL_IOC_BASE   0x32
+#define SGL_MAJOR      224
+
+/* Argument of SGL_IOC_INIT_LOCK: lock id plus wait timeout in ms
+ * (0 selects the driver default). */
+struct sgl_attribute {
+       unsigned int key;
+       unsigned int timeout_ms;
+};
+
+/* Argument of SGL_IOC_SET_DATA/GET_DATA: two opaque words stored on the
+ * lock identified by @key; @locked reports the lock state back. */
+struct sgl_user_data {
+       unsigned int key;
+       unsigned int data1;
+       unsigned int data2;
+       unsigned int locked;
+};
+
+/* ioctl command ordinals -- part of the userspace ABI, keep stable */
+typedef enum {
+       _SGL_INIT_LOCK = 1,
+       _SGL_DESTROY_LOCK,
+       _SGL_LOCK_LOCK,
+       _SGL_UNLOCK_LOCK,
+       _SGL_SET_DATA,
+       _SGL_GET_DATA,
+       _SGL_DUMP_LOCKS,
+} _sgl_ioctls;
+
+/* NOTE(review): the pointer types in _IOW() encode sizeof(pointer), not
+ * sizeof(struct), in the command number -- unconventional, but changing
+ * it now would break the established ABI. */
+#define SGL_IOC_INIT_LOCK                      _IOW(SGL_IOC_BASE, _SGL_INIT_LOCK, struct sgl_attribute *)
+#define SGL_IOC_DESTROY_LOCK           _IOW(SGL_IOC_BASE, _SGL_DESTROY_LOCK, unsigned int)
+#define SGL_IOC_LOCK_LOCK                      _IOW(SGL_IOC_BASE, _SGL_LOCK_LOCK, unsigned int)
+#define SGL_IOC_UNLOCK_LOCK                    _IOW(SGL_IOC_BASE, _SGL_UNLOCK_LOCK, unsigned int)
+#define SGL_IOC_SET_DATA                       _IOW(SGL_IOC_BASE, _SGL_SET_DATA, struct sgl_user_data *)
+#define SGL_IOC_GET_DATA                       _IOW(SGL_IOC_BASE, _SGL_GET_DATA, struct sgl_user_data *)
+#define SGL_IOC_DUMP_LOCKS                     _IOW(SGL_IOC_BASE, _SGL_DUMP_LOCKS, void *)
+
+#endif /* __SLP_GLOBAL_LOCK_H__ */
index 6f56b56..0fde098 100644 (file)
@@ -129,7 +129,7 @@ static int akm8975_ecs_set_mode(struct akm8975_data *akm, char mode)
        udelay(300);
 
        /* Workaround: Some sensors (defective?) require more delay */
        udelay(300);
 
        /* Workaround: Some sensors (defective?) require more delay */
-       msleep(5);
+       mdelay(5);
 
        return 0;
 }
 
        return 0;
 }
@@ -223,6 +223,11 @@ static int akm8975_get_raw_data(struct akm8975_data *akm,
                goto done;
        }
 
                goto done;
        }
 
+       /* akm8975_wait_for_data_ready() revice IRQ signal.
+        * But, akm8975 is not ready to send magnetic data.
+        */
+       mdelay(5);
+
        ret = i2c_smbus_read_i2c_block_data(akm->this_client,
                        AK8975_REG_ST1, sizeof(data), data);
        if (ret != sizeof(data)) {
        ret = i2c_smbus_read_i2c_block_data(akm->this_client,
                        AK8975_REG_ST1, sizeof(data), data);
        if (ret != sizeof(data)) {
@@ -468,7 +473,7 @@ static void ak8975c_selftest(struct akm8975_data *ak_data)
 
        /* wait for data ready */
        while (1) {
 
        /* wait for data ready */
        while (1) {
-               msleep(20);
+               mdelay(5);
                if (i2c_smbus_read_byte_data(ak_data->this_client,
                                                AK8975_REG_ST1) == 1) {
                        break;
                if (i2c_smbus_read_byte_data(ak_data->this_client,
                                                AK8975_REG_ST1) == 1) {
                        break;
@@ -620,7 +625,8 @@ static ssize_t ak8975_adc(struct device *dev,
                pr_err("%s: wait for data ready failed\n", __func__);
                return err;
        }
                pr_err("%s: wait for data ready failed\n", __func__);
                return err;
        }
-       msleep(20);
+
+       mdelay(5);
        /* get the value and report it */
        err = i2c_smbus_read_i2c_block_data(ak_data->this_client,
                                        AK8975_REG_ST1, sizeof(buf), buf);
        /* get the value and report it */
        err = i2c_smbus_read_i2c_block_data(ak_data->this_client,
                                        AK8975_REG_ST1, sizeof(buf), buf);
index f343808..349d165 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/uaccess.h>
 #include <linux/consolemap.h>
 #include <linux/vt_kern.h>
 #include <asm/uaccess.h>
 #include <linux/consolemap.h>
 #include <linux/vt_kern.h>
+#include <linux/delay.h>
 
 static unsigned short translations[][256] = {
   /* 8-bit Latin-1 mapped to Unicode -- trivial mapping */
 
 static unsigned short translations[][256] = {
   /* 8-bit Latin-1 mapped to Unicode -- trivial mapping */
@@ -477,6 +478,10 @@ con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos)
        if (!(p2 = p1[n = (unicode >> 6) & 0x1f])) {
                p2 = p1[n] = kmalloc(64*sizeof(u16), GFP_KERNEL);
                if (!p2) return -ENOMEM;
        if (!(p2 = p1[n = (unicode >> 6) & 0x1f])) {
                p2 = p1[n] = kmalloc(64*sizeof(u16), GFP_KERNEL);
                if (!p2) return -ENOMEM;
+
+               /* WORKAROUND: This delay time prevent NULL point error. */
+               udelay(1);
+
                memset(p2, 0xff, 64*sizeof(u16)); /* No glyphs for the characters (yet) */
        }
 
                memset(p2, 0xff, 64*sizeof(u16)); /* No glyphs for the characters (yet) */
        }
 
index 217cfd7..c7bf352 100644 (file)
@@ -25,6 +25,8 @@ source "drivers/gpu/stub/Kconfig"
 
 source "drivers/gpu/ion/Kconfig"
 
 
 source "drivers/gpu/ion/Kconfig"
 
+source "drivers/gpu/arm/Kconfig"
+
 config VGASTATE
        tristate
        default n
 config VGASTATE
        tristate
        default n
index 5b831df..ab320e7 100644 (file)
@@ -777,6 +777,7 @@ struct drm_event_vblank {
 #define DRM_CAP_VBLANK_HIGH_CRTC 0x2
 #define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
 #define DRM_CAP_DUMB_PREFER_SHADOW 0x4
 #define DRM_CAP_VBLANK_HIGH_CRTC 0x2
 #define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
 #define DRM_CAP_DUMB_PREFER_SHADOW 0x4
+#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
 
 /* typedef area */
 #ifndef __KERNEL__
 
 /* typedef area */
 #ifndef __KERNEL__
index dd73104..ef5eaf5 100644 (file)
@@ -1439,6 +1439,8 @@ extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
                                     struct timeval *vblanktime);
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
                                     struct timeval *vblanktime);
+extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
+                                    struct drm_pending_vblank_event *e);
 extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
 extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
@@ -1511,6 +1513,7 @@ extern unsigned int drm_debug;
 
 extern unsigned int drm_vblank_offdelay;
 extern unsigned int drm_timestamp_precision;
 
 extern unsigned int drm_vblank_offdelay;
 extern unsigned int drm_timestamp_precision;
+extern unsigned int drm_timestamp_monotonic;
 
 extern struct class *drm_class;
 extern struct proc_dir_entry *drm_proc_root;
 
 extern struct class *drm_class;
 extern struct proc_dir_entry *drm_proc_root;
@@ -1563,9 +1566,8 @@ extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *s
 
 void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
 void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
 
 void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
 void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
-int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
-void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
+int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
 
 int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj);
 int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf,
 
 int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj);
 int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf,
index 5b449ee..f1ab763 100644 (file)
@@ -446,6 +446,8 @@ struct input_keymap_entry {
 #define KEY_FOLDER_CLOSE       252  /*only use Grande CHN CTC */
 #define KEY_3G 253  /*only use Grande CHN CTC */
 
 #define KEY_FOLDER_CLOSE       252  /*only use Grande CHN CTC */
 #define KEY_3G 253  /*only use Grande CHN CTC */
 
+#define KEY_RECENT             254     /* Key recent for Samsung game pad */
+
 /* Code 255 is reserved for special needs of AT keyboard driver */
 
 #define BTN_MISC               0x100
 /* Code 255 is reserved for special needs of AT keyboard driver */
 
 #define BTN_MISC               0x100
@@ -501,6 +503,7 @@ struct input_keymap_entry {
 #define BTN_MODE               0x13c
 #define BTN_THUMBL             0x13d
 #define BTN_THUMBR             0x13e
 #define BTN_MODE               0x13c
 #define BTN_THUMBL             0x13d
 #define BTN_THUMBR             0x13e
+#define BTN_GAME               0x13f /* Add game button for samsung bluetooth keypad */
 
 #define BTN_DIGI               0x140
 #define BTN_TOOL_PEN           0x140
 
 #define BTN_DIGI               0x140
 #define BTN_TOOL_PEN           0x140
index cd32370..61ce7cd 100644 (file)
@@ -28,7 +28,11 @@ BuildRequires:  module-init-tools
 %ifarch %arm
 BuildRequires:  u-boot-tools
 %endif
 %ifarch %arm
 BuildRequires:  u-boot-tools
 %endif
-ExclusiveArch: %arm i586 i686
+ExclusiveArch: %arm
+
+%if "%{?tizen_profile_name}" != "mobile"
+ExcludeArch: %{arm}
+%endif
 
 %package -n %{variant}-linux-kernel
 Summary: Tizen kernel
 
 %package -n %{variant}-linux-kernel
 Summary: Tizen kernel
@@ -41,14 +45,11 @@ The Linux Kernel, the operating system core itself
 %description -n %{variant}-linux-kernel
 This package contains the Linux kernel for Tizen (%{profile} profile, arch %{buildarch}, target board %{target_board})
 
 %description -n %{variant}-linux-kernel
 This package contains the Linux kernel for Tizen (%{profile} profile, arch %{buildarch}, target board %{target_board})
 
-%package -n kernel-headers-%{name}
+%package -n %{variant}-linux-kernel-headers
 Summary:        Linux support headers for userspace development
 Group:          Development/System
 Summary:        Linux support headers for userspace development
 Group:          Development/System
-Obsoletes:     kernel-headers
-Provides:      kernel-headers = %{version}-%{release}
-ExclusiveArch: %arm i586 i686
 
 
-%description -n kernel-headers-%{name}
+%description -n %{variant}-linux-kernel-headers
 This package provides userspaces headers from the Linux kernel.  These
 headers are used by the installed headers for GNU glibc and other system
  libraries.
 This package provides userspaces headers from the Linux kernel.  These
 headers are used by the installed headers for GNU glibc and other system
  libraries.
@@ -58,7 +59,6 @@ Summary:      Linux kernel modules
 Group:          Development/System
 Provides: kernel-modules = %{fullVersion}
 Provides: kernel-modules-uname-r = %{fullVersion}
 Group:          Development/System
 Provides: kernel-modules = %{fullVersion}
 Provides: kernel-modules-uname-r = %{fullVersion}
-ExclusiveArch: %arm
 
 %description -n %{variant}-linux-kernel-modules
 This package provides kernel modules.
 
 %description -n %{variant}-linux-kernel-modules
 This package provides kernel modules.
@@ -68,7 +68,6 @@ Summary:        Prebuild Linux kernel
 Group:          Development/System
 Provides: kernel-devel = %{fullVersion}
 Provides: kernel-devel-uname-r = %{fullVersion}
 Group:          Development/System
 Provides: kernel-devel = %{fullVersion}
 Provides: kernel-devel-uname-r = %{fullVersion}
-ExclusiveArch: %arm
 
 %description -n %{variant}-linux-kernel-devel
 Prebuild linux kernel
 
 %description -n %{variant}-linux-kernel-devel
 Prebuild linux kernel
@@ -76,7 +75,6 @@ Prebuild linux kernel
 %package -n %{variant}-linux-kernel-debug
 Summary:       Debug package for %{variant} kernel
 Group:          Development/System
 %package -n %{variant}-linux-kernel-debug
 Summary:       Debug package for %{variant} kernel
 Group:          Development/System
-ExclusiveArch: %arm
 
 %description -n %{variant}-linux-kernel-debug
 Debug package for %{variant} kernel
 
 %description -n %{variant}-linux-kernel-debug
 Debug package for %{variant} kernel
@@ -86,7 +84,6 @@ Debug package for %{variant} kernel
 
 %build
 # 1. Compile sources
 
 %build
 # 1. Compile sources
-%ifarch %arm
 # Make sure EXTRAVERSION says what we want it to say
 sed -i "s/^EXTRAVERSION.*/EXTRAVERSION = -%{config_name}/" Makefile
 
 # Make sure EXTRAVERSION says what we want it to say
 sed -i "s/^EXTRAVERSION.*/EXTRAVERSION = -%{config_name}/" Makefile
 
@@ -95,7 +92,6 @@ make %{?_smp_mflags} uImage
 
 # 2. Build modules
 make modules %{?_smp_mflags}
 
 # 2. Build modules
 make modules %{?_smp_mflags}
-%endif
 
 # 4. Create tar repo for build directory
 tar cpf linux-kernel-build-%{fullVersion}.tar .
 
 # 4. Create tar repo for build directory
 tar cpf linux-kernel-build-%{fullVersion}.tar .
@@ -107,7 +103,6 @@ QA_SKIP_BUILD_ROOT="DO_NOT_WANT"; export QA_SKIP_BUILD_ROOT
 # 1. Destination directories
 mkdir -p %{buildroot}/usr/src/linux-kernel-build-%{fullVersion}
 
 # 1. Destination directories
 mkdir -p %{buildroot}/usr/src/linux-kernel-build-%{fullVersion}
 
-%ifarch %arm
 mkdir -p %{buildroot}/boot/
 mkdir -p %{buildroot}/lib/modules/%{fullVersion}
 mkdir -p %{buildroot}/var/tmp/kernel/
 mkdir -p %{buildroot}/boot/
 mkdir -p %{buildroot}/lib/modules/%{fullVersion}
 mkdir -p %{buildroot}/var/tmp/kernel/
@@ -123,12 +118,10 @@ install -m 644 System.map %{buildroot}/var/tmp/kernel/System.map
 
 # 3. Install modules
 make -j8 INSTALL_MOD_STRIP=1 INSTALL_MOD_PATH=%{buildroot}/ modules_install KERNELRELEASE=%{fullVersion}
 
 # 3. Install modules
 make -j8 INSTALL_MOD_STRIP=1 INSTALL_MOD_PATH=%{buildroot}/ modules_install KERNELRELEASE=%{fullVersion}
-%endif
 
 # 4. Install kernel headers
 make -j8 INSTALL_PATH=%{buildroot} INSTALL_MOD_PATH=%{buildroot} INSTALL_HDR_PATH=%{buildroot}/usr headers_install
 
 
 # 4. Install kernel headers
 make -j8 INSTALL_PATH=%{buildroot} INSTALL_MOD_PATH=%{buildroot} INSTALL_HDR_PATH=%{buildroot}/usr headers_install
 
-%ifarch %arm
 # 5. Restore source and build irectory
 tar -xf linux-kernel-build-%{fullVersion}.tar -C %{buildroot}/usr/src/linux-kernel-build-%{fullVersion}
 #ls %{buildroot}/usr/src/linux-kernel-build-%{fullVersion}
 # 5. Restore source and build irectory
 tar -xf linux-kernel-build-%{fullVersion}.tar -C %{buildroot}/usr/src/linux-kernel-build-%{fullVersion}
 #ls %{buildroot}/usr/src/linux-kernel-build-%{fullVersion}
@@ -156,12 +149,10 @@ find %{buildroot}/usr/src/linux-kernel-build-%{fullVersion} -name "*.S" -exec rm
 find %{buildroot}/usr/src/linux-kernel-build-%{fullVersion} -name "*.s" -exec rm -f {} \;
 find %{buildroot}/usr/src/linux-kernel-build-%{fullVersion} -name "*.c" -not -path "%{buildroot}/usr/src/linux-kernel-build-%{fullVersion}/scripts/*" -exec rm -f {} \;
 find %{buildroot}/usr/src/linux-kernel-build-%{fullVersion} -size 0c -exec rm -f {} \;
 find %{buildroot}/usr/src/linux-kernel-build-%{fullVersion} -name "*.s" -exec rm -f {} \;
 find %{buildroot}/usr/src/linux-kernel-build-%{fullVersion} -name "*.c" -not -path "%{buildroot}/usr/src/linux-kernel-build-%{fullVersion}/scripts/*" -exec rm -f {} \;
 find %{buildroot}/usr/src/linux-kernel-build-%{fullVersion} -size 0c -exec rm -f {} \;
-%endif
 
 find %{buildroot}/usr/include -name "\.install"  -exec rm -f {} \;
 find %{buildroot}/usr -name "..install.cmd" -exec rm -f {} \;
 
 
 find %{buildroot}/usr/include -name "\.install"  -exec rm -f {} \;
 find %{buildroot}/usr -name "..install.cmd" -exec rm -f {} \;
 
-%ifarch %arm
 # 6.1 Clean Documentation directory
 find %{buildroot}/usr/src/linux-kernel-build-%{fullVersion}/Documentation -type f ! -name "Makefile" ! -name "*.sh" ! -name "*.pl" -exec rm -f {} \;
 
 # 6.1 Clean Documentation directory
 find %{buildroot}/usr/src/linux-kernel-build-%{fullVersion}/Documentation -type f ! -name "Makefile" ! -name "*.sh" ! -name "*.pl" -exec rm -f {} \;
 
@@ -181,16 +172,14 @@ find %{buildroot}/lib/modules/ -name "*.ko"
 rm -f %{buildroot}/lib/modules/%{fullVersion}/build
 rm -f %{buildroot}/lib/modules/%{fullVersion}/source
 ln -sf /usr/src/linux-kernel-build-%{fullVersion} %{buildroot}/lib/modules/%{fullVersion}/build
 rm -f %{buildroot}/lib/modules/%{fullVersion}/build
 rm -f %{buildroot}/lib/modules/%{fullVersion}/source
 ln -sf /usr/src/linux-kernel-build-%{fullVersion} %{buildroot}/lib/modules/%{fullVersion}/build
-%endif
 
 %clean
 rm -rf %{buildroot}
 
 
 %clean
 rm -rf %{buildroot}
 
-%files -n kernel-headers-%{name}
+%files -n %{variant}-linux-kernel-headers
 %defattr(-,root,root)
 /usr/include/*
 
 %defattr(-,root,root)
 /usr/include/*
 
-%ifarch %arm
 %files -n %{variant}-linux-kernel-devel
 %defattr(-,root,root)
 /usr/src/linux-kernel-build-%{fullVersion}
 %files -n %{variant}-linux-kernel-devel
 %defattr(-,root,root)
 /usr/src/linux-kernel-build-%{fullVersion}
@@ -214,4 +203,3 @@ rm -rf %{buildroot}
 /boot/System.map*
 /boot/config*
 /boot/uImage
 /boot/System.map*
 /boot/config*
 /boot/uImage
-%endif
index 80dd59f..42c4775 100644 (file)
@@ -137,6 +137,11 @@ struct smk_port_label {
        struct smack_known      *smk_out;       /* outgoing label */
 };
 
        struct smack_known      *smk_out;       /* outgoing label */
 };
 
+struct smack_onlycap {
+       struct list_head        list;
+       struct smack_known      *smk_label;
+};
+
 /*
  * Mount options
  */
 /*
  * Mount options
  */
@@ -235,6 +240,7 @@ struct smack_known *smk_import_entry(const char *, int);
 void smk_insert_entry(struct smack_known *skp);
 struct smack_known *smk_find_entry(const char *);
 u32 smack_to_secid(const char *);
 void smk_insert_entry(struct smack_known *skp);
 struct smack_known *smk_find_entry(const char *);
 u32 smack_to_secid(const char *);
+int smack_privileged(int cap);
 
 /*
  * Shared data.
 
 /*
  * Shared data.
@@ -242,7 +248,6 @@ u32 smack_to_secid(const char *);
 extern int smack_cipso_direct;
 extern int smack_cipso_mapped;
 extern struct smack_known *smack_net_ambient;
 extern int smack_cipso_direct;
 extern int smack_cipso_mapped;
 extern struct smack_known *smack_net_ambient;
-extern char *smack_onlycap;
 extern const char *smack_cipso_option;
 
 extern struct smack_known smack_known_floor;
 extern const char *smack_cipso_option;
 
 extern struct smack_known smack_known_floor;
@@ -262,6 +267,9 @@ extern struct kmem_cache *smack_master_list_cache;
 
 extern struct security_operations smack_ops;
 
 
 extern struct security_operations smack_ops;
 
+extern struct mutex     smack_onlycap_lock;
+extern struct list_head smack_onlycap_list;
+
 #define SMACK_HASH_SLOTS 16
 extern struct hlist_head smack_known_hash[SMACK_HASH_SLOTS];
 
 #define SMACK_HASH_SLOTS 16
 extern struct hlist_head smack_known_hash[SMACK_HASH_SLOTS];
 
@@ -307,21 +315,6 @@ static inline struct smack_known *smk_of_current(void)
        return smk_of_task(current_security());
 }
 
        return smk_of_task(current_security());
 }
 
-/*
- * Is the task privileged and allowed to be privileged
- * by the onlycap rule.
- */
-static inline int smack_privileged(int cap)
-{
-       struct smack_known *skp = smk_of_current();
-
-       if (!capable(cap))
-               return 0;
-       if (smack_onlycap == NULL || smack_onlycap == skp->smk_known)
-               return 1;
-       return 0;
-}
-
 #ifdef CONFIG_SECURITY_SMACK_PERMISSIVE_MODE
 /*
  * permissive mode
 #ifdef CONFIG_SECURITY_SMACK_PERMISSIVE_MODE
 /*
  * permissive mode
index de73a94..8e512cc 100644 (file)
@@ -601,3 +601,44 @@ u32 smack_to_secid(const char *smack)
                return 0;
        return skp->smk_secid;
 }
                return 0;
        return skp->smk_secid;
 }
+
+/*
+ * Unless a process is running with one of these labels
+ * even having CAP_MAC_OVERRIDE isn't enough to grant
+ * privilege to violate MAC policy. If no labels are
+ * designated (the empty list case) capabilities apply to
+ * everyone.
+ */
+LIST_HEAD(smack_onlycap_list);
+DEFINE_MUTEX(smack_onlycap_lock);
+
+/*
+ * Is the task privileged and allowed to be privileged
+ * by the onlycap rule.
+ *
+ * Returns 1 if the task is allowed to be privileged, 0 if it's not.
+ */
+int smack_privileged(int cap)
+{
+       struct smack_known *skp = smk_of_current();
+       struct smack_onlycap *sop;
+
+       if (!capable(cap))
+               return 0;
+
+       rcu_read_lock();
+       if (list_empty(&smack_onlycap_list)) {
+               rcu_read_unlock();
+               return 1;
+       }
+
+       list_for_each_entry_rcu(sop, &smack_onlycap_list, list) {
+               if (sop->smk_label == skp) {
+                       rcu_read_unlock();
+                       return 1;
+               }
+       }
+       rcu_read_unlock();
+
+       return 0;
+}
index 91bb7a8..53335e4 100644 (file)
@@ -1378,6 +1378,9 @@ static int smack_file_receive(struct file *file)
        int may = 0;
        struct smk_audit_info ad;
 
        int may = 0;
        struct smk_audit_info ad;
 
+       if (unlikely(IS_PRIVATE(file->f_path.dentry->d_inode)))
+               return 0;
+
        smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
        smk_ad_setfield_u_fs_path(&ad, file->f_path);
        /*
        smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
        smk_ad_setfield_u_fs_path(&ad, file->f_path);
        /*
index 54e2fb1..efa575a 100644 (file)
@@ -86,16 +86,6 @@ int smack_cipso_direct = SMACK_CIPSO_DIRECT_DEFAULT;
 int smack_cipso_mapped = SMACK_CIPSO_MAPPED_DEFAULT;
 
 /*
 int smack_cipso_mapped = SMACK_CIPSO_MAPPED_DEFAULT;
 
 /*
- * Unless a process is running with this label even
- * having CAP_MAC_OVERRIDE isn't enough to grant
- * privilege to violate MAC policy. If no label is
- * designated (the NULL case) capabilities apply to
- * everyone. It is expected that the hat (^) label
- * will be used if any label is used.
- */
-char *smack_onlycap;
-
-/*
  * Certain IP addresses may be designated as single label hosts.
  * Packets are sent there unlabeled, but only from tasks that
  * can write to the specified label.
  * Certain IP addresses may be designated as single label hosts.
  * Packets are sent there unlabeled, but only from tasks that
  * can write to the specified label.
@@ -529,23 +519,17 @@ static void *smk_seq_start(struct seq_file *s, loff_t *pos,
                                struct list_head *head)
 {
        struct list_head *list;
                                struct list_head *head)
 {
        struct list_head *list;
+       int i = *pos;
+
+       rcu_read_lock();
+       for (list = rcu_dereference(list_next_rcu(head));
+               list != head;
+               list = rcu_dereference(list_next_rcu(list))) {
+               if (i-- == 0)
+                       return list;
+       }
 
 
-       /*
-        * This is 0 the first time through.
-        */
-       if (s->index == 0)
-               s->private = head;
-
-       if (s->private == NULL)
-               return NULL;
-
-       list = s->private;
-       if (list_empty(list))
-               return NULL;
-
-       if (s->index == 0)
-               return list->next;
-       return list;
+       return NULL;
 }
 
 static void *smk_seq_next(struct seq_file *s, void *v, loff_t *pos,
 }
 
 static void *smk_seq_next(struct seq_file *s, void *v, loff_t *pos,
@@ -553,17 +537,15 @@ static void *smk_seq_next(struct seq_file *s, void *v, loff_t *pos,
 {
        struct list_head *list = v;
 
 {
        struct list_head *list = v;
 
-       if (list_is_last(list, head)) {
-               s->private = NULL;
-               return NULL;
-       }
-       s->private = list->next;
-       return list->next;
+       ++*pos;
+       list = rcu_dereference(list_next_rcu(list));
+
+       return (list == head) ? NULL : list;
 }
 
 static void smk_seq_stop(struct seq_file *s, void *v)
 {
 }
 
 static void smk_seq_stop(struct seq_file *s, void *v)
 {
-       /* No-op */
+       rcu_read_unlock();
 }
 
 static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max)
 }
 
 static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max)
@@ -619,7 +601,7 @@ static int load_seq_show(struct seq_file *s, void *v)
 {
        struct list_head *list = v;
        struct smack_master_list *smlp =
 {
        struct list_head *list = v;
        struct smack_master_list *smlp =
-                list_entry(list, struct smack_master_list, list);
+               list_entry_rcu(list, struct smack_master_list, list);
 
        smk_rule_show(s, smlp->smk_rule, SMK_LABELLEN);
 
 
        smk_rule_show(s, smlp->smk_rule, SMK_LABELLEN);
 
@@ -832,7 +814,7 @@ static int cipso_seq_show(struct seq_file *s, void *v)
 {
        struct list_head  *list = v;
        struct smack_known *skp =
 {
        struct list_head  *list = v;
        struct smack_known *skp =
-                list_entry(list, struct smack_known, list);
+               list_entry_rcu(list, struct smack_known, list);
        struct netlbl_lsm_secattr_catmap *cmp = skp->smk_netlabel.attr.mls.cat;
        char sep = '/';
        int i;
        struct netlbl_lsm_secattr_catmap *cmp = skp->smk_netlabel.attr.mls.cat;
        char sep = '/';
        int i;
@@ -1021,7 +1003,7 @@ static int cipso2_seq_show(struct seq_file *s, void *v)
 {
        struct list_head  *list = v;
        struct smack_known *skp =
 {
        struct list_head  *list = v;
        struct smack_known *skp =
-                list_entry(list, struct smack_known, list);
+               list_entry_rcu(list, struct smack_known, list);
        struct netlbl_lsm_secattr_catmap *cmp = skp->smk_netlabel.attr.mls.cat;
        char sep = '/';
        int i;
        struct netlbl_lsm_secattr_catmap *cmp = skp->smk_netlabel.attr.mls.cat;
        char sep = '/';
        int i;
@@ -1105,7 +1087,7 @@ static int netlbladdr_seq_show(struct seq_file *s, void *v)
 {
        struct list_head *list = v;
        struct smk_netlbladdr *skp =
 {
        struct list_head *list = v;
        struct smk_netlbladdr *skp =
-                        list_entry(list, struct smk_netlbladdr, list);
+                       list_entry_rcu(list, struct smk_netlbladdr, list);
        unsigned char *hp = (char *) &skp->smk_host.sin_addr.s_addr;
        int maskn;
        u32 temp_mask = be32_to_cpu(skp->smk_mask.s_addr);
        unsigned char *hp = (char *) &skp->smk_host.sin_addr.s_addr;
        int maskn;
        u32 temp_mask = be32_to_cpu(skp->smk_mask.s_addr);
@@ -1666,34 +1648,78 @@ static const struct file_operations smk_ambient_ops = {
        .llseek         = default_llseek,
 };
 
        .llseek         = default_llseek,
 };
 
-/**
- * smk_read_onlycap - read() for /smack/onlycap
- * @filp: file pointer, not actually used
- * @buf: where to put the result
- * @cn: maximum to send along
- * @ppos: where to start
- *
- * Returns number of bytes read or error code, as appropriate
+/*
+ * Seq_file operations for /smack/onlycap
  */
  */
-static ssize_t smk_read_onlycap(struct file *filp, char __user *buf,
-                               size_t cn, loff_t *ppos)
+static void *onlycap_seq_start(struct seq_file *s, loff_t *pos)
 {
 {
-       char *smack = "";
-       ssize_t rc = -EINVAL;
-       int asize;
+       return smk_seq_start(s, pos, &smack_onlycap_list);
+}
 
 
-       if (*ppos != 0)
-               return 0;
+static void *onlycap_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+       return smk_seq_next(s, v, pos, &smack_onlycap_list);
+}
+
+static int onlycap_seq_show(struct seq_file *s, void *v)
+{
+       struct list_head *list = v;
+       struct smack_onlycap *sop =
+               list_entry_rcu(list, struct smack_onlycap, list);
 
 
-       if (smack_onlycap != NULL)
-               smack = smack_onlycap;
+       seq_puts(s, sop->smk_label->smk_known);
+       seq_putc(s, ' ');
 
 
-       asize = strlen(smack) + 1;
+       return 0;
+}
 
 
-       if (cn >= asize)
-               rc = simple_read_from_buffer(buf, cn, ppos, smack, asize);
+static const struct seq_operations onlycap_seq_ops = {
+       .start = onlycap_seq_start,
+       .next  = onlycap_seq_next,
+       .show  = onlycap_seq_show,
+       .stop  = smk_seq_stop,
+};
 
 
-       return rc;
+static int smk_open_onlycap(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &onlycap_seq_ops);
+}
+
+/**
+ * list_swap_rcu - swap public list with a private one in RCU-safe way
+ * The caller must hold appropriate mutex to prevent concurrent modifications
+ * to the public list.
+ * Private list is assumed to be not accessible to other threads yet.
+ *
+ * @public: public list
+ * @private: private list
+ */
+static void list_swap_rcu(struct list_head *public, struct list_head *private)
+{
+       struct list_head *first, *last;
+
+       if (list_empty(public)) {
+               list_splice_init_rcu(private, public, synchronize_rcu);
+       } else {
+               /* Remember public list before replacing it */
+               first = public->next;
+               last = public->prev;
+
+               /* Publish private list in place of public in RCU-safe way */
+               private->prev->next = public;
+               private->next->prev = public;
+               rcu_assign_pointer(public->next, private->next);
+               public->prev = private->prev;
+
+               synchronize_rcu();
+
+               /* When all readers are done with the old public list,
+                * attach it in place of private */
+               private->next = first;
+               private->prev = last;
+               first->prev = private;
+               last->next = private;
+       }
 }
 
 /**
 }
 
 /**
@@ -1709,24 +1735,48 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
 {
        char *data;
                                 size_t count, loff_t *ppos)
 {
        char *data;
-       struct smack_known *skp = smk_of_task(current->cred->security);
+       char *data_parse;
+       char *tok;
+       struct smack_known *skp;
+       struct smack_onlycap *sop;
+       struct smack_onlycap *sop2;
+       LIST_HEAD(list_tmp);
        int rc = count;
 
        if (!smack_privileged(CAP_MAC_ADMIN))
                return -EPERM;
 
        int rc = count;
 
        if (!smack_privileged(CAP_MAC_ADMIN))
                return -EPERM;
 
-       /*
-        * This can be done using smk_access() but is done
-        * explicitly for clarity. The smk_access() implementation
-        * would use smk_access(smack_onlycap, MAY_WRITE)
-        */
-       if (smack_onlycap != NULL && smack_onlycap != skp->smk_known)
-               return -EPERM;
-
-       data = kzalloc(count, GFP_KERNEL);
+       data = kzalloc(count + 1, GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;
 
        if (data == NULL)
                return -ENOMEM;
 
+       if (copy_from_user(data, buf, count) != 0) {
+               kfree(data);
+               return -EFAULT;
+       }
+
+       data_parse = data;
+       while ((tok = strsep(&data_parse, " ")) != NULL) {
+               if (!*tok)
+                       continue;
+
+               skp = smk_import_entry(tok, 0);
+               if (IS_ERR(skp)) {
+                       rc = PTR_ERR(skp);
+                       break;
+               }
+
+               sop = kzalloc(sizeof(*sop), GFP_KERNEL);
+               if (sop == NULL) {
+                       rc = -ENOMEM;
+                       break;
+               }
+
+               sop->smk_label = skp;
+               list_add_rcu(&sop->list, &list_tmp);
+       }
+       kfree(data);
+
        /*
         * Should the null string be passed in unset the onlycap value.
         * This seems like something to be careful with as usually
        /*
         * Should the null string be passed in unset the onlycap value.
         * This seems like something to be careful with as usually
@@ -1736,20 +1786,31 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
         *
         * smk_import will also reject a label beginning with '-',
         * so "-usecapabilities" will also work.
         *
         * smk_import will also reject a label beginning with '-',
         * so "-usecapabilities" will also work.
+        *
+        * But do so only on invalid label, not on system errors.
+        * The invalid label must be first to count as clearing attempt.
         */
         */
-       if (copy_from_user(data, buf, count) != 0)
-               rc = -EFAULT;
-       else
-               smack_onlycap = smk_import(data, count);
+       if (rc == -EINVAL && list_empty(&list_tmp))
+               rc = count;
+
+       if (rc >= 0) {
+               mutex_lock(&smack_onlycap_lock);
+               list_swap_rcu(&smack_onlycap_list, &list_tmp);
+               mutex_unlock(&smack_onlycap_lock);
+       }
+
+       list_for_each_entry_safe(sop, sop2, &list_tmp, list)
+               kfree(sop);
 
 
-       kfree(data);
        return rc;
 }
 
 static const struct file_operations smk_onlycap_ops = {
        return rc;
 }
 
 static const struct file_operations smk_onlycap_ops = {
-       .read           = smk_read_onlycap,
+       .open           = smk_open_onlycap,
+       .read           = seq_read,
        .write          = smk_write_onlycap,
        .write          = smk_write_onlycap,
-       .llseek         = default_llseek,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
 };
 
 /**
 };
 
 /**
@@ -1839,7 +1900,7 @@ static int load_self_seq_show(struct seq_file *s, void *v)
 {
        struct list_head *list = v;
        struct smack_rule *srp =
 {
        struct list_head *list = v;
        struct smack_rule *srp =
-                list_entry(list, struct smack_rule, list);
+               list_entry_rcu(list, struct smack_rule, list);
 
        smk_rule_show(s, srp, SMK_LABELLEN);
 
 
        smk_rule_show(s, srp, SMK_LABELLEN);
 
@@ -1962,7 +2023,7 @@ static int load2_seq_show(struct seq_file *s, void *v)
 {
        struct list_head *list = v;
        struct smack_master_list *smlp =
 {
        struct list_head *list = v;
        struct smack_master_list *smlp =
-                list_entry(list, struct smack_master_list, list);
+               list_entry_rcu(list, struct smack_master_list, list);
 
        smk_rule_show(s, smlp->smk_rule, SMK_LONGLABEL);
 
 
        smk_rule_show(s, smlp->smk_rule, SMK_LONGLABEL);
 
@@ -2039,7 +2100,7 @@ static int load_self2_seq_show(struct seq_file *s, void *v)
 {
        struct list_head *list = v;
        struct smack_rule *srp =
 {
        struct list_head *list = v;
        struct smack_rule *srp =
-                list_entry(list, struct smack_rule, list);
+               list_entry_rcu(list, struct smack_rule, list);
 
        smk_rule_show(s, srp, SMK_LONGLABEL);
 
 
        smk_rule_show(s, srp, SMK_LONGLABEL);