/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/*
 * Base kernel context APIs
 */

#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_instr.h>
#include <mali_kbase_mem_linux.h>
/**
 * kbase_create_context() - Create a kernel base context.
 * @kbdev: Kbase device
 * @is_compat: Force creation of a 32-bit context
 *
 * Allocate and initialise a kernel base context.
 *
 * Return: new kbase context, or NULL on failure
 */
struct kbase_context *
kbase_create_context(struct kbase_device *kbdev, bool is_compat)
{
	struct kbase_context *kctx;
	int err;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* zero-inited as a lot of code assumes it's zeroed out on create */
	kctx = vzalloc(sizeof(*kctx));
	if (!kctx)
		goto out;

	/* creating a context is considered a disjoint event */
	kbase_disjoint_event(kbdev);

	kctx->kbdev = kbdev;
	kctx->as_nr = KBASEP_AS_NR_INVALID;
	kctx->is_compat = is_compat;
#ifdef CONFIG_MALI_TRACE_TIMELINE
	kctx->timeline.owner_tgid = task_tgid_nr(current);
#endif
	atomic_set(&kctx->setup_complete, 0);
	atomic_set(&kctx->setup_in_progress, 0);
	kctx->infinite_cache_active = 0;
	spin_lock_init(&kctx->mm_update_lock);
	kctx->process_mm = NULL;
	atomic_set(&kctx->nonmapped_pages, 0);
	kctx->slots_pullable = 0;
	kctx->tgid = current->tgid;
	kctx->pid = current->pid;
	err = kbase_mem_pool_init(&kctx->mem_pool,
			kbdev->mem_pool_max_size_default,
			kctx->kbdev, &kbdev->mem_pool);
	if (err)
		goto free_kctx;

	err = kbase_mem_evictable_init(kctx);
	if (err)
		goto free_pool;

	atomic_set(&kctx->used_pages, 0);

	err = kbase_jd_init(kctx);
	if (err)
		goto deinit_evictable;

	err = kbasep_js_kctx_init(kctx);
	if (err)
		goto free_jd;	/* safe to call kbasep_js_kctx_term in this case */

	err = kbase_event_init(kctx);
	if (err)
		goto free_jd;

	mutex_init(&kctx->reg_lock);

	INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
	spin_lock_init(&kctx->waiting_soft_jobs_lock);
#ifdef CONFIG_KDS
	INIT_LIST_HEAD(&kctx->waiting_kds_resource);
#endif
	err = kbase_dma_fence_init(kctx);
	if (err)
		goto free_event;

	err = kbase_mmu_init(kctx);
	if (err)
		goto term_dma_fence;

	kctx->pgd = kbase_mmu_alloc_pgd(kctx);
	if (!kctx->pgd)
		goto free_mmu;

	kctx->aliasing_sink_page = kbase_mem_pool_alloc(&kctx->mem_pool);
	if (!kctx->aliasing_sink_page)
		goto no_sink_page;

	init_waitqueue_head(&kctx->event_queue);

	kctx->cookies = KBASE_COOKIE_MASK;

	/* Make sure page 0 is not used... */
	err = kbase_region_tracker_init(kctx);
	if (err)
		goto no_region_tracker;

	err = kbase_sticky_resource_init(kctx);
	if (err)
		goto no_sticky;

	err = kbase_jit_init(kctx);
	if (err)
		goto no_jit;
#ifdef CONFIG_GPU_TRACEPOINTS
	atomic_set(&kctx->jctx.work_id, 0);
#endif
#ifdef CONFIG_MALI_TRACE_TIMELINE
	atomic_set(&kctx->timeline.jd_atoms_in_flight, 0);
#endif

	kctx->id = atomic_add_return(1, &(kbdev->ctx_num)) - 1;

	mutex_init(&kctx->vinstr_cli_lock);

	hrtimer_init(&kctx->soft_event_timeout, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	kctx->soft_event_timeout.function = &kbasep_soft_event_timeout_worker;

	return kctx;
no_jit:
	kbase_gpu_vm_lock(kctx);
	kbase_sticky_resource_term(kctx);
	kbase_gpu_vm_unlock(kctx);
no_sticky:
	kbase_region_tracker_term(kctx);
no_region_tracker:
	kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);
no_sink_page:
	/* VM lock needed for the call to kbase_mmu_free_pgd */
	kbase_gpu_vm_lock(kctx);
	kbase_mmu_free_pgd(kctx);
	kbase_gpu_vm_unlock(kctx);
free_mmu:
	kbase_mmu_term(kctx);
term_dma_fence:
	kbase_dma_fence_term(kctx);
free_event:
	kbase_event_cleanup(kctx);
free_jd:
	/* Safe to call even when not initialised (kctx is zero-initialised) */
	kbasep_js_kctx_term(kctx);
	kbase_jd_exit(kctx);
deinit_evictable:
	kbase_mem_evictable_deinit(kctx);
free_pool:
	kbase_mem_pool_term(&kctx->mem_pool);
free_kctx:
	vfree(kctx);
out:
	return NULL;
}
KBASE_EXPORT_SYMBOL(kbase_create_context);
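
/*
 * Usage sketch (illustrative only, not part of the driver): a caller that
 * already holds a probed struct kbase_device would typically pair
 * kbase_create_context() with kbase_destroy_context() and check for NULL in
 * between. The helper name below is hypothetical.
 *
 *	static int example_open_context(struct kbase_device *kbdev, bool compat)
 *	{
 *		struct kbase_context *kctx;
 *
 *		kctx = kbase_create_context(kbdev, compat);
 *		if (!kctx)
 *			return -ENOMEM;
 *
 *		... set flags, map memory, submit jobs ...
 *
 *		kbase_destroy_context(kctx);
 *		return 0;
 *	}
 */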
static void kbase_reg_pending_dtor(struct kbase_va_region *reg)
{
	dev_dbg(reg->kctx->kbdev->dev, "Freeing pending unmapped region\n");
	kbase_mem_phy_alloc_put(reg->cpu_alloc);
	kbase_mem_phy_alloc_put(reg->gpu_alloc);
	kfree(reg);
}
/**
 * kbase_destroy_context - Destroy a kernel base context.
 * @kctx: Context to destroy
 *
 * Calls kbase_destroy_os_context() to free OS specific structures.
 * Will release all outstanding regions.
 */
void kbase_destroy_context(struct kbase_context *kctx)
{
	struct kbase_device *kbdev;
	int pages;
	unsigned long pending_regions_to_clean;

	KBASE_DEBUG_ASSERT(NULL != kctx);

	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(NULL != kbdev);

	KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);

	/* Ensure the core is powered up for the destroy process */
	/* A suspend won't happen here, because we're in a syscall from a
	 * userspace thread. */
	kbase_pm_context_active(kbdev);

	kbase_jd_zap_context(kctx);
	kbase_event_cleanup(kctx);
	/*
	 * JIT must be terminated before the code below, as it must be called
	 * without the region lock being held. The code above ensures no new
	 * JIT allocations can be made by the time we get to this point of
	 * context tear-down.
	 */
	kbase_jit_term(kctx);
	kbase_gpu_vm_lock(kctx);

	kbase_sticky_resource_term(kctx);

	/* MMU is disabled as part of scheduling out the context */
	kbase_mmu_free_pgd(kctx);

	/* drop the aliasing sink page now that it can't be mapped anymore */
	kbase_mem_pool_free(&kctx->mem_pool, kctx->aliasing_sink_page, false);

	/* free pending region setups */
	pending_regions_to_clean = (~kctx->cookies) & KBASE_COOKIE_MASK;
	while (pending_regions_to_clean) {
		unsigned int cookie = __ffs(pending_regions_to_clean);

		BUG_ON(!kctx->pending_regions[cookie]);

		kbase_reg_pending_dtor(kctx->pending_regions[cookie]);

		kctx->pending_regions[cookie] = NULL;
		pending_regions_to_clean &= ~(1UL << cookie);
	}

	kbase_region_tracker_term(kctx);
	kbase_gpu_vm_unlock(kctx);
	/* Safe to call even when not initialised (kctx is zero-initialised) */
	kbasep_js_kctx_term(kctx);

	kbase_jd_exit(kctx);

	kbase_pm_context_idle(kbdev);

	kbase_dma_fence_term(kctx);

	kbase_mmu_term(kctx);

	pages = atomic_read(&kctx->used_pages);
	if (pages != 0)
		dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);

	kbase_mem_evictable_deinit(kctx);
	kbase_mem_pool_term(&kctx->mem_pool);
	WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);

	vfree(kctx);
}
KBASE_EXPORT_SYMBOL(kbase_destroy_context);
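
/*
 * Worked example for the pending-region cleanup in kbase_destroy_context()
 * above (illustrative only). kctx->cookies starts as KBASE_COOKIE_MASK and a
 * bit is cleared when a cookie is handed out, so a cleared bit means a
 * pending region is still outstanding. With an assumed 4-bit mask of 0xF:
 *
 *	kctx->cookies              = 0b1010  (cookies 0 and 2 outstanding)
 *	(~kctx->cookies) & 0xF     = 0b0101
 *	__ffs(0b0101)              = 0       -> destroy pending_regions[0]
 *	after clearing bit 0       = 0b0100
 *	__ffs(0b0100)              = 2       -> destroy pending_regions[2]
 *	after clearing bit 2       = 0b0000  -> loop exits
 *
 * The real KBASE_COOKIE_MASK is wider; the 4-bit value is only to keep the
 * arithmetic short.
 */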
/**
 * kbase_context_set_create_flags - Set creation flags on a context
 * @kctx: Kbase context
 * @flags: Flags to set
 *
 * Return: 0 on success, -EINVAL if the flags are not valid
 */
int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags)
{
	int err = 0;
	struct kbasep_js_kctx_info *js_kctx_info;
	unsigned long irq_flags;

	KBASE_DEBUG_ASSERT(NULL != kctx);

	js_kctx_info = &kctx->jctx.sched_info;

	/* Validate flags */
	if (flags != (flags & BASE_CONTEXT_CREATE_KERNEL_FLAGS)) {
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
	spin_lock_irqsave(&kctx->kbdev->js_data.runpool_irq.lock, irq_flags);

	/* Translate the flags */
	if ((flags & BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
		js_kctx_info->ctx.flags &= ~((u32) KBASE_CTX_FLAG_SUBMIT_DISABLED);

	/* Latch the initial attributes into the Job Scheduler */
	kbasep_js_ctx_attr_set_initial_attrs(kctx->kbdev, kctx);

	spin_unlock_irqrestore(&kctx->kbdev->js_data.runpool_irq.lock,
			irq_flags);
	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);

out:
	return err;
}
KBASE_EXPORT_SYMBOL(kbase_context_set_create_flags);
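
/*
 * Caller sketch (hypothetical, not part of the driver): the validation above
 * masks the request against BASE_CONTEXT_CREATE_KERNEL_FLAGS and rejects
 * anything left over, so an unsupported bit produces -EINVAL rather than
 * being silently dropped.
 *
 *	int err;
 *
 *	err = kbase_context_set_create_flags(kctx,
 *			BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED);
 *	if (err)
 *		dev_warn(kctx->kbdev->dev, "context flags rejected: %d\n", err);
 */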