2 * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
4 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
7 * A copy of the licence is included with the program, and can also be obtained from Free Software
8 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
12 #include "mali_osk_list.h"
14 #include "ump_uk_types.h"
15 #include "ump_kernel_interface.h"
16 #include "ump_kernel_common.h"
19 #ifdef CONFIG_DMA_SHARED_BUFFER
20 #include <linux/dma-buf.h>
23 /* ---------------- UMP kernel space API functions follows ---------------- */
27 UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle memh)
29 ump_dd_mem * mem = (ump_dd_mem *)memh;
31 DEBUG_ASSERT_POINTER(mem);
33 DBG_MSG(5, ("Returning secure ID. ID: %u\n", mem->secure_id));
35 return mem->secure_id;
40 UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id)
44 _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
46 DBG_MSG(5, ("Getting handle from secure ID. ID: %u\n", secure_id));
47 if (0 != ump_descriptor_mapping_get(device.secure_id_map, (int)secure_id, (void**)&mem))
49 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
50 DBG_MSG(1, ("Secure ID not found. ID: %u\n", secure_id));
51 return UMP_DD_HANDLE_INVALID;
54 ump_dd_reference_add(mem);
56 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
58 return (ump_dd_handle)mem;
61 UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_get(ump_secure_id secure_id)
65 _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
67 DBG_MSG(5, ("Getting handle from secure ID. ID: %u\n", secure_id));
68 if (0 != ump_descriptor_mapping_get(device.secure_id_map, (int)secure_id, (void**)&mem))
70 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
71 DBG_MSG(1, ("Secure ID not found. ID: %u\n", secure_id));
72 return UMP_DD_HANDLE_INVALID;
75 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
77 return (ump_dd_handle)mem;
80 UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle memh)
82 ump_dd_mem * mem = (ump_dd_mem*) memh;
84 DEBUG_ASSERT_POINTER(mem);
86 return mem->nr_blocks;
91 UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle memh, ump_dd_physical_block * blocks, unsigned long num_blocks)
93 ump_dd_mem * mem = (ump_dd_mem *)memh;
95 DEBUG_ASSERT_POINTER(mem);
99 DBG_MSG(1, ("NULL parameter in ump_dd_phys_blocks_get()\n"));
100 return UMP_DD_INVALID;
103 if (mem->nr_blocks != num_blocks)
105 DBG_MSG(1, ("Specified number of blocks do not match actual number of blocks\n"));
106 return UMP_DD_INVALID;
109 DBG_MSG(5, ("Returning physical block information. ID: %u\n", mem->secure_id));
111 _mali_osk_memcpy(blocks, mem->block_array, sizeof(ump_dd_physical_block) * mem->nr_blocks);
113 return UMP_DD_SUCCESS;
118 UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle memh, unsigned long index, ump_dd_physical_block * block)
120 ump_dd_mem * mem = (ump_dd_mem *)memh;
122 DEBUG_ASSERT_POINTER(mem);
126 DBG_MSG(1, ("NULL parameter in ump_dd_phys_block_get()\n"));
127 return UMP_DD_INVALID;
130 if (index >= mem->nr_blocks)
132 DBG_MSG(5, ("Invalid index specified in ump_dd_phys_block_get()\n"));
133 return UMP_DD_INVALID;
136 DBG_MSG(5, ("Returning physical block information. ID: %u, index: %lu\n", mem->secure_id, index));
138 *block = mem->block_array[index];
140 return UMP_DD_SUCCESS;
145 UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle memh)
147 ump_dd_mem * mem = (ump_dd_mem*)memh;
149 DEBUG_ASSERT_POINTER(mem);
151 DBG_MSG(5, ("Returning size. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
153 return mem->size_bytes;
158 UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle memh)
160 ump_dd_mem * mem = (ump_dd_mem*)memh;
163 DEBUG_ASSERT_POINTER(mem);
165 new_ref = _ump_osk_atomic_inc_and_read(&mem->ref_count);
167 DBG_MSG(5, ("Memory reference incremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
172 UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle memh)
/* Drops one reference on a UMP allocation. On the final release the secure-id
 * is removed from the global descriptor map, any dma-buf import is detached,
 * and the allocation's release_func is invoked.
 * NOTE(review): this view of the file is missing lines (braces, the
 * declaration of new_ref, and the if/else that separates the final-release
 * path from the plain-unlock path); comments describe visible statements only. */
175 ump_dd_mem * mem = (ump_dd_mem*)memh;
177 DEBUG_ASSERT_POINTER(mem);
179 /* We must hold this mutex while doing the atomic_dec_and_read, to protect
180 that elements in the ump_descriptor_mapping table is always valid. If they
181 are not, userspace may accidently map in this secure_ids right before its freed
182 giving a mapped backdoor into unallocated memory.*/
183 _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
185 new_ref = _ump_osk_atomic_dec_and_read(&mem->ref_count);
187 DBG_MSG(5, ("Memory reference decremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
/* Final-release path: unmap the secure id BEFORE dropping the map lock so no
 * new handle can be created from it while the memory is being torn down. */
191 DBG_MSG(3, ("Final release of memory. ID: %u\n", mem->secure_id));
193 ump_descriptor_mapping_free(device.secure_id_map, (int)mem->secure_id);
195 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
198 #ifdef CONFIG_DMA_SHARED_BUFFER
200 * when ump descriptor imported to dmabuf is released,
201 * physical memory region to the ump descriptor should be
202 * released only through dma_buf_put().
203 * if dma_buf_put() is called then file's refcount to
204 * the dmabuf becomes 0 and release func of exporter will be
205 * called by file->f_op->release to release the physical
206 * memory region finally.
/* NOTE(review): the direction argument of dma_buf_unmap_attachment() and the
 * expected dma_buf_put(dmabuf) after dma_buf_detach() are on missing lines —
 * confirm against the full source. */
208 if (mem->import_attach) {
209 struct dma_buf_attachment *attach = mem->import_attach;
210 struct dma_buf *dmabuf = attach->dmabuf;
213 dma_buf_unmap_attachment(attach, mem->sgt,
216 dma_buf_detach(dmabuf, attach);
217 mem->import_attach = NULL;
/* Hand the physical memory back to the backend that allocated it. */
222 mem->release_func(mem->ctx, mem);
/* Non-final release: the reference count is still > 0, just drop the lock. */
227 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
233 /* --------------- Handling of user space requests follows --------------- */
236 _mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args )
238 ump_session_data * session_data;
240 DEBUG_ASSERT_POINTER( args );
241 DEBUG_ASSERT_POINTER( args->ctx );
243 session_data = (ump_session_data *)args->ctx;
245 /* check compatability */
246 if (args->version == UMP_IOCTL_API_VERSION)
248 DBG_MSG(3, ("API version set to newest %d (compatible)\n", GET_VERSION(args->version)));
249 args->compatible = 1;
250 session_data->api_version = args->version;
252 else if (args->version == MAKE_VERSION_ID(1))
254 DBG_MSG(2, ("API version set to depricated: %d (compatible)\n", GET_VERSION(args->version)));
255 args->compatible = 1;
256 session_data->api_version = args->version;
260 DBG_MSG(2, ("API version set to %d (incompatible with client version %d)\n", GET_VERSION(UMP_IOCTL_API_VERSION), GET_VERSION(args->version)));
261 args->compatible = 0;
262 args->version = UMP_IOCTL_API_VERSION; /* report our version */
265 return _MALI_OSK_ERR_OK;
269 _mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info )
/* IOCTL backend: drop the calling session's reference to the allocation
 * identified by release_info->secure_id. Returns _MALI_OSK_ERR_OK if the
 * session owned it, _MALI_OSK_ERR_INVALID_FUNC otherwise.
 * NOTE(review): the declaration of the local secure_id, the loop braces, and
 * the final "return ret;" are on lines missing from this view. */
271 ump_session_memory_list_element * session_memory_element;
272 ump_session_memory_list_element * tmp;
273 ump_session_data * session_data;
274 _mali_osk_errcode_t ret = _MALI_OSK_ERR_INVALID_FUNC;
277 DEBUG_ASSERT_POINTER( release_info );
278 DEBUG_ASSERT_POINTER( release_info->ctx );
280 /* Retrieve the session data */
281 session_data = (ump_session_data*)release_info->ctx;
283 /* If there are many items in the memory session list we
284 * could be de-referencing this pointer a lot so keep a local copy
286 secure_id = release_info->secure_id;
288 DBG_MSG(4, ("Releasing memory with IOCTL, ID: %u\n", secure_id));
290 /* Iterate through the memory list looking for the requested secure ID */
291 _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
292 _MALI_OSK_LIST_FOREACHENTRY(session_memory_element, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list)
294 if ( session_memory_element->mem->secure_id == secure_id)
296 ump_dd_mem *release_mem;
298 release_mem = session_memory_element->mem;
/* Unlink from the session list before releasing, under session_data->lock. */
299 _mali_osk_list_del(&session_memory_element->list);
/* May free the underlying memory if this was the last reference. */
300 ump_dd_reference_release(release_mem);
301 _mali_osk_free(session_memory_element);
303 ret = _MALI_OSK_ERR_OK;
308 _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
309 DBG_MSG_IF(1, _MALI_OSK_ERR_OK != ret, ("UMP memory with ID %u does not belong to this session.\n", secure_id));
311 DBG_MSG(4, ("_ump_ukk_release() returning 0x%x\n", ret));
315 _mali_osk_errcode_t _ump_ukk_size_get( _ump_uk_size_get_s *user_interaction )
318 _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
320 DEBUG_ASSERT_POINTER( user_interaction );
322 /* We lock the mappings so things don't get removed while we are looking for the memory */
323 _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
324 if (0 == ump_descriptor_mapping_get(device.secure_id_map, (int)user_interaction->secure_id, (void**)&mem))
326 user_interaction->size = mem->size_bytes;
327 DBG_MSG(4, ("Returning size. ID: %u, size: %lu ", (ump_secure_id)user_interaction->secure_id, (unsigned long)user_interaction->size));
328 ret = _MALI_OSK_ERR_OK;
332 user_interaction->size = 0;
333 DBG_MSG(1, ("Failed to look up mapping in ump_ioctl_size_get(). ID: %u\n", (ump_secure_id)user_interaction->secure_id));
336 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
342 void _ump_ukk_msync( _ump_uk_msync_s *args )
/* IOCTL backend: CPU cache maintenance (clean/invalidate) on a UMP
 * allocation, or a pure readout of its cacheability setting.
 * NOTE(review): this view is missing lines — the declarations of offset/size,
 * the NULL-mem guard with its return, and the if/else heads that select
 * between flushing args->address..+size and flushing the whole mapping. */
344 ump_dd_mem * mem = NULL;
345 void *virtual = NULL;
/* Look the descriptor up under the map lock so it cannot vanish mid-lookup. */
349 _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
350 ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
/* Lookup failure path: release the lock and bail out. */
354 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
355 DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_msync(). ID: %u\n", (ump_secure_id)args->secure_id));
358 /* Ensure the memory doesn't disappear when we are flushing it. */
359 ump_dd_reference_add(mem);
360 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
362 /* Returns the cache settings back to Userspace */
363 args->is_cached=mem->is_cached;
365 /* If this flag is the only one set, we should not do the actual flush, only the readout */
366 if ( _UMP_UK_MSYNC_READOUT_CACHE_ENABLED==args->op )
368 DBG_MSG(3, ("_ump_ukk_msync READOUT ID: %u Enabled: %d\n", (ump_secure_id)args->secure_id, mem->is_cached));
369 goto msync_release_and_return;
372 /* Nothing to do if the memory is not cached */
373 if ( 0==mem->is_cached )
375 DBG_MSG(3, ("_ump_ukk_msync IGNORING ID: %u Enabled: %d OP: %d\n", (ump_secure_id)args->secure_id, mem->is_cached, args->op));
376 goto msync_release_and_return;
378 DBG_MSG(3, ("UMP[%02u] _ump_ukk_msync Flush OP: %d Address: 0x%08x Mapping: 0x%08x\n",
379 (ump_secure_id)args->secure_id, args->op, args->address, args->mapping));
/* Caller supplied an address: flush from that address; offset is relative to
 * the start of the user-space mapping. NOTE(review): 32-bit truncating casts. */
383 virtual = (void *)((u32)args->address);
384 offset = (u32)((args->address) - (args->mapping));
386 /* Flush entire mapping when no address is specified. */
387 virtual = args->mapping;
393 /* Flush entire mapping when no size is specified. */
394 size = mem->size_bytes - offset;
/* Reject ranges that run past the end of the allocation. */
397 if ( (offset + size) > mem->size_bytes )
399 DBG_MSG(1, ("Trying to flush more than the entire UMP allocation: offset: %u + size: %u > %u\n", offset, size, mem->size_bytes));
400 goto msync_release_and_return;
403 /* The actual cache flush - Implemented for each OS*/
404 _ump_osk_msync( mem, virtual, offset, size, args->op, NULL);
/* Balance the reference taken above on every exit path. */
406 msync_release_and_return:
407 ump_dd_reference_release(mem);
411 void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s* args)
413 ump_session_data * session_data;
414 ump_uk_cache_op_control op;
416 DEBUG_ASSERT_POINTER( args );
417 DEBUG_ASSERT_POINTER( args->ctx );
420 session_data = (ump_session_data *)args->ctx;
422 _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
423 if ( op== _UMP_UK_CACHE_OP_START )
425 session_data->cache_operations_ongoing++;
426 DBG_MSG(4, ("Cache ops start\n" ));
427 if ( session_data->cache_operations_ongoing != 1 )
429 DBG_MSG(2, ("UMP: Number of simultanious cache control ops: %d\n", session_data->cache_operations_ongoing) );
432 else if ( op== _UMP_UK_CACHE_OP_FINISH )
434 DBG_MSG(4, ("Cache ops finish\n"));
435 session_data->cache_operations_ongoing--;
437 if ( session_data->has_pending_level1_cache_flush)
439 /* This function will set has_pending_level1_cache_flush=0 */
440 _ump_osk_msync( NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
444 /* to be on the safe side: always flush l1 cache when cache operations are done */
445 _ump_osk_msync( NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
446 DBG_MSG(4, ("Cache ops finish end\n" ));
450 DBG_MSG(1, ("Illegal call to %s at line %d\n", __FUNCTION__, __LINE__));
452 _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
456 void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args )
/* IOCTL backend: record that ownership of the allocation moves between CPU
 * and hardware (e.g. Mali), and perform the cache maintenance that transition
 * requires (clean+invalidate by default, invalidate-only on HW->CPU).
 * NOTE(review): this view is missing lines — the NULL-mem guard, several
 * early "return" statements, the "if (" head of the not-CPU/not-CPU
 * condition, and the #endif of the UMP_SKIP_INVALIDATION region. */
458 ump_dd_mem * mem = NULL;
459 ump_uk_user old_user;
460 ump_uk_msync_op cache_op = _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE;
461 ump_session_data *session_data;
463 DEBUG_ASSERT_POINTER( args );
464 DEBUG_ASSERT_POINTER( args->ctx );
466 session_data = (ump_session_data *)args->ctx;
/* Look up the descriptor under the map lock. */
468 _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
469 ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
/* Lookup failure path: drop the lock and bail out. */
473 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
474 DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_switch_hw_usage(). ID: %u\n", (ump_secure_id)args->secure_id));
/* Record the ownership transition before deciding whether to flush. */
478 old_user = mem->hw_device;
479 mem->hw_device = args->new_user;
481 DBG_MSG(3, ("UMP[%02u] Switch usage Start New: %s Prev: %s.\n", (ump_secure_id)args->secure_id, args->new_user?"MALI":"CPU",old_user?"MALI":"CPU"));
/* Uncached memory never needs cache maintenance. */
483 if ( ! mem->is_cached )
485 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
486 DBG_MSG(3, ("UMP[%02u] Changing owner of uncached memory. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
/* No transition: nothing to flush. */
490 if ( old_user == args->new_user)
492 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
493 DBG_MSG(4, ("UMP[%02u] Setting the new_user equal to previous for. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
497 /* Previous AND new is both different from CPU */
498 (old_user != _UMP_UK_USED_BY_CPU) && (args->new_user != _UMP_UK_USED_BY_CPU )
501 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
502 DBG_MSG(4, ("UMP[%02u] Previous and new user is not CPU. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
/* HW -> CPU: an invalidate is sufficient (CPU must not read stale lines). */
506 if ( (old_user != _UMP_UK_USED_BY_CPU ) && (args->new_user==_UMP_UK_USED_BY_CPU) )
508 cache_op =_UMP_UK_MSYNC_INVALIDATE;
509 DBG_MSG(4, ("UMP[%02u] Cache invalidation needed\n", (ump_secure_id)args->secure_id));
510 #ifdef UMP_SKIP_INVALIDATION
/* Build-time opt-out: skip the invalidate entirely. */
512 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
513 DBG_MSG(4, ("UMP[%02u] Performing Cache invalidation SKIPPED\n", (ump_secure_id)args->secure_id));
517 /* Ensure the memory doesn't disappear when we are flushing it. */
518 ump_dd_reference_add(mem);
519 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
521 /* Take lock to protect: session->cache_operations_ongoing and session->has_pending_level1_cache_flush */
522 _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
523 /* Actual cache flush */
524 _ump_osk_msync( mem, NULL, 0, mem->size_bytes, cache_op, session_data);
525 _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
/* Balance the reference taken above. */
527 ump_dd_reference_release(mem);
528 DBG_MSG(4, ("UMP[%02u] Switch usage Finish\n", (ump_secure_id)args->secure_id));
532 void _ump_ukk_lock(_ump_uk_lock_s *args )
534 ump_dd_mem * mem = NULL;
536 _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
537 ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
541 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
542 DBG_MSG(1, ("UMP[%02u] Failed to look up mapping in _ump_ukk_lock(). ID: %u\n", (ump_secure_id)args->secure_id));
545 ump_dd_reference_add(mem);
546 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
548 DBG_MSG(1, ("UMP[%02u] Lock. New lock flag: %d. Old Lock flag:\n", (u32)args->secure_id, (u32)args->lock_usage, (u32) mem->lock_usage ));
550 mem->lock_usage = (ump_lock_usage) args->lock_usage;
552 /** TODO: TAKE LOCK HERE */
554 ump_dd_reference_release(mem);
557 void _ump_ukk_unlock(_ump_uk_unlock_s *args )
559 ump_dd_mem * mem = NULL;
561 _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
562 ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
566 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
567 DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_unlock(). ID: %u\n", (ump_secure_id)args->secure_id));
570 ump_dd_reference_add(mem);
571 _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
573 DBG_MSG(1, ("UMP[%02u] Unlocking. Old Lock flag:\n", (u32)args->secure_id, (u32) mem->lock_usage ));
575 mem->lock_usage = (ump_lock_usage) UMP_NOT_LOCKED;
577 /** TODO: RELEASE LOCK HERE */
579 ump_dd_reference_release(mem);