3 * sep_main_mod.c - Security Processor Driver main group of functions
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 * Mark Allyn mark.a.allyn@intel.com
28 * 2009.06.26 Initial publish
32 #include <linux/init.h>
33 #include <linux/module.h>
35 #include <linux/cdev.h>
36 #include <linux/kdev_t.h>
37 #include <linux/mutex.h>
39 #include <linux/poll.h>
40 #include <linux/wait.h>
41 #include <linux/pci.h>
42 #include <linux/firmware.h>
43 #include <asm/ioctl.h>
44 #include <linux/ioport.h>
46 #include <linux/interrupt.h>
47 #include <linux/pagemap.h>
48 #include <asm/cacheflush.h>
49 #include "sep_driver_hw_defs.h"
50 #include "sep_driver_config.h"
51 #include "sep_driver_api.h"
54 #if SEP_DRIVER_ARM_DEBUG_MODE
56 #define CRYS_SEP_ROM_length 0x4000
57 #define CRYS_SEP_ROM_start_address 0x8000C000UL
58 #define CRYS_SEP_ROM_start_address_offset 0xC000UL
59 #define SEP_ROM_BANK_register 0x80008420UL
60 #define SEP_ROM_BANK_register_offset 0x8420UL
61 #define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0x82000000
64 * THESE 2 definitions are specific to the board - must be
65 * defined during integration
67 #define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000
/*
 * Load the SEP ROM image (CRYS_SEP_ROM, from SEP_ROM_image.h) into the
 * device: the image is written in four register banks of
 * CRYS_SEP_ROM_length/4 words each, then the SEP is software-reset and
 * GPR3 is polled until the ROM boot reports a terminal status.
 * Only built when SEP_DRIVER_ARM_DEBUG_MODE is set.
 * NOTE(review): several structural lines (declarations of reg/error/
 * warning, loop/switch closure) are not visible in this chunk.
 */
static void sep_load_rom_code(struct sep_device *sep)
    unsigned long i, k, j;

    /* Loading ROM from SEP_ROM_image.h file */
    k = sizeof(CRYS_SEP_ROM);

    edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");

    edbg("SEP Driver: k is %lu\n", k);
    edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
    /* NOTE(review): %p with an integer macro operand is a printf format
       mismatch - the offset is not a pointer; confirm and fix */
    edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);

    for (i = 0; i < 4; i++) {
        /* select the ROM bank to write into */
        sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);

        /* copy one bank (0x1000 words) of the image word by word */
        for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
            sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);

    /* mark the whole image as consumed */
    j = CRYS_SEP_ROM_length;

    /* reset the SEP so it boots from the freshly loaded ROM */
    sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);

    /* poll for SEP ROM boot finish */
    reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);

    edbg("SEP Driver: ROM polling ended\n");

    /* fatal error - read error status from GPR0 */
    error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
    edbg("SEP Driver: ROM polling case 1\n");

    /* Boot First Phase ended */
    warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
    edbg("SEP Driver: ROM polling case 2\n");

    /* Cold boot ended successfully */
    warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
    edbg("SEP Driver: ROM polling case 4\n");

    /* Warmboot ended successfully */
    warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
    edbg("SEP Driver: ROM polling case 8\n");

    /* ColdWarm boot ended successfully */
    warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
    edbg("SEP Driver: ROM polling case 16\n");

    edbg("SEP Driver: ROM polling case 32\n");
/* Non ARM-debug builds: the SEP boots from its own ROM, so the host-side
   ROM download is a no-op stub. */
static void sep_load_rom_code(struct sep_device *sep) { }
152 #endif /* SEP_DRIVER_ARM_DEBUG_MODE */
156 /*----------------------------------------
158 -----------------------------------------*/
160 #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
161 #define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
162 #define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
164 /*--------------------------------------------
166 --------------------------------------------*/
168 /* debug messages level */
169 INT_MODULE_PARM(sepDebug, 0x0);
170 MODULE_PARM_DESC(sepDebug, "Flag to enable SEP debug messages");
172 /* Keep this a single static object for now to keep the conversion easy */
174 static struct sep_device sep_instance;
175 static struct sep_device *sep_dev = &sep_instance;
Mutex serializing access to the internals of the SEP driver
180 static DEFINE_MUTEX(sep_mutex);
183 /* wait queue head (event) of the driver */
184 static DECLARE_WAIT_QUEUE_HEAD(sep_event);
This function copies the cache and resident firmware images from their
source location into destination memory, which is external to Linux VM and is
given as physical (bus) addresses. The images are fetched with
request_firmware() ("cache.image.bin" then "resident.image.bin"), placed
back-to-back at the start of the RAR region, and the resulting bus addresses
are returned through dst_new_cache_addr_ptr / dst_new_resident_addr_ptr.
static int sep_copy_cache_resident_to_area(struct sep_device *sep,
                unsigned long src_cache_addr,
                unsigned long cache_size_in_bytes,
                unsigned long src_resident_addr,
                unsigned long resident_size_in_bytes,
                unsigned long *dst_new_cache_addr_ptr,
                unsigned long *dst_new_resident_addr_ptr)
    unsigned long resident_addr;
    unsigned long cache_addr;
    const struct firmware *fw;

    char *cache_name = "cache.image.bin";
    char *res_name = "resident.image.bin";

    /*--------------------------------
       the cache image goes at the very start of the RAR
    -------------------------------------*/
    edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
    edbg("SEP Driver:rar_physical is %08lx\n", sep->rar_bus);

    sep->rar_region_addr = (unsigned long) sep->rar_addr;

    sep->cache_bus = sep->rar_bus;
    sep->cache_addr = sep->rar_addr;

    /* load the cache firmware image */
    error = request_firmware(&fw, cache_name, &sep->pdev->dev);
    edbg("SEP Driver:cant request cache fw\n");

    edbg("SEP Driver:cache data loc is %p\n", (void *) fw->data);
    edbg("SEP Driver:cache data size is %08Zx\n", fw->size);

    /* NOTE(review): the next three statements use the global sep_dev
       while the rest of the function uses the sep parameter - they are
       the same object today, but this should be made consistent */
    memcpy((void *) sep_dev->cache_addr, (void *) fw->data, fw->size);

    sep_dev->cache_size = fw->size;

    cache_addr = (unsigned long) sep_dev->cache_addr;

    release_firmware(fw);

    /* resident image is placed immediately after the cache image */
    sep->resident_bus = sep->cache_bus + sep->cache_size;
    sep->resident_addr = sep->cache_addr + sep->cache_size;

    /* load the resident firmware image */
    error = request_firmware(&fw, res_name, &sep->pdev->dev);
    edbg("SEP Driver:cant request res fw\n");

    edbg("SEP Driver:res data loc is %p\n", (void *) fw->data);
    edbg("SEP Driver:res data size is %08Zx\n", fw->size);

    memcpy((void *) sep->resident_addr, (void *) fw->data, fw->size);

    sep->resident_size = fw->size;

    release_firmware(fw);

    resident_addr = (unsigned long) sep->resident_addr;

    edbg("SEP Driver:resident_addr (physical )is %08lx\n", sep->resident_bus);
    edbg("SEP Driver:cache_addr (physical) is %08lx\n", sep->cache_bus);

    edbg("SEP Driver:resident_addr (logical )is %08lx\n", resident_addr);
    edbg("SEP Driver:cache_addr (logical) is %08lx\n", cache_addr);

    edbg("SEP Driver:resident_size is %08lx\n", sep->resident_size);
    edbg("SEP Driver:cache_size is %08lx\n", sep->cache_size);

    /* physical addresses returned to the caller */
    *dst_new_cache_addr_ptr = sep->cache_bus;
    *dst_new_resident_addr_ptr = sep->resident_bus;
This function maps and allocates the
shared area on the external RAM (device).
The input is shared_area_size - the size of the memory to
allocate. The outputs
are kernel_shared_area_addr_ptr - the kernel
address of the mapped and allocated
shared area, and phys_shared_area_addr_ptr
- the physical address of the shared area
static int sep_map_and_alloc_shared_area(struct sep_device *sep,
             unsigned long shared_area_size,
             unsigned long *kernel_shared_area_addr_ptr,
             unsigned long *phys_shared_area_addr_ptr)
    /* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
    /* NOTE(review): kmalloc + __pa is used instead of a DMA-coherent
       allocation - confirm the device really can address this memory */
    sep->shared_addr = kmalloc(shared_area_size, GFP_KERNEL);
    if (!sep->shared_addr) {
        edbg("sep_driver:shared memory kmalloc failed\n");

    /* derive the bus address the SEP device will use */
    sep_dev->shared_bus = __pa(sep->shared_addr);
    /* shared_bus = 0xda00000; */
    *kernel_shared_area_addr_ptr = (unsigned long) sep->shared_addr;
    /* set the physical address of the shared area */
    *phys_shared_area_addr_ptr = sep->shared_bus;
    edbg("SEP Driver:shared_addr is %p\n", sep->shared_addr);
    edbg("SEP Driver:shared_region_size is %08lx\n", shared_area_size);
    edbg("SEP Driver:shared_physical_addr is %08lx\n", *phys_shared_area_addr_ptr);
This function unmaps and deallocates the shared area
on the external RAM (device).
The input is shared_area_size - the size of the memory to deallocate, kernel_
shared_area_addr_ptr - the kernel address of the mapped and allocated
shared area, phys_shared_area_addr_ptr - the physical address of
the shared area (unused here: kfree only needs the kernel address)
static void sep_unmap_and_free_shared_area(unsigned long shared_area_size, unsigned long kernel_shared_area_addr, unsigned long phys_shared_area_addr)
    kfree((void *) kernel_shared_area_addr);
This function returns the physical address inside the shared area according
to the virtual address. It can be either on the external RAM device
(ioremapped), or on the system RAM.
This implementation is for the external RAM:
phys = shared_bus + (virt - shared_addr)
static unsigned long sep_shared_area_virt_to_phys(struct sep_device *sep,
                    unsigned long virt_address)
    edbg("SEP Driver:sh virt to phys v %08lx\n", virt_address);
    edbg("SEP Driver:sh virt to phys p %08lx\n", sep->shared_bus + (virt_address - (unsigned long) sep->shared_addr));

    return (unsigned long) sep->shared_bus + (virt_address - (unsigned long) sep->shared_addr);
This function returns the virtual address inside the shared area
according to the physical address (inverse of
sep_shared_area_virt_to_phys). It can be either on the
external RAM device (ioremapped), or on the system RAM. This implementation
is for the external RAM.
static unsigned long sep_shared_area_phys_to_virt(struct sep_device *sep,
                    unsigned long phys_address)
    return (unsigned long) sep->shared_addr + (phys_address - sep->shared_bus);
354 /*----------------------------------------------------------------------
355 open function of the character driver - must only lock the mutex
356 must also release the memory data pool allocations
357 ------------------------------------------------------------------------*/
358 static int sep_open(struct inode *inode, struct file *filp)
362 dbg("SEP Driver:--------> open start\n");
364 /* check the blocking mode */
365 if (filp->f_flags & O_NDELAY)
366 error = mutex_trylock(&sep_mutex);
369 mutex_lock(&sep_mutex);
371 /* check the error */
373 edbg("SEP Driver: down_interruptible failed\n");
376 /* Bind to the device, we only have one which makes it easy */
377 filp->private_data = sep_dev;
381 /* release data pool allocations */
382 sep_dev->data_pool_bytes_allocated = 0;
386 dbg("SEP Driver:<-------- open end\n");
393 /*------------------------------------------------------------
395 -------------------------------------------------------------*/
396 static int sep_release(struct inode *inode_ptr, struct file *filp)
398 struct sep_driver *sep = filp->private_data;
399 dbg("----------->SEP Driver: sep_release start\n");
401 #if 0 /*!SEP_DRIVER_POLLING_MODE */
403 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
404 /* release IRQ line */
405 free_irq(SEP_DIRVER_IRQ_NUM, sep);
408 /* unlock the sep mutex */
409 mutex_unlock(&sep_mutex);
410 dbg("SEP Driver:<-------- sep_release end\n");
417 /*---------------------------------------------------------------
418 map function - this functions maps the message shared area
419 -----------------------------------------------------------------*/
420 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
422 unsigned long phys_addr;
423 struct sep_device *sep = filp->private_data;
425 dbg("-------->SEP Driver: mmap start\n");
427 /* check that the size of the mapped range is as the size of the message
429 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
430 edbg("SEP Driver mmap requested size is more than allowed\n");
431 printk(KERN_WARNING "SEP Driver mmap requested size is more \
433 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
434 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
438 edbg("SEP Driver:sep->message_shared_area_addr is %08lx\n", sep->message_shared_area_addr);
440 /* get physical address */
441 phys_addr = sep->shared_area_bus;
443 edbg("SEP Driver: phys_addr is %08lx\n", phys_addr);
445 if (remap_pfn_range(vma, vma->vm_start, phys_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
446 edbg("SEP Driver remap_page_range failed\n");
447 printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
451 dbg("SEP Driver:<-------- mmap end\n");
457 /*-----------------------------------------------
459 *----------------------------------------------*/
460 static unsigned int sep_poll(struct file *filp, poll_table * wait)
463 unsigned int mask = 0;
464 unsigned long retVal = 0; /* flow id */
465 struct sep_device *sep = filp->private_data;
467 dbg("---------->SEP Driver poll: start\n");
470 #if SEP_DRIVER_POLLING_MODE
472 while (sep->send_ct != (retVal & 0x7FFFFFFF)) {
473 retVal = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
475 for (count = 0; count < 10 * 4; count += 4)
476 edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
481 /* add the event to the polling wait table */
482 poll_wait(filp, &sep_event, wait);
486 edbg("sep->send_ct is %lu\n", sep->send_ct);
487 edbg("sep->reply_ct is %lu\n", sep->reply_ct);
489 /* check if the data is ready */
490 if (sep->send_ct == sep->reply_ct) {
491 for (count = 0; count < 12 * 4; count += 4)
492 edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + count)));
494 for (count = 0; count < 10 * 4; count += 4)
495 edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + 0x1800 + count)));
497 retVal = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
498 edbg("retVal is %lu\n", retVal);
499 /* check if the this is sep reply or request */
501 edbg("SEP Driver: sep request in\n");
503 mask |= POLLOUT | POLLWRNORM;
505 edbg("SEP Driver: sep reply in\n");
506 mask |= POLLIN | POLLRDNORM;
509 dbg("SEP Driver:<-------- poll exit\n");
Calculates the current time and stores it at the predefined offset of the
message shared area as a { SEP_TIME_VAL_TOKEN, seconds } pair. Optionally
returns the physical address of that slot (*address_ptr) and the seconds
value (*time_in_sec_ptr) when those pointers are non-NULL.
static int sep_set_time(struct sep_device *sep, unsigned long *address_ptr, unsigned long *time_in_sec_ptr)
    /* address of time in the kernel */
    unsigned long time_addr;

    dbg("SEP Driver:--------> sep_set_time start\n");

    do_gettimeofday(&time);

    /* set value in the SYSTEM MEMORY offset */
    time_addr = sep->message_shared_area_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;

    *(unsigned long *) time_addr = SEP_TIME_VAL_TOKEN;
    *(unsigned long *) (time_addr + 4) = time.tv_sec;

    edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
    edbg("SEP Driver:time_addr is %lu\n", time_addr);
    edbg("SEP Driver:sep->message_shared_area_addr is %lu\n", sep->message_shared_area_addr);

    /* set the output parameters if needed */
    *address_ptr = sep_shared_area_virt_to_phys(sep, time_addr);

    *time_in_sec_ptr = time.tv_sec;

    dbg("SEP Driver:<-------- sep_set_time end\n");
This function raises an interrupt to SEP signalling that it has a new
command in the message shared area. The current time is stamped into the
message first (via sep_set_time).
static void sep_send_command_handler(struct sep_device *sep)
    dbg("SEP Driver:--------> sep_send_command_handler start\n");
    /* stamp the current time into the outgoing message */
    sep_set_time(sep, 0, 0);

    for (count = 0; count < 12 * 4; count += 4)
        edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + count)));

    /* send interrupt to SEP */
    sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
    dbg("SEP Driver:<-------- sep_send_command_handler end\n");
This function raises an interrupt to SEP signalling that it has a
new (reply) command from the HOST, by writing the send counter to GPR2.
static void sep_send_reply_command_handler(struct sep_device *sep)
    dbg("SEP Driver:--------> sep_send_reply_command_handler start\n");

    for (count = 0; count < 12 * 4; count += 4)
        edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + count)));

    /* send the interrupt to SEP */
    sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
    /* update both counters */
    dbg("SEP Driver:<-------- sep_send_reply_command_handler end\n");
This function handles the allocate-data-pool-memory request (ioctl).
It bump-allocates command_args.num_bytes from the shared-area data pool,
computes the physical address and the offset of the allocation from the
mapped address, and copies them back to user space so that callers
in user space can calculate the exact virtual address of this memory.
static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
    struct sep_driver_alloc_t command_args;

    dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");

    error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));

    /* allocate memory: reject if the pool would overflow */
    if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
        /* FIXME: ENOMEM ? */
        /* NOTE(review): the sum above can wrap for a huge num_bytes -
           validate num_bytes before adding */

    /* set the virtual and physical address */
    command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
    command_args.phys_address = sep->shared_area_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;

    /* write the memory back to the user space */
    error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));

    /* record the allocation (bump pointer; there is no per-allocation free) */
    sep->data_pool_bytes_allocated += command_args.num_bytes;

    dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
This function handles the write-into-allocated-data-pool command: copies
num_bytes from the user buffer (app_address) into the kernel data-pool
address (datapool_address), after a range check on the start address.
static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
    unsigned long virt_address;
    unsigned long app_in_address;
    unsigned long num_bytes;
    unsigned long data_pool_area_addr;

    dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");

    /* get the application address */
    error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));

    /* get the virtual kernel address address */
    error = get_user(virt_address, &(((struct sep_driver_write_t *) arg)->datapool_address));

    /* get the number of bytes */
    error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));

    /* calculate the start of the data pool */
    data_pool_area_addr = sep->shared_area + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;

    /* check that the range of the virtual kernel address is correct.
       NOTE(review): only the start address is checked - virt_address +
       num_bytes may still run past the pool (see the FIXME in the read
       handler) */
    if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
        /* FIXME: EINVAL ? */

    /* copy the application data */
    error = copy_from_user((void *) virt_address, (void *) app_in_address, num_bytes);

    dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
This function handles the read-from-data-pool command: copies num_bytes
from the kernel data-pool address (datapool_address) out to the user
buffer (app_address), after a range check on the start address.
static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
    /* virtual address of dest application buffer */
    unsigned long app_out_address;
    /* virtual address of the data pool */
    unsigned long virt_address;
    unsigned long num_bytes;
    unsigned long data_pool_area_addr;

    dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");

    /* get the application address */
    error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));

    /* get the virtual kernel address address */
    error = get_user(virt_address, &(((struct sep_driver_write_t *) arg)->datapool_address));

    /* get the number of bytes */
    error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));

    /* calculate the start of the data pool */
    data_pool_area_addr = sep->shared_area + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;

    /* FIXME: These are incomplete all over the driver: what about + len
       and when doing that also overflows */
    /* check that the range of the virtual kernel address is correct */
    if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {

    /* copy the application data */
    error = copy_to_user((void *) app_out_address, (void *) virt_address, num_bytes);

    dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
This function releases all the application virtual buffer physical pages
that were previously locked (by get_user_pages). For written ("out")
buffers the pages are marked dirty before release; read-only ("in")
buffers are just released. Finally the page array itself is freed.
static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
    for (count = 0; count < num_pages; count++) {
        /* the out array was written, therefore the data was changed */
        if (!PageReserved(page_array_ptr[count]))
            SetPageDirty(page_array_ptr[count]);
        page_cache_release(page_array_ptr[count]);

    /* free in pages - the data was only read, therefore no update was done
       on the pages */
    for (count = 0; count < num_pages; count++)
        page_cache_release(page_array_ptr[count]);

    kfree(page_array_ptr);
This function locks all the physical pages of the kernel virtual buffer
and constructs a basic LLI array, where each entry holds the physical
page address and the size that application data holds in this physical page.
Kernel memory needs no get_user_pages: physical addresses come straight
from virt_to_phys().
static int sep_lock_kernel_pages(struct sep_device *sep,
                 unsigned long kernel_virt_addr,
                 unsigned long data_size,
                 unsigned long *num_pages_ptr,
                 struct sep_lli_entry_t **lli_array_ptr,
                 struct page ***page_array_ptr)
    /* the page of the end address of the buffer */
    unsigned long end_page;
    /* the page of the start address of the buffer */
    unsigned long start_page;
    /* the range in pages */
    unsigned long num_pages;
    struct sep_lli_entry_t *lli_array;
    /* next kernel address to map */
    unsigned long next_kernel_address;

    dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");

    /* set start and end pages and num pages */
    end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
    start_page = kernel_virt_addr >> PAGE_SHIFT;
    num_pages = end_page - start_page + 1;

    edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
    edbg("SEP Driver: data_size is %lu\n", data_size);
    edbg("SEP Driver: start_page is %lx\n", start_page);
    edbg("SEP Driver: end_page is %lx\n", end_page);
    edbg("SEP Driver: num_pages is %lu\n", num_pages);

    lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
    edbg("SEP Driver: kmalloc for lli_array failed\n");

    /* set the start address of the first page - app data may start not at
       the beginning of the page */
    lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);

    /* check that not all the data is in the first page only */
    if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
        lli_array[0].block_size = data_size;
    lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));

    dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);

    /* advance the address to the start of the next page */
    next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;

    /* go from the second page to the one before the last: each gets a
       full PAGE_SIZE entry */
    for (count = 1; count < (num_pages - 1); count++) {
        lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
        lli_array[count].block_size = PAGE_SIZE;

        edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
        next_kernel_address += PAGE_SIZE;

    /* if more than 1 page locked - then update for the last page size needed */
    /* update the address of the last page */
    lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);

    /* set the size of the last page: the tail of the buffer inside it */
    lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);

    if (lli_array[count].block_size == 0) {
        dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
        dbg("data_size is %lu\n", data_size);

    edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);

    /* set output params */
    *lli_array_ptr = lli_array;
    *num_pages_ptr = num_pages;

    dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
This function locks all the physical pages of the application virtual buffer
(via get_user_pages) and constructs a basic LLI array, where each entry
holds the physical page address and the size that application data holds
in this physical page. On partial pinning the already-pinned pages are
released and the function exits with an error.
static int sep_lock_user_pages(struct sep_device *sep,
                unsigned long app_virt_addr,
                unsigned long data_size,
                unsigned long *num_pages_ptr,
                struct sep_lli_entry_t **lli_array_ptr,
                struct page ***page_array_ptr)
    /* the page of the end address of the user space buffer */
    unsigned long end_page;
    /* the page of the start address of the user space buffer */
    unsigned long start_page;
    /* the range in pages */
    unsigned long num_pages;
    struct page **page_array;
    struct sep_lli_entry_t *lli_array;

    dbg("SEP Driver:--------> sep_lock_user_pages start\n");

    /* set start and end pages and num pages */
    end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
    start_page = app_virt_addr >> PAGE_SHIFT;
    num_pages = end_page - start_page + 1;

    edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
    edbg("SEP Driver: data_size is %lu\n", data_size);
    edbg("SEP Driver: start_page is %lu\n", start_page);
    edbg("SEP Driver: end_page is %lu\n", end_page);
    edbg("SEP Driver: num_pages is %lu\n", num_pages);

    /* allocate array of pages structure pointers */
    page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
    edbg("SEP Driver: kmalloc for page_array failed\n");

    lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
    edbg("SEP Driver: kmalloc for lli_array failed\n");
    goto end_function_with_error1;

    /* convert the application virtual address into a set of physical
       pages: pin them for write (third flag = 1) */
    down_read(&current->mm->mmap_sem);
    result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
    up_read(&current->mm->mmap_sem);

    /* check the number of pages locked - if not all then exit with error */
    if (result != num_pages) {
        dbg("SEP Driver: not all pages locked by get_user_pages\n");
        goto end_function_with_error2;

    /* flush the cache so the device sees up-to-date data */
    for (count = 0; count < num_pages; count++)
        flush_dcache_page(page_array[count]);

    /* set the start address of the first page - app data may start not at
       the beginning of the page */
    lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));

    /* check that not all the data is in the first page only */
    if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
        lli_array[0].block_size = data_size;
    lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

    dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);

    /* go from the second page to the one before the last: full pages */
    for (count = 1; count < (num_pages - 1); count++) {
        lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
        lli_array[count].block_size = PAGE_SIZE;

        edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);

    /* if more than 1 page locked - then update for the last page size needed */
    /* update the address of the last page */
    lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);

    /* set the size of the last page: the tail of the buffer inside it */
    lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);

    if (lli_array[count].block_size == 0) {
        dbg("app_virt_addr is %08lx\n", app_virt_addr);
        dbg("data_size is %lu\n", data_size);

    edbg("lli_array[%lu].physical_address is %08lx, \
	lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);

    /* set output params */
    *lli_array_ptr = lli_array;
    *num_pages_ptr = num_pages;
    *page_array_ptr = page_array;

      end_function_with_error2:
    /* unpin the pages pinned above */
    for (count = 0; count < num_pages; count++)
        page_cache_release(page_array[count]);

      end_function_with_error1:

    dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
986 this function calculates the size of data that can be inserted into the lli
987 table from this array the condition is that either the table is full
988 (all etnries are entered), or there are no more entries in the lli array
990 static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
992 unsigned long table_data_size = 0;
993 unsigned long counter;
995 /* calculate the data in the out lli table if till we fill the whole
996 table or till the data has ended */
997 for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
998 table_data_size += lli_in_array_ptr[counter].block_size;
999 return table_data_size;
This function builds one LLI table from the lli_array according to
the given size of data: entries are copied from the array into the table
until table_data_size bytes are covered; a partially consumed array entry
is split (its address/size adjusted in place), and a terminating "info"
entry (address 0xffffffff, size 0) is appended. Outputs the number of
array entries consumed and the number of table entries written.
static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
    unsigned long curr_table_data_size;
    /* counter of lli array entry */
    unsigned long array_counter;

    dbg("SEP Driver:--------> sep_build_lli_table start\n");

    /* init current table data size and lli array entry counter */
    curr_table_data_size = 0;

    /* start at 1: the info entry always occupies one slot */
    *num_table_entries_ptr = 1;

    edbg("SEP Driver:table_data_size is %lu\n", table_data_size);

    /* fill the table till table size reaches the needed amount */
    while (curr_table_data_size < table_data_size) {
        /* update the number of entries in table */
        (*num_table_entries_ptr)++;

        lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
        lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
        curr_table_data_size += lli_table_ptr->block_size;

        edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
        edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
        edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

        /* check for overflow of the table data: the last array entry may
           be only partially consumed by this table */
        if (curr_table_data_size > table_data_size) {
            edbg("SEP Driver:curr_table_data_size > table_data_size\n");

            /* shrink the table entry to the bytes actually needed */
            lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);

            /* advance the physical address in the lli array past the
               consumed part */
            lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;

            /* update the block size left in the lli array */
            lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);

        /* advance to the next entry in the lli_array */

        edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
        edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

        /* move to the next entry in table */

    /* set the info entry to default (terminator) */
    lli_table_ptr->physical_address = 0xffffffff;
    lli_table_ptr->block_size = 0;

    edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
    edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
    edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

    /* set the output parameter: how many array entries were consumed */
    *num_processed_entries_ptr += array_counter;

    edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
    dbg("SEP Driver:<-------- sep_build_lli_table end\n");
1074 this function goes over the list of the print created tables and
1077 static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
1079 unsigned long table_count;
1080 unsigned long entries_count;
1082 dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
1085 while ((unsigned long) lli_table_ptr != 0xffffffff) {
1086 edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
1087 edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);
1089 /* print entries of the table (without info entry) */
1090 for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
1091 edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
1092 edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
1095 /* point to the info entry */
1098 edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1099 edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1102 table_data_size = lli_table_ptr->block_size & 0xffffff;
1103 num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1104 lli_table_ptr = (struct sep_lli_entry_t *)
1105 (lli_table_ptr->physical_address);
1107 edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);
1109 if ((unsigned long) lli_table_ptr != 0xffffffff)
1110 lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_area_phys_to_virt(sep, (unsigned long) lli_table_ptr);
1114 dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
1119 This function prepares only input DMA table for synhronic symmetric
1122 static int sep_prepare_input_dma_table(struct sep_device *sep,
1123 unsigned long app_virt_addr,
1124 unsigned long data_size,
1125 unsigned long block_size,
1126 unsigned long *lli_table_ptr,
1127 unsigned long *num_entries_ptr,
1128 unsigned long *table_data_size_ptr,
1129 bool isKernelVirtualAddress)
1131 /* pointer to the info entry of the table - the last entry */
1132 struct sep_lli_entry_t *info_entry_ptr;
1133 /* array of pointers ot page */
1134 struct sep_lli_entry_t *lli_array_ptr;
1135 /* points to the first entry to be processed in the lli_in_array */
1136 unsigned long current_entry;
1137 /* num entries in the virtual buffer */
1138 unsigned long sep_lli_entries;
1139 /* lli table pointer */
1140 struct sep_lli_entry_t *in_lli_table_ptr;
1141 /* the total data in one table */
1142 unsigned long table_data_size;
1143 /* number of entries in lli table */
1144 unsigned long num_entries_in_table;
1145 /* next table address */
1146 unsigned long lli_table_alloc_addr;
1147 unsigned long result;
1149 dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
1151 edbg("SEP Driver:data_size is %lu\n", data_size);
1152 edbg("SEP Driver:block_size is %lu\n", block_size);
1154 /* initialize the pages pointers */
1155 sep->in_page_array = 0;
1156 sep->in_num_pages = 0;
1158 if (data_size == 0) {
1159 /* special case - created 2 entries table with zero data */
1160 in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
1161 in_lli_table_ptr->physical_address = sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1162 in_lli_table_ptr->block_size = 0;
1165 in_lli_table_ptr->physical_address = 0xFFFFFFFF;
1166 in_lli_table_ptr->block_size = 0;
1168 *lli_table_ptr = sep->shared_area_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1169 *num_entries_ptr = 2;
1170 *table_data_size_ptr = 0;
1175 /* check if the pages are in Kernel Virtual Address layout */
1176 if (isKernelVirtualAddress == true)
1177 /* lock the pages of the kernel buffer and translate them to pages */
1178 result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1180 /* lock the pages of the user buffer and translate them to pages */
1181 result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1186 edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);
1190 sep_lli_entries = sep->in_num_pages;
1192 /* initiate to point after the message area */
1193 lli_table_alloc_addr = sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1195 /* loop till all the entries in in array are not processed */
1196 while (current_entry < sep_lli_entries) {
1197 /* set the new input and output tables */
1198 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1200 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1202 /* calculate the maximum size of data for input table */
1203 table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
1205 /* now calculate the table size so that it will be module block size */
1206 table_data_size = (table_data_size / block_size) * block_size;
1208 edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
1210 /* construct input lli table */
1211 sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, ¤t_entry, &num_entries_in_table, table_data_size);
1213 if (info_entry_ptr == 0) {
1214 /* set the output parameters to physical addresses */
1215 *lli_table_ptr = sep_shared_area_virt_to_phys(sep, (unsigned long) in_lli_table_ptr);
1216 *num_entries_ptr = num_entries_in_table;
1217 *table_data_size_ptr = table_data_size;
1219 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
1221 /* update the info entry of the previous in table */
1222 info_entry_ptr->physical_address = sep_shared_area_virt_to_phys(sep, (unsigned long) in_lli_table_ptr);
1223 info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1226 /* save the pointer to the info entry of the current tables */
1227 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1230 /* print input tables */
1231 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1232 sep_shared_area_phys_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
1234 /* the array of the pages */
1235 kfree(lli_array_ptr);
1237 dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
1243 This function creates the input and output dma tables for
1244 symmetric operations (AES/DES) according to the block size from LLI arays
1246 static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
1247 struct sep_lli_entry_t *lli_in_array,
1248 unsigned long sep_in_lli_entries,
1249 struct sep_lli_entry_t *lli_out_array,
1250 unsigned long sep_out_lli_entries,
1251 unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
1253 /* points to the area where next lli table can be allocated */
1254 unsigned long lli_table_alloc_addr;
1255 /* input lli table */
1256 struct sep_lli_entry_t *in_lli_table_ptr;
1257 /* output lli table */
1258 struct sep_lli_entry_t *out_lli_table_ptr;
1259 /* pointer to the info entry of the table - the last entry */
1260 struct sep_lli_entry_t *info_in_entry_ptr;
1261 /* pointer to the info entry of the table - the last entry */
1262 struct sep_lli_entry_t *info_out_entry_ptr;
1263 /* points to the first entry to be processed in the lli_in_array */
1264 unsigned long current_in_entry;
1265 /* points to the first entry to be processed in the lli_out_array */
1266 unsigned long current_out_entry;
1267 /* max size of the input table */
1268 unsigned long in_table_data_size;
1269 /* max size of the output table */
1270 unsigned long out_table_data_size;
1271 /* flag te signifies if this is the first tables build from the arrays */
1272 unsigned long first_table_flag;
1273 /* the data size that should be in table */
1274 unsigned long table_data_size;
1275 /* number of etnries in the input table */
1276 unsigned long num_entries_in_table;
1277 /* number of etnries in the output table */
1278 unsigned long num_entries_out_table;
1280 dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
1282 /* initiate to pint after the message area */
1283 lli_table_alloc_addr = sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1285 current_in_entry = 0;
1286 current_out_entry = 0;
1287 first_table_flag = 1;
1288 info_in_entry_ptr = 0;
1289 info_out_entry_ptr = 0;
1291 /* loop till all the entries in in array are not processed */
1292 while (current_in_entry < sep_in_lli_entries) {
1293 /* set the new input and output tables */
1294 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1296 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1298 /* set the first output tables */
1299 out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1301 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1303 /* calculate the maximum size of data for input table */
1304 in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));
1306 /* calculate the maximum size of data for output table */
1307 out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));
1309 edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
1310 edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
1312 /* check where the data is smallest */
1313 table_data_size = in_table_data_size;
1314 if (table_data_size > out_table_data_size)
1315 table_data_size = out_table_data_size;
1317 /* now calculate the table size so that it will be module block size */
1318 table_data_size = (table_data_size / block_size) * block_size;
1320 dbg("SEP Driver:table_data_size is %lu\n", table_data_size);
1322 /* construct input lli table */
1323 sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, ¤t_in_entry, &num_entries_in_table, table_data_size);
1325 /* construct output lli table */
1326 sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, ¤t_out_entry, &num_entries_out_table, table_data_size);
1328 /* if info entry is null - this is the first table built */
1329 if (info_in_entry_ptr == 0) {
1330 /* set the output parameters to physical addresses */
1331 *lli_table_in_ptr = sep_shared_area_virt_to_phys(sep, (unsigned long) in_lli_table_ptr);
1332 *in_num_entries_ptr = num_entries_in_table;
1333 *lli_table_out_ptr = sep_shared_area_virt_to_phys(sep, (unsigned long) out_lli_table_ptr);
1334 *out_num_entries_ptr = num_entries_out_table;
1335 *table_data_size_ptr = table_data_size;
1337 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
1338 edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
1340 /* update the info entry of the previous in table */
1341 info_in_entry_ptr->physical_address = sep_shared_area_virt_to_phys(sep, (unsigned long) in_lli_table_ptr);
1342 info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1344 /* update the info entry of the previous in table */
1345 info_out_entry_ptr->physical_address = sep_shared_area_virt_to_phys(sep, (unsigned long) out_lli_table_ptr);
1346 info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
1349 /* save the pointer to the info entry of the current tables */
1350 info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1351 info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
1353 edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
1354 edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
1355 edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
1358 /* print input tables */
1359 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1360 sep_shared_area_phys_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
1361 /* print output tables */
1362 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1363 sep_shared_area_phys_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
1364 dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
1370 This function builds input and output DMA tables for synhronic
1371 symmetric operations (AES, DES). It also checks that each table
1372 is of the modular block size
1374 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
1375 unsigned long app_virt_in_addr,
1376 unsigned long app_virt_out_addr,
1377 unsigned long data_size,
1378 unsigned long block_size,
1379 unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
1381 /* array of pointers of page */
1382 struct sep_lli_entry_t *lli_in_array;
1383 /* array of pointers of page */
1384 struct sep_lli_entry_t *lli_out_array;
1387 dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
1389 /* initialize the pages pointers */
1390 sep->in_page_array = 0;
1391 sep->out_page_array = 0;
1393 /* check if the pages are in Kernel Virtual Address layout */
1394 if (isKernelVirtualAddress == true) {
1395 /* lock the pages of the kernel buffer and translate them to pages */
1396 result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1398 edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
1402 /* lock the pages of the user buffer and translate them to pages */
1403 result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1405 edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
1410 if (isKernelVirtualAddress == true) {
1411 result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1413 edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
1414 goto end_function_with_error1;
1417 result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1419 edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
1420 goto end_function_with_error1;
1423 edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
1424 edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
1425 edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1428 /* call the fucntion that creates table from the lli arrays */
1429 result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
1431 edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
1432 goto end_function_with_error2;
1435 /* fall through - free the lli entry arrays */
1436 dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
1437 dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
1438 dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
1439 end_function_with_error2:
1440 kfree(lli_out_array);
1441 end_function_with_error1:
1442 kfree(lli_in_array);
1444 dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
1450 this function handles tha request for creation of the DMA table
1451 for the synchronic symmetric operations (AES,DES)
1453 static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
1457 /* command arguments */
1458 struct sep_driver_build_sync_table_t command_args;
1460 dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
1462 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
1466 edbg("app_in_address is %08lx\n", command_args.app_in_address);
1467 edbg("app_out_address is %08lx\n", command_args.app_out_address);
1468 edbg("data_size is %lu\n", command_args.data_in_size);
1469 edbg("block_size is %lu\n", command_args.block_size);
1471 /* check if we need to build only input table or input/output */
1472 if (command_args.app_out_address)
1473 /* prepare input and output tables */
1474 error = sep_prepare_input_output_dma_table(sep,
1475 command_args.app_in_address,
1476 command_args.app_out_address,
1477 command_args.data_in_size,
1478 command_args.block_size,
1479 &command_args.in_table_address,
1480 &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1482 /* prepare input tables */
1483 error = sep_prepare_input_dma_table(sep,
1484 command_args.app_in_address,
1485 command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1490 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t));
1491 /* FIXME: wrong error returned ! */
1493 dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
1498 this function handles the request for freeing dma table for synhronic actions
1500 static int sep_free_dma_table_data_handler(struct sep_device *sep)
1502 dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
1504 /* free input pages array */
1505 sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
1507 /* free output pages array if needed */
1508 if (sep->out_page_array)
1509 sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
1511 /* reset all the values */
1512 sep->in_page_array = 0;
1513 sep->out_page_array = 0;
1514 sep->in_num_pages = 0;
1515 sep->out_num_pages = 0;
1516 dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
1521 this function find a space for the new flow dma table
1523 static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
1524 unsigned long **table_address_ptr)
1527 /* pointer to the id field of the flow dma table */
1528 unsigned long *start_table_ptr;
1529 unsigned long flow_dma_area_start_addr;
1530 unsigned long flow_dma_area_end_addr;
1531 /* maximum table size in words */
1532 unsigned long table_size_in_words;
1534 /* find the start address of the flow DMA table area */
1535 flow_dma_area_start_addr = sep->shared_area + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1537 /* set end address of the flow table area */
1538 flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
1540 /* set table size in words */
1541 table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
1543 /* set the pointer to the start address of DMA area */
1544 start_table_ptr = (unsigned long *) flow_dma_area_start_addr;
1546 /* find the space for the next table */
1547 while (((*start_table_ptr & 0x7FFFFFFF) != 0) && ((unsigned long) start_table_ptr < flow_dma_area_end_addr))
1548 start_table_ptr += table_size_in_words;
1550 /* check if we reached the end of floa tables area */
1551 if ((unsigned long) start_table_ptr >= flow_dma_area_end_addr)
1554 *table_address_ptr = start_table_ptr;
1560 This function creates one DMA table for flow and returns its data,
1561 and pointer to its info entry
1563 static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
1564 unsigned long virt_buff_addr,
1565 unsigned long virt_buff_size,
1566 struct sep_lli_entry_t *table_data,
1567 struct sep_lli_entry_t **info_entry_ptr,
1568 struct sep_flow_context_t *flow_data_ptr,
1569 bool isKernelVirtualAddress)
1572 /* the range in pages */
1573 unsigned long lli_array_size;
1574 struct sep_lli_entry_t *lli_array;
1575 struct sep_lli_entry_t *flow_dma_table_entry_ptr;
1576 unsigned long *start_dma_table_ptr;
1577 /* total table data counter */
1578 unsigned long dma_table_data_count;
1579 /* pointer that will keep the pointer to the pages of the virtual buffer */
1580 struct page **page_array_ptr;
1581 unsigned long entry_count;
1583 /* find the space for the new table */
1584 error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
1588 /* check if the pages are in Kernel Virtual Address layout */
1589 if (isKernelVirtualAddress == true)
1590 /* lock kernel buffer in the memory */
1591 error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1593 /* lock user buffer in the memory */
1594 error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1599 /* set the pointer to page array at the beginning of table - this table is
1600 now considered taken */
1601 *start_dma_table_ptr = lli_array_size;
1603 /* point to the place of the pages pointers of the table */
1604 start_dma_table_ptr++;
1606 /* set the pages pointer */
1607 *start_dma_table_ptr = (unsigned long) page_array_ptr;
1609 /* set the pointer to the first entry */
1610 flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
1612 /* now create the entries for table */
1613 for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
1614 flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
1616 flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
1618 /* set the total data of a table */
1619 dma_table_data_count += lli_array[entry_count].block_size;
1621 flow_dma_table_entry_ptr++;
1624 /* set the physical address */
1625 table_data->physical_address = virt_to_phys(start_dma_table_ptr);
1627 /* set the num_entries and total data size */
1628 table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
1630 /* set the info entry */
1631 flow_dma_table_entry_ptr->physical_address = 0xffffffff;
1632 flow_dma_table_entry_ptr->block_size = 0;
1634 /* set the pointer to info entry */
1635 *info_entry_ptr = flow_dma_table_entry_ptr;
1637 /* the array of the lli entries */
1646 This function creates a list of tables for flow and returns the data for
1647 the first and last tables of the list
1649 static int sep_prepare_flow_dma_tables(struct sep_device *sep,
1650 unsigned long num_virtual_buffers,
1651 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
1654 unsigned long virt_buff_addr;
1655 unsigned long virt_buff_size;
1656 struct sep_lli_entry_t table_data;
1657 struct sep_lli_entry_t *info_entry_ptr;
1658 struct sep_lli_entry_t *prev_info_entry_ptr;
1663 prev_info_entry_ptr = 0;
1665 /* init the first table to default */
1666 table_data.physical_address = 0xffffffff;
1667 first_table_data_ptr->physical_address = 0xffffffff;
1668 table_data.block_size = 0;
1670 for (i = 0; i < num_virtual_buffers; i++) {
1671 /* get the virtual buffer address */
1672 error = get_user(virt_buff_addr, &first_buff_addr);
1676 /* get the virtual buffer size */
1678 error = get_user(virt_buff_size, &first_buff_addr);
1682 /* advance the address to point to the next pair of address|size */
1685 /* now prepare the one flow LLI table from the data */
1686 error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
1691 /* if this is the first table - save it to return to the user
1693 *first_table_data_ptr = table_data;
1695 /* set the pointer to info entry */
1696 prev_info_entry_ptr = info_entry_ptr;
1698 /* not first table - the previous table info entry should
1700 prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
1702 /* set the pointer to info entry */
1703 prev_info_entry_ptr = info_entry_ptr;
1707 /* set the last table data */
1708 *last_table_data_ptr = table_data;
1714 this function goes over all the flow tables connected to the given
1715 table and deallocate them
1717 static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
1720 unsigned long *table_ptr;
1721 /* end address of the flow dma area */
1722 unsigned long num_entries;
1723 unsigned long num_pages;
1724 struct page **pages_ptr;
1725 /* maximum table size in words */
1726 struct sep_lli_entry_t *info_entry_ptr;
1728 /* set the pointer to the first table */
1729 table_ptr = (unsigned long *) first_table_ptr->physical_address;
1731 /* set the num of entries */
1732 num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
1733 & SEP_NUM_ENTRIES_MASK;
1735 /* go over all the connected tables */
1736 while (*table_ptr != 0xffffffff) {
1737 /* get number of pages */
1738 num_pages = *(table_ptr - 2);
1740 /* get the pointer to the pages */
1741 pages_ptr = (struct page **) (*(table_ptr - 1));
1743 /* free the pages */
1744 sep_free_dma_pages(pages_ptr, num_pages, 1);
1746 /* goto to the info entry */
1747 info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
1749 table_ptr = (unsigned long *) info_entry_ptr->physical_address;
1750 num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1757 This function returns pointer to the flow data structure
1758 that contains the given id
1762 static int sep_find_flow_context(struct sep_device *sep,
1763 unsigned long flow_id,
1764 struct sep_flow_context_t **flow_data_ptr)
1766 unsigned long count;
1770 always search for flow with id default first - in case we
1771 already started working on the flow there can be no situation
1772 when 2 flows are with default flag
1774 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
1775 if (sep->flows[count].flow_id == flow_id) {
1776 *flow_data_ptr = &sep->flows[count];
1781 if (count == SEP_DRIVER_NUM_FLOWS)
1790 this function handles the request to create the DMA tables for flow
1792 static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
1796 struct sep_driver_build_flow_table_t command_args;
1797 /* first table - output */
1798 struct sep_lli_entry_t first_table_data;
1799 /* dma table data */
1800 struct sep_lli_entry_t last_table_data;
1801 /* pointer to the info entry of the previuos DMA table */
1802 struct sep_lli_entry_t *prev_info_entry_ptr;
1803 /* pointer to the flow data strucutre */
1804 struct sep_flow_context_t *flow_context_ptr;
1806 dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
1808 /* init variables */
1809 prev_info_entry_ptr = 0;
1810 first_table_data.physical_address = 0xffffffff;
1812 /* find the free structure for flow data */
1813 error = sep_find_flow_context(sep, SEP_FREE_FLOW_ID, &flow_context_ptr);
1817 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
1821 /* create flow tables */
1822 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1824 goto end_function_with_error;
1826 /* check if flow is static */
1827 if (!command_args.flow_type)
1828 /* point the info entry of the last to the info entry of the first */
1829 last_table_data = first_table_data;
1831 /* set output params */
1832 command_args.first_table_addr = first_table_data.physical_address;
1833 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1834 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1836 /* send the parameters to user application */
1837 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
1839 goto end_function_with_error;
1841 /* all the flow created - update the flow entry with temp id */
1842 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
1844 /* set the processing tables data in the context */
1845 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
1846 flow_context_ptr->input_tables_in_process = first_table_data;
1848 flow_context_ptr->output_tables_in_process = first_table_data;
1852 end_function_with_error:
1853 /* free the allocated tables */
1854 sep_deallocated_flow_tables(&first_table_data);
1856 dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
1861 this function handles add tables to flow
1863 static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
1866 unsigned long num_entries;
1867 struct sep_driver_add_flow_table_t command_args;
1868 struct sep_flow_context_t *flow_context_ptr;
1869 /* first dma table data */
1870 struct sep_lli_entry_t first_table_data;
1871 /* last dma table data */
1872 struct sep_lli_entry_t last_table_data;
1873 /* pointer to the info entry of the current DMA table */
1874 struct sep_lli_entry_t *info_entry_ptr;
1876 dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
1878 /* get input parameters */
1879 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
1883 /* find the flow structure for the flow id */
1884 error = sep_find_flow_context(sep, command_args.flow_id, &flow_context_ptr);
1888 /* prepare the flow dma tables */
1889 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1891 goto end_function_with_error;
1893 /* now check if there is already an existing add table for this flow */
1894 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
1895 /* this buffer was for input buffers */
1896 if (flow_context_ptr->input_tables_flag) {
1897 /* add table already exists - add the new tables to the end
1899 num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1901 info_entry_ptr = (struct sep_lli_entry_t *)
1902 (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1904 /* connect to list of tables */
1905 *info_entry_ptr = first_table_data;
1907 /* set the first table data */
1908 first_table_data = flow_context_ptr->first_input_table;
1910 /* set the input flag */
1911 flow_context_ptr->input_tables_flag = 1;
1913 /* set the first table data */
1914 flow_context_ptr->first_input_table = first_table_data;
1916 /* set the last table data */
1917 flow_context_ptr->last_input_table = last_table_data;
1918 } else { /* this is output tables */
1920 /* this buffer was for input buffers */
1921 if (flow_context_ptr->output_tables_flag) {
1922 /* add table already exists - add the new tables to
1923 the end of the previous */
1924 num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1926 info_entry_ptr = (struct sep_lli_entry_t *)
1927 (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1929 /* connect to list of tables */
1930 *info_entry_ptr = first_table_data;
1932 /* set the first table data */
1933 first_table_data = flow_context_ptr->first_output_table;
1935 /* set the input flag */
1936 flow_context_ptr->output_tables_flag = 1;
1938 /* set the first table data */
1939 flow_context_ptr->first_output_table = first_table_data;
1941 /* set the last table data */
1942 flow_context_ptr->last_output_table = last_table_data;
1945 /* set output params */
1946 command_args.first_table_addr = first_table_data.physical_address;
1947 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1948 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1950 /* send the parameters to user application */
1951 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
1952 end_function_with_error:
1953 /* free the allocated tables */
1954 sep_deallocated_flow_tables(&first_table_data);
1956 dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
1961 this function adds the flow-add message to the specific flow
/*
 * Copies a user-supplied "add flow tables" message into the context of an
 * existing flow, identified by command_args.flow_id.  Returns 0 or a
 * negative errno-style code (error paths are on lines missing from this
 * extract).
 * NOTE(review): the declaration of 'error', the function braces and the
 * 'if (error) goto ...' checks after each copy appear to be on missing
 * lines; control flow below is incomplete as shown.
 */
1963 static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
1966 struct sep_driver_add_message_t command_args;
1967 struct sep_flow_context_t *flow_context_ptr;
1969 dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
/* fetch the ioctl argument block from user space */
1971 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
/* reject over-long messages so the fixed-size context buffer cannot overflow */
1976 if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
1981 /* find the flow context */
1982 error = sep_find_flow_context(sep, command_args.flow_id, &flow_context_ptr);
1986 /* copy the message into context */
1987 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
1988 error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
1990 dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
1996 this function returns the physical and virtual addresses of the static pool
/*
 * Reports the bus (physical) and kernel-virtual addresses of the static
 * pool inside the shared area back to the user-space caller via 'arg'.
 * NOTE(review): 'error' declaration, braces and the return statement are
 * on lines missing from this extract.
 */
1998 static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
2001 struct sep_driver_static_pool_addr_t command_args;
2003 dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
2005 /*prepare the output parameters in the struct */
/* static pool lives at a fixed offset from the start of the shared area */
2006 command_args.physical_static_address = sep->shared_area_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2007 command_args.virtual_static_address = sep->shared_area + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2009 edbg("SEP Driver:physical_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
2011 /* send the parameters to user application */
2012 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
2013 dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
2018 this function gets the offset of the physical address from the start
/*
 * Translates a bus (physical) address supplied by user space into its
 * offset from the start of the shared area, and copies the result back.
 * Addresses below shared_area_bus are rejected (branch body is on a
 * missing line).
 * NOTE(review): 'error' declaration, braces and the upper-bound check (if
 * any) are on lines missing from this extract.
 */
2021 static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
2024 struct sep_driver_get_mapped_offset_t command_args;
2026 dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
2028 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
/* address must lie at or above the shared area base */
2032 if (command_args.physical_address < sep->shared_area_bus) {
2038 /*prepare the output parameters in the struct */
2039 command_args.offset = command_args.physical_address - sep->shared_area_bus;
2041 edbg("SEP Driver:physical_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
2043 /* send the parameters to user application */
2044 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
2046 dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
/*
 * Handles the SEP_IOCSEPSTART ioctl: polls GPR3 for a start-acknowledge
 * message from the SEP and, on failure indication, reads the error status
 * from GPR0 into 'error' (which is the value returned to the caller).
 * NOTE(review): the polling loop construct and the value test around the
 * GPR3 read are on lines missing from this extract.
 */
2054 static int sep_start_handler(struct sep_device *sep)
2056 unsigned long reg_val;
2057 unsigned long error = 0;
2059 dbg("SEP Driver:--------> sep_start_handler start\n");
2061 /* wait in polling for message from SEP */
2063 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2066 /* check the value */
2068 /* fatal error - read error status from GPRO */
2069 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2070 dbg("SEP Driver:<-------- sep_start_handler end\n");
2075 this function handles the request for SEP initialization
/*
 * Handles the SEP_IOCSEPINIT ioctl: downloads an initialization message
 * from user space word-by-word into the SEP's SRAM, signals the SEP via
 * GPR0, then polls GPR3 for the init result.  A GPR3 value of 0x1 means
 * init failed; the error status is then read from GPR0.
 * NOTE(review): braces, error checks after copy_from_user/get_user and the
 * return statement are on lines missing from this extract.
 */
2077 static int sep_init_handler(struct sep_device *sep, unsigned long arg)
2079 unsigned long message_word;
2080 unsigned long *message_ptr;
2081 struct sep_driver_init_t command_args;
2082 unsigned long counter;
2083 unsigned long error;
2084 unsigned long reg_val;
2086 dbg("SEP Driver:--------> sep_init_handler start\n");
2089 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
2091 dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user \n");
2096 /* PATCH - configure the DMA to single -burst instead of multi-burst */
2097 /*sep_configure_dma_burst(); */
2099 dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
/* user-space pointer to the init message words
   NOTE(review): dereferenced via get_user below, not validated here */
2101 message_ptr = (unsigned long *) command_args.message_addr;
2103 /* set the base address of the SRAM */
2104 sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
/* stream the whole message into SRAM, one word at a time */
2106 for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
2107 get_user(message_word, message_ptr);
2108 /* write data to SRAM */
2109 sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
2110 edbg("SEP Driver:message_word is %lu\n", message_word);
2111 /* wait for write complete */
2112 sep_wait_sram_write(sep);
2114 dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
/* signal SEP that the init message is in place */
2116 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
2119 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
/* NOTE(review): as shown, reg_val is read once and the while loop has an
   empty body, which would spin forever; the re-read inside the polling
   loop appears to be on a missing line — verify against the full source */
2120 while (!(reg_val & 0xFFFFFFFD));
2122 dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
2124 /* check the value */
2125 if (reg_val == 0x1) {
2126 edbg("SEP Driver:init failed\n");
/* 0x8060 is presumably the SEP software-monitor register — TODO confirm */
2128 error = sep_read_reg(sep, 0x8060);
2129 edbg("SEP Driver:sw monitor is %lu\n", error);
2131 /* fatal error - read error status from GPRO */
2132 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2133 edbg("SEP Driver:error is %lu\n", error);
2136 dbg("SEP Driver:<-------- sep_init_handler end\n");
2142 this function handles the request cache and resident reallocation
/*
 * Handles the SEP_IOCREALLOCCACHERES ioctl: copies the user-supplied
 * cache and resident images to their intended locations in the shared
 * area, computes the lowest of {resident, cache, shared area} bus
 * addresses as the new base address, and returns all new addresses to
 * user space.
 * NOTE(review): the second parameter ('unsigned long arg', presumably),
 * the 'error' declaration, braces and error-check branches are on lines
 * missing from this extract.
 */
2144 static int sep_realloc_cache_resident_handler(struct sep_device *sep,
2148 unsigned long phys_cache_address;
2149 unsigned long phys_resident_address;
2150 struct sep_driver_realloc_cache_resident_t command_args;
2153 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_realloc_cache_resident_t));
2157 /* copy cache and resident to their intended locations */
2158 error = sep_copy_cache_resident_to_area(sep, command_args.cache_addr, command_args.cache_size_in_bytes, command_args.resident_addr, command_args.resident_size_in_bytes, &phys_cache_address, &phys_resident_address);
/* start from the shared area base, then lower it as needed */
2162 command_args.new_base_addr = sep->shared_area_bus;
2164 /* find the new base address according to the lowest address between
2165 cache, resident and shared area */
2166 if (phys_resident_address < command_args.new_base_addr)
2167 command_args.new_base_addr = phys_resident_address;
2168 if (phys_cache_address < command_args.new_base_addr)
2169 command_args.new_base_addr = phys_cache_address;
2171 /* set the return parameters */
2172 command_args.new_cache_addr = phys_cache_address;
2173 command_args.new_resident_addr = phys_resident_address;
2175 /* set the new shared area */
2176 command_args.new_shared_area_addr = sep->shared_area_bus;
2178 edbg("SEP Driver:command_args.new_shared_area is %08lx\n", command_args.new_shared_area_addr);
2179 edbg("SEP Driver:command_args.new_base_addr is %08lx\n", command_args.new_base_addr);
2180 edbg("SEP Driver:command_args.new_resident_addr is %08lx\n", command_args.new_resident_addr);
2181 edbg("SEP Driver:command_args.new_cache_addr is %08lx\n", command_args.new_cache_addr);
2183 /* return to user */
2184 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_realloc_cache_resident_t));
2190 this function handles the request for get time
/*
 * Handles the time-request ioctl: sep_set_time() fills in the physical
 * address of the time buffer and the current time value, which are then
 * copied back to user space.
 * NOTE(review): braces, the 'error' declaration, error checks and the
 * return statement are on lines missing from this extract.
 */
2192 static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
2195 struct sep_driver_get_time_t command_args;
2197 error = sep_set_time(sep, &command_args.time_physical_address, &command_args.time_value);
2199 error = copy_to_user((void __user *)arg,
2200 &command_args, sizeof(struct sep_driver_get_time_t));
2206 This API handles the end transaction request
/*
 * Handles the SEP_IOCENDTRANSACTION ioctl: releases the per-transaction
 * SEP mutex.  The interrupt-masking / free_irq section is compiled out
 * ("#if 0"); in non-polling builds it would mask all interrupts and
 * release the IRQ line first.
 * NOTE(review): braces and the return statement are on lines missing from
 * this extract.
 */
2208 static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
2210 dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
2212 #if 0 /*!SEP_DRIVER_POLLING_MODE */
/* mask all SEP interrupts */
2214 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
2216 /* release IRQ line */
2217 free_irq(SEP_DIRVER_IRQ_NUM, sep);
2219 /* unlock the sep mutex, ending the transaction */
2220 mutex_unlock(&sep_mutex);
2223 dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
2230 This function handles the set flow id command
/*
 * Handles the SEP_IOCSETFLOWID ioctl: takes the flow id chosen by user
 * space and assigns it to the flow context that was just created (which
 * still carries the temporary id SEP_TEMP_FLOW_ID).
 * NOTE(review): 'error' declaration, braces, error-check branches and the
 * return statement are on lines missing from this extract.
 */
2232 static int sep_set_flow_id_handler(struct sep_device *sep, unsigned long arg)
2235 unsigned long flow_id;
2236 struct sep_flow_context_t *flow_data_ptr;
2238 dbg("------------>SEP Driver: sep_set_flow_id_handler start\n");
/* read just the flow_id field from the user-space struct */
2240 error = get_user(flow_id, &(((struct sep_driver_set_flow_id_t *) arg)->flow_id));
2244 /* find the flow data structure that was just used for creating new flow
2245 - its id should be default */
2246 error = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID, &flow_data_ptr);
2251 flow_data_ptr->flow_id = flow_id;
2254 dbg("SEP Driver:<-------- sep_set_flow_id_handler end\n");
/*
 * Main ioctl dispatcher for the SEP character device.  Validates that the
 * command belongs to this driver (magic number check) and forwards to the
 * matching handler.
 * NOTE(review): the 'switch (cmd) {' line, the 'break;' after each case,
 * the 'default:' label and the return statement are on lines missing from
 * this extract — each case below is a separate arm in the full source.
 */
2262 static int sep_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
2265 struct sep_device *sep = filp->private_data;
2267 dbg("------------>SEP Driver: ioctl start\n");
2269 edbg("SEP Driver: cmd is %x\n", cmd);
2271 /* check that the command is for sep device */
2272 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2276 case SEP_IOCSENDSEPCOMMAND:
2277 /* send command to SEP */
2278 sep_send_command_handler(sep);
2279 edbg("SEP Driver: after sep_send_command_handler\n");
2281 case SEP_IOCSENDSEPRPLYCOMMAND:
2282 /* send reply command to SEP */
2283 sep_send_reply_command_handler(sep);
2285 case SEP_IOCALLOCDATAPOLL:
2286 /* allocate data pool */
2287 error = sep_allocate_data_pool_memory_handler(sep, arg);
2289 case SEP_IOCWRITEDATAPOLL:
2290 /* write data into memory pool */
2291 error = sep_write_into_data_pool_handler(sep, arg);
2293 case SEP_IOCREADDATAPOLL:
2294 /* read data from data pool into application memory */
2295 error = sep_read_from_data_pool_handler(sep, arg);
2297 case SEP_IOCCREATESYMDMATABLE:
2298 /* create dma table for synchronous operation */
2299 error = sep_create_sync_dma_tables_handler(sep, arg);
2301 case SEP_IOCCREATEFLOWDMATABLE:
2302 /* create flow dma tables */
2303 error = sep_create_flow_dma_tables_handler(sep, arg);
2305 case SEP_IOCFREEDMATABLEDATA:
2306 /* free the pages */
2307 error = sep_free_dma_table_data_handler(sep);
2309 case SEP_IOCSETFLOWID:
2311 error = sep_set_flow_id_handler(sep, arg);
2313 case SEP_IOCADDFLOWTABLE:
2314 /* add tables to the dynamic flow */
2315 error = sep_add_flow_tables_handler(sep, arg);
2317 case SEP_IOCADDFLOWMESSAGE:
2318 /* add message of add tables to flow */
2319 error = sep_add_flow_tables_message_handler(sep, arg);
2321 case SEP_IOCSEPSTART:
2322 /* start command to sep */
2323 error = sep_start_handler(sep);
2325 case SEP_IOCSEPINIT:
2326 /* init command to sep */
2327 error = sep_init_handler(sep, arg);
2329 case SEP_IOCGETSTATICPOOLADDR:
2330 /* get the physical and virtual addresses of the static pool */
2331 error = sep_get_static_pool_addr_handler(sep, arg);
2333 case SEP_IOCENDTRANSACTION:
2334 error = sep_end_transaction_handler(sep, arg);
2336 case SEP_IOCREALLOCCACHERES:
2337 error = sep_realloc_cache_resident_handler(sep, arg);
2339 case SEP_IOCGETMAPPEDADDROFFSET:
2340 error = sep_get_physical_mapped_offset_handler(sep, arg);
2343 error = sep_get_time_handler(sep, arg);
2349 dbg("SEP Driver:<-------- ioctl end\n");
2355 #if !SEP_DRIVER_POLLING_MODE
2357 /* handler for flow done interrupt */
/*
 * Workqueue callback run when a flow-done interrupt is queued: frees the
 * input (and, when valid, output) DMA tables of the completed flow, and
 * if more input tables are pending, passes the stored message through the
 * shared area and signals the SEP via GPR2.  Serialized by sep_mutex.
 * NOTE(review): the cast from 'work' to sep_flow_context_t* is only valid
 * if flow_wq is the first member of the context struct — confirm against
 * the struct definition (container_of would be the safe form).
 * NOTE(review): 'sep' is used here but not visibly in scope in this
 * extract — presumably a file-scope device pointer; verify.
 */
2359 static void sep_flow_done_handler(struct work_struct *work)
2361 struct sep_flow_context_t *flow_data_ptr;
2363 /* obtain the mutex */
2364 mutex_lock(&sep_mutex);
2366 /* get the pointer to context */
2367 flow_data_ptr = (struct sep_flow_context_t *) work;
2369 /* free all the current input tables in sep */
2370 sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
2372 /* free all the current tables output tables in SEP (if needed) */
/* 0xffffffff marks an unused/invalid output table address */
2373 if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
2374 sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
2376 /* check if we have additional tables to be sent to SEP only input
2377 flag may be checked */
2378 if (flow_data_ptr->input_tables_flag) {
2379 /* copy the message to the shared RAM and signal SEP
   NOTE(review): argument order copies FROM sep->shared_area INTO
   flow_data_ptr->message, the opposite of what this comment says —
   verify the intended direction against the full source */
2380 memcpy((void *) flow_data_ptr->message, (void *) sep->shared_area, flow_data_ptr->message_size_in_bytes);
2382 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
2384 mutex_unlock(&sep_mutex);
2387 interrupt handler function
/*
 * Shared interrupt handler for the SEP device.  Reads the IRR register to
 * classify the interrupt: the flow-done path (bit 11) is currently
 * disabled by the hard-coded 'if (0 ...)', and the reply path (bit 13)
 * wakes sleepers on sep_event.  The handled bits are acknowledged by
 * writing reg_val back to the ICR register.
 */
2389 static irqreturn_t sep_inthandler(int irq, void *dev_id)
2391 irqreturn_t int_error;
2392 unsigned long error;
2393 unsigned long reg_val;
2394 unsigned long flow_id;
2395 struct sep_flow_context_t *flow_context_ptr;
2396 struct sep_device *sep = dev_id;
2398 int_error = IRQ_HANDLED;
2400 /* read the IRR register to check if this is SEP interrupt */
2401 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2402 edbg("SEP Interrupt - reg is %08lx\n", reg_val);
2404 /* check if this is the flow interrupt */
/* flow-done handling deliberately disabled: condition is always false */
2405 if (0 /*reg_val & (0x1 << 11) */ ) {
2406 /* read GPRO to find out the which flow is done
   NOTE(review): the read below is from the IRR register, not GPR0 as
   this comment claims — verify against the full source */
2407 flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2409 /* find the context of the flow (id is in the top 4 bits) */
2410 error = sep_find_flow_context(sep, flow_id >> 28, &flow_context_ptr);
2412 goto end_function_with_error;
/* defer the heavy work (table freeing, messaging) to process context */
2414 INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
2416 /* queue the work */
2417 queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
2420 /* check if this is reply interrupt from SEP */
2421 if (reg_val & (0x1 << 13)) {
2422 /* update the counter of reply messages */
2425 /* wake up the waiting process */
2426 wake_up(&sep_event);
/* not our interrupt (shared line) — report IRQ_NONE */
2428 int_error = IRQ_NONE;
2432 end_function_with_error:
2433 /* clear the interrupt */
2434 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
/*
 * Spins until the SEP busy register reports idle.
 * NOTE(review): the loop construct, the 'reg' declaration and the
 * idle-test around this read are on lines missing from this extract.
 */
2445 static void sep_wait_busy(struct sep_device *sep)
2450 reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
2455 PATCH for configuring the DMA to single burst instead of multi-burst
/*
 * Workaround ("PATCH"): forces the SEP DMA engine to single-burst mode.
 * Requests register access from the SEP via GPR0, writes 0 to the AHB
 * read/write bursts register, then releases the busy handshake.
 * NOTE(review): the wait-for-grant polling loop mentioned in the debug
 * message below is on lines missing from this extract.
 */
2457 static void sep_configure_dma_burst(struct sep_device *sep)
/* AHB burst-configuration register offset, local to this workaround */
2459 #define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
2461 dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
2463 /* request access to registers from SEP */
2464 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
2466 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
2470 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
2472 /* set the DMA burst register to single burst */
2473 sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
2475 /* release the sep busy */
2476 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
2479 dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
2484 Function that is activated on the successful probe of the SEP device
/*
 * PCI probe: brings up the (single-instance) SEP device.  Enables the PCI
 * device, allocates and maps the shared area, optionally reconfigures the
 * shared message area with the SEP, initializes flow contexts and the
 * flow workqueue, loads the ROM code, maps BAR0 registers, allocates the
 * RAR buffer, and (in interrupt mode) installs the IRQ handler with only
 * the GPR2 reply interrupt (bit 13) unmasked.
 * NOTE(review): the success 'return 0' path, several 'if (error)' checks
 * (e.g. after pci_enable_device, pci_resource_start, request_irq), the
 * declarations of 'error'/'counter'/'retVal', and the final return are on
 * lines missing from this extract; the labels at the bottom form the
 * usual goto-unwind error cleanup.
 */
2486 static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2489 struct sep_device *sep;
2491 int size; /* size of memory for allocation */
2493 edbg("Sep pci probe starting\n");
/* driver supports exactly one device instance (static sep_instance) */
2494 if (sep_dev != NULL) {
2495 dev_warn(&pdev->dev, "only one SEP supported.\n");
2499 /* enable the device */
2500 error = pci_enable_device(pdev);
2502 edbg("error enabling pci device\n");
2506 /* set the pci dev pointer */
2507 sep_dev = &sep_instance;
2508 sep = &sep_instance;
2510 edbg("sep->shared_area = %lx\n", (unsigned long) &sep->shared_area);
2511 /* transaction counter that coordinates the transactions between SEP
2514 /* counter for the messages from sep */
2516 /* counter for the number of bytes allocated in the pool
2517 for the current transaction */
2518 sep->data_pool_bytes_allocated = 0;
2520 /* calculate the total size for allocation */
2521 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2522 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2524 /* allocate the shared area */
2525 if (sep_map_and_alloc_shared_area(sep, size, &sep->shared_area, &sep->shared_area_bus)) {
2527 /* allocation failed */
2528 goto end_function_error;
2530 /* now set the memory regions */
2531 sep->message_shared_area_addr = sep->shared_area;
2533 edbg("SEP Driver: sep->message_shared_area_addr is %08lx\n", sep->message_shared_area_addr);
2535 #if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
2536 /* send the new SHARED MESSAGE AREA to the SEP */
2537 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_area_bus);
2539 /* poll for SEP response */
2540 retVal = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2541 while (retVal != 0xffffffff && retVal != sep->shared_area_bus)
2542 retVal = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2544 /* check the return value (register) */
2545 if (retVal != sep->shared_area_bus) {
2547 goto end_function_deallocate_sep_shared_area;
2550 /* init the flow contexts - mark every slot as free */
2551 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
2552 sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
2554 sep->flow_wq = create_singlethread_workqueue("sepflowwq");
2555 if (sep->flow_wq == NULL) {
2557 edbg("sep_driver:flow queue creation failed\n");
2558 goto end_function_deallocate_sep_shared_area;
2560 edbg("SEP Driver: create flow workqueue \n");
2561 /* load the rom code */
2562 sep_load_rom_code(sep);
/* hold a reference on the PCI device for the driver's lifetime */
2564 sep->pdev = pci_dev_get(pdev);
2566 /* get the io memory start address */
2567 sep->io_bus = pci_resource_start(pdev, 0);
2569 edbg("SEP Driver error pci resource start\n");
2570 goto end_function_deallocate_sep_shared_area;
2573 /* get the io memory end address */
2574 sep->io_end_bus = pci_resource_end(pdev, 0);
2575 if (!sep->io_end_bus) {
2576 edbg("SEP Driver error pci resource end\n");
2577 goto end_function_deallocate_sep_shared_area;
2580 sep->io_memory_size = sep->io_end_bus - sep->io_bus + 1;
2582 edbg("SEP Driver:io_bus is %08lx\n", sep->io_bus);
2584 edbg("SEP Driver:io_memory_end_phyaical_address is %08lx\n", sep->io_end_bus);
2586 edbg("SEP Driver:io_memory_size is %08lx\n", sep->io_memory_size);
/* map BAR0 uncached for register access */
2588 sep->io_addr = ioremap_nocache(sep->io_bus, sep->io_memory_size);
2589 if (!sep->io_addr) {
2590 edbg("SEP Driver error ioremap of io memory\n");
2591 goto end_function_deallocate_sep_shared_area;
2594 edbg("SEP Driver:io_addr is %p\n", sep->io_addr);
2596 sep->reg_addr = (void __iomem *) sep->io_addr;
2598 /* set up system base address and shared memory location */
2600 sep->rar_addr = kmalloc(2 * SEP_RAR_IO_MEM_REGION_SIZE, GFP_KERNEL);
2602 if (!sep->rar_addr) {
2603 edbg("SEP Driver:cant kmalloc rar\n");
2604 goto end_function_uniomap;
/* NOTE(review): __pa on a kmalloc'ed buffer gives a physical, not
   DMA/bus, address — works on x86 but is not the portable API */
2607 sep->rar_bus = __pa(sep->rar_addr);
2609 edbg("SEP Driver:rar_physical is %08lx\n", sep->rar_bus);
2610 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
2612 #if !SEP_DRIVER_POLLING_MODE
2614 edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
2616 /* clear ICR register */
2617 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2619 /* set the IMR register - open only GPR 2 */
2620 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2622 edbg("SEP Driver: about to call request_irq\n");
2623 /* get the interrupt line */
2624 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
2626 goto end_function_free_res;
2629 edbg("SEP Driver: about to write IMR REG_ADDR");
2631 /* set the IMR register - open only GPR 2 */
2632 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
/* ---- error unwind (reverse order of acquisition) ---- */
2634 end_function_free_res:
2635 kfree(sep->rar_addr);
2636 #endif /* SEP_DRIVER_POLLING_MODE */
2637 end_function_uniomap:
2638 iounmap(sep->io_addr);
2639 end_function_deallocate_sep_shared_area:
2640 /* de-allocate shared area */
2641 sep_unmap_and_free_shared_area(size, sep->shared_area, sep->shared_area_bus);
/* PCI IDs this driver binds to: Intel SEP security processor (0x080c).
   NOTE(review): the zero-terminator entry and closing brace of the table
   are on lines missing from this extract. */
2648 static struct pci_device_id sep_pci_id_tbl[] = {
2649 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
2653 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2655 /* field for registering driver to PCI device */
/* PCI driver registration descriptor; the .probe member (sep_probe) is on
   a line missing from this extract. */
2656 static struct pci_driver sep_pci_driver = {
2657 .name = "sep_sec_driver",
2658 .id_table = sep_pci_id_tbl,
2660 /* FIXME: remove handler */
2663 /* major and minor device numbers */
2664 static dev_t sep_devno;
2666 /* the files operations structure of the driver */
/* NOTE(review): .ioctl/.poll/.open members are on lines missing from this
   extract */
2667 static struct file_operations sep_file_operations = {
2668 .owner = THIS_MODULE,
2672 .release = sep_release,
2677 /* cdev struct of the driver */
2678 static struct cdev sep_cdev;
2681 this function registers the driver to the file system
/*
 * Registers the SEP character device with the kernel: dynamically
 * allocates one major/minor number, initializes the cdev with the file
 * operations and adds it.  On cdev_add failure the device numbers are
 * unregistered again.  Returns 0 on success, a negative value otherwise
 * (success return is on a line missing from this extract).
 */
2683 static int sep_register_driver_to_fs(void)
2685 int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
2687 edbg("sep_driver:major number allocation failed, retval is %d\n", ret_val);
2692 cdev_init(&sep_cdev, &sep_file_operations);
2693 sep_cdev.owner = THIS_MODULE;
2695 /* register the driver with the kernel */
2696 ret_val = cdev_add(&sep_cdev, sep_devno, 1);
2699 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
2700 goto end_function_unregister_devnum;
2705 end_function_unregister_devnum:
2707 /* unregister dev numbers */
2708 unregister_chrdev_region(sep_devno, 1);
2715 /*--------------------------------------------------------------
2717 ----------------------------------------------------------------*/
/*
 * Module init: registers the PCI driver first (despite the FIXME about
 * probe ordering), then registers the character device.  The two goto
 * labels unwind in reverse order on failure.
 * NOTE(review): the 'ret_val' declaration, the 'if (ret_val)' checks, the
 * success return and the final return are on lines missing from this
 * extract.
 */
2718 static int __init sep_init(void)
2721 dbg("SEP Driver:-------->Init start\n");
2722 /* FIXME: Probe can occur before we are ready to survive a probe */
2723 ret_val = pci_register_driver(&sep_pci_driver);
2725 edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
2726 goto end_function_unregister_from_fs;
2728 /* register driver to fs */
2729 ret_val = sep_register_driver_to_fs();
2731 goto end_function_unregister_pci;
2733 end_function_unregister_pci:
2734 pci_unregister_driver(&sep_pci_driver);
2735 end_function_unregister_from_fs:
2736 /* unregister from fs */
2737 cdev_del(&sep_cdev);
2738 /* unregister dev numbers */
2739 unregister_chrdev_region(sep_devno, 1);
2741 dbg("SEP Driver:<-------- Init end\n");
2746 /*-------------------------------------------------------------
2748 --------------------------------------------------------------*/
/*
 * Module exit: tears down in reverse of sep_init/sep_probe — removes the
 * cdev and device numbers, frees the shared area (size recomputed from
 * the same constants used at allocation) and unmaps the register window.
 * NOTE(review): the 'size' declaration and the guard around sep_dev being
 * non-NULL (if any) are on lines missing from this extract.
 */
2749 static void __exit sep_exit(void)
2753 dbg("SEP Driver:--------> Exit start\n");
2755 /* unregister from fs */
2756 cdev_del(&sep_cdev);
2757 /* unregister dev numbers */
2758 unregister_chrdev_region(sep_devno, 1);
2759 /* calculate the total size for de-allocation */
2760 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2761 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2762 /* FIXME: We need to do this in the unload for the device */
2763 /* free shared area */
2765 sep_unmap_and_free_shared_area(size, sep_dev->shared_area, sep_dev->shared_area_bus);
2766 edbg("SEP Driver: free pages SEP SHARED AREA \n");
2767 iounmap((void *) sep_dev->reg_addr);
2768 edbg("SEP Driver: iounmap \n");
2770 edbg("SEP Driver: release_mem_region \n");
2771 dbg("SEP Driver:<-------- Exit end\n");
/* module entry/exit hooks and license declaration */
2775 module_init(sep_init);
2776 module_exit(sep_exit);
2778 MODULE_LICENSE("GPL");