3 * modules/buffer/swap_buffer_module.c
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 * Copyright (C) Samsung Electronics, 2013
21 * 2013 Alexander Aksenov <a.aksenov@samsung.com>: SWAP Buffer implement
25 /* SWAP buffer queues implementation */
27 /* For all memory allocation/deallocation operations, except buffer memory
28 * allocation/deallocation, the following functions should be used:
29 * memory_allocation(size_t memory_size)
30 * memory_free(void* ptr)
32 * For subbuffer allocation/deallocation operations should be used
33 * buffer_allocation(size_t subbuffer_size)
34 * buffer_free(void *ptr, size_t subbuffer_size)
35 * To get buffer pointer for any usage, EXCEPT ALLOCATION AND DEALLOCATION
36 * use the following define:
37 * buffer_pointer(void *ptr_to_buffer_element_of_swap_buffer_structure)
38 * DO NOT USE SUBBUFFER PTR IN STRUCT SWAP_BUFFER WITHOUT THIS DEFINE!
39 * It will be ok for user space, but fail in kernel space.
41 * See space_dep_types_and_def.h for details */
45 #include "buffer_queue.h"
46 #include "buffer_description.h"
47 #include "swap_buffer_to_buffer_queue.h"
48 #include "space_dep_operations.h"
50 typedef struct swap_buffer* write_start_ptr_type;
51 typedef struct swap_buffer* write_end_ptr_type;
52 typedef struct swap_buffer* read_start_ptr_type;
53 typedef struct swap_buffer* read_end_ptr_type;
55 static write_start_ptr_type queue_write_start_ptr = NULL; //Points to the head of the
                                                           //write list (next writable)
58 static write_end_ptr_type queue_write_end_ptr = NULL; //Points to the tail of the
                                                        //write list (last appended)
61 static read_start_ptr_type queue_read_start_ptr = NULL; //Points to the read
                                                          //list head (next readable)
64 static read_end_ptr_type queue_read_end_ptr = NULL; //Points to the read
                                                      //list tail
66 static struct swap_buffer** queue_busy = NULL; //Pointers array. Points to
                                                 //subbuffers currently held by readers
68 static unsigned int queue_busy_last_element; //Store last occupied
69 //element in queue_busy
70 static unsigned int queue_subbuffer_count = 0; //Subbuffers count
71 static size_t queue_subbuffer_size = 0; //Subbuffers size in bytes
72 static buffer_access_sync_type buffer_read_sync; //add_to_read_list and
                                                   //get_from_read_list protection
75 static buffer_access_sync_type buffer_write_sync; //add_to_write_list and
                                                    //get_from_write_list protection
78 static buffer_access_sync_type buffer_busy_sync; //add_to_busy_list and
79 //remove_from_busy_list
81 static int pages_order_in_subbuffer = 0; //Power-of-two order of pages in one
                                           //subbuffer; see get_pages_in_subbuffer()
/*
 * Allocate and initialize the whole subbuffer queue.
 *
 * Initializes the three list sync primitives, allocates the queue_busy
 * pointer array, then allocates 'subbuffers_count' swap_buffer structures
 * (each with a 'subbuffer_size'-byte data buffer and its own rw sync) and
 * links them all into the write list.  On any failure, everything
 * allocated so far is torn down before returning.
 *
 * @subbuffer_size   size of each subbuffer, in bytes
 * @subbuffers_count number of subbuffers to create
 *
 * Returns 0 on success, otherwise one of the negative codes below.
 */
85 int buffer_queue_allocation(const size_t subbuffer_size,
86 const unsigned int subbuffers_count)
92 * -1 - memory for queue_busy wasn't allocated
93 * -2 - memory for swap_buffer structure wasn't allocated
94 * -3 - memory for buffer wasn't allocated
95 * -4 - semaphore cannot be inited
96 * -5 - sync primitives cannot be inited
99 /* Static variables initialization */
100 queue_subbuffer_size = subbuffer_size;
101 queue_subbuffer_count = subbuffers_count;
102 queue_busy_last_element = 0;
104 /* Set variable pages_in_subbuffer. It is used for allocation and
105 * deallocation memory pages and its value is returned from
106 * swap_buffer_get() and contains page count in one subbuffer.
107 * All this useful only in kernel space. In userspace it is dummy.*/
108 set_pages_order_in_subbuffer(queue_subbuffer_size);
110 /* Sync primitives initialization */
111 if (buffer_access_init(&buffer_read_sync)) {
115 if (buffer_access_init(&buffer_write_sync)) {
119 if (buffer_access_init(&buffer_busy_sync)) {
124 /* Memory allocation for queue_busy */
125 queue_busy = memory_allocation(sizeof(struct swap_buffer*) *
126 queue_subbuffer_count);
133 /* Memory allocation for swap_buffer structures */
134 /* Allocation for first structure. */
136 queue_write_start_ptr = memory_allocation(sizeof(struct swap_buffer));
138 if (!queue_write_start_ptr) {
140 memory_free(queue_busy);
/* The first node is both head and tail of the write list. */
144 queue_write_end_ptr = queue_write_start_ptr;
146 queue_write_end_ptr->next_in_queue = NULL;
147 queue_write_end_ptr->full_buffer_part = 0;
148 queue_write_end_ptr->buffer = buffer_allocation(queue_subbuffer_size);
149 if (!queue_write_end_ptr->buffer) {
150 print_err("Cannot allocate memory for buffer 1\n");
152 memory_free(queue_busy);
153 memory_free(queue_write_start_ptr);
154 queue_write_start_ptr = NULL;
160 print_msg(" Buffer allocated = 0x%x\n", (unsigned long)queue_write_end_ptr->buffer);
162 if (buffer_rw_init(&queue_write_end_ptr->buffer_sync) != 0) {
164 memory_free(queue_busy);
166 memory_free(queue_write_start_ptr);
167 queue_write_start_ptr = NULL;
171 /* Buffer initialization */
172 memset(buffer_address(queue_write_end_ptr->buffer), 0, queue_subbuffer_size);
174 /* Allocation for other structures. */
/* Each iteration appends one fully-initialized subbuffer to the write-list
 * tail.  The error paths below walk the list from the head, freeing each
 * node's data buffer and the node itself, then reset all queue state. */
175 for (i = 1; i < queue_subbuffer_count; i++) {
176 queue_write_end_ptr->next_in_queue = memory_allocation(sizeof(struct swap_buffer));
177 if (!queue_write_end_ptr->next_in_queue) {
178 /* Free all previously allocated memory */
180 struct swap_buffer *clean_tmp_struct = queue_write_start_ptr;
183 for (j = 0; j < i; j++) {
184 clean_tmp_struct = queue_write_start_ptr;
185 if (queue_write_start_ptr != queue_write_end_ptr) {
186 queue_write_start_ptr = queue_write_start_ptr->next_in_queue;
188 buffer_free(clean_tmp_struct->buffer,queue_subbuffer_size);
189 memory_free(clean_tmp_struct);
191 queue_write_end_ptr = NULL;
192 queue_write_start_ptr = NULL;
193 memory_free(queue_busy);
198 /* Advance the tail pointer to the freshly allocated node */
199 queue_write_end_ptr = queue_write_end_ptr->next_in_queue;
201 queue_write_end_ptr->next_in_queue = NULL;
202 queue_write_end_ptr->full_buffer_part = 0;
203 queue_write_end_ptr->buffer = buffer_allocation(queue_subbuffer_size);
204 if (!queue_write_end_ptr->buffer) {
205 /* Free all previously allocated memory */
207 struct swap_buffer *clean_tmp_struct = queue_write_start_ptr;
210 print_err("Cannot allocate memory for buffer %d\n", i+1);
212 for (j = 0; j < i; j++) {
213 clean_tmp_struct = queue_write_start_ptr;
214 if (queue_write_start_ptr != queue_write_end_ptr) {
215 queue_write_start_ptr = queue_write_start_ptr->next_in_queue;
216 buffer_free(clean_tmp_struct->buffer, queue_subbuffer_size);
218 memory_free(clean_tmp_struct);
220 queue_write_end_ptr = NULL;
221 queue_write_start_ptr = NULL;
222 memory_free(queue_busy);
227 print_msg(" Buffer allocated = 0x%x, pages_order = %d\n", (unsigned long)queue_write_end_ptr->buffer, pages_order_in_subbuffer);
229 if (buffer_rw_init(&queue_write_end_ptr->buffer_sync) != 0) {
230 /* Free all previously allocated memory */
232 struct swap_buffer *clean_tmp_struct = queue_write_start_ptr;
235 for (j = 0; j < i; j++) {
236 clean_tmp_struct = queue_write_start_ptr;
237 if (queue_write_start_ptr != queue_write_end_ptr) {
238 queue_write_start_ptr = queue_write_start_ptr->next_in_queue;
240 buffer_free(clean_tmp_struct->buffer, queue_subbuffer_size);
241 memory_free(clean_tmp_struct);
243 queue_write_end_ptr = NULL;
244 queue_write_start_ptr = NULL;
245 memory_free(queue_busy);
250 /* Buffer initialization */
251 memset(buffer_address(queue_write_end_ptr->buffer), 0,
252 queue_subbuffer_size);
/*
 * Tear down the queue: move every write-list subbuffer to the read list,
 * walk the read list freeing each subbuffer's data buffer, free the busy
 * array and reset all static queue state so the queue can be allocated
 * again.  Returns 0 on success, <0 on set_all_to_read_list() error.
 */
258 int buffer_queue_free(void)
261 struct swap_buffer* tmp = NULL;
264 * <0 - set_all_to_read_list() error
267 //TODO Lock read list semaphore to prevent getting subbuffer from read list
268 /* Set all write buffers to read list */
269 result = set_all_to_read_list();
275 /* Free buffers and structures memory that are in read list */
276 while (queue_read_start_ptr) {
277 tmp = queue_read_start_ptr;
278 queue_read_start_ptr = queue_read_start_ptr->next_in_queue;
279 buffer_free(tmp->buffer, queue_subbuffer_size);
/* NOTE(review): subbuffers still recorded in queue_busy are not walked
 * here -- presumably they must be released before this call; confirm
 * against callers. */
284 memory_free(queue_busy);
/* Reset static state for a potential future re-allocation. */
287 queue_subbuffer_size = 0;
288 queue_subbuffer_count = 0;
289 queue_read_start_ptr = NULL;
290 queue_read_end_ptr = NULL;
291 queue_write_start_ptr = NULL;
292 queue_write_end_ptr = NULL;
297 static unsigned int is_buffer_enough(struct swap_buffer* subbuffer, size_t size)
299 return ((queue_subbuffer_size-subbuffer->full_buffer_part) >= size) ? 1 : 0;
302 /* Get first subbuffer from read list */
/*
 * Pop the head of the read list under buffer_read_sync.
 * Returns the detached subbuffer, or NULL when the list is empty or the
 * lock cannot be taken.
 */
303 struct swap_buffer* get_from_read_list(void)
305 struct swap_buffer* result = NULL;
307 /* Lock read sync primitive */
308 if (buffer_access_lock(&buffer_read_sync)) {
312 if (queue_read_start_ptr == NULL) {
314 goto get_from_read_list_unlock;
317 result = queue_read_start_ptr;
319 /* If this is the last readable buffer, queue_read_start_ptr will point
320 * to NULL next time; that case is handled at the top of the function.
322 if (queue_read_start_ptr == queue_read_end_ptr) {
323 queue_read_end_ptr = NULL;
325 queue_read_start_ptr = queue_read_start_ptr->next_in_queue;
327 get_from_read_list_unlock:
328 /* Unlock read sync primitive */
329 if (buffer_access_unlock(&buffer_read_sync)) {
336 /* Add subbuffer to read list */
/*
 * Append 'subbuffer' at the tail of the read list under buffer_read_sync.
 * Returns 0 on success; nonzero lock/unlock failure codes otherwise
 * (see partial error list below).
 */
337 int add_to_read_list(struct swap_buffer* subbuffer)
343 * 2 - cannot unlock */
345 /* Lock read sync primitive */
346 if (buffer_access_lock(&buffer_read_sync)) {
351 // TODO Sanitization?
/* Empty list: the new node becomes the head as well. */
352 if (!queue_read_start_ptr) {
353 queue_read_start_ptr = subbuffer;
356 if (queue_read_end_ptr) {
357 queue_read_end_ptr->next_in_queue = subbuffer;
359 queue_read_end_ptr = queue_read_end_ptr->next_in_queue;
361 queue_read_end_ptr = subbuffer;
/* The tail always terminates the list. */
363 queue_read_end_ptr->next_in_queue = NULL;
365 /* Unlock read sync primitive */
366 if (buffer_access_unlock(&buffer_read_sync)) {
374 /* Call add to read list and callback function from driver module */
/*
 * Move 'subbuffer' to the read list, then notify the driver module via
 * swap_buffer_callback().  The callback's return value is currently
 * ignored (see TODO below).
 */
375 int add_to_read_list_with_callback(struct swap_buffer* subbuffer)
379 result = add_to_read_list(subbuffer);
380 // TODO Handle ret value
381 swap_buffer_callback(subbuffer);
386 /* Get first writable subbuffer from write list */
/*
 * Pop the first write-list subbuffer with at least 'size' bytes free.
 *
 * Subbuffers that cannot be rw-locked, or whose free space is too small,
 * are unlinked and collected on a local callback list; once
 * buffer_write_sync is released, they are pushed to the read list with
 * the driver callback.  Running callbacks only after unlock avoids
 * lock-order deadlocks (see comment below).
 * On success the chosen subbuffer is returned with its buffer_sync taken;
 * NULL is returned when the list is empty or locking fails.
 */
387 struct swap_buffer* get_from_write_list(size_t size)
389 struct swap_buffer *result = NULL;
391 /* Callbacks are called at the end of the function to prevent deadlocks */
392 struct swap_buffer *queue_callback_start_ptr = NULL;
393 struct swap_buffer *queue_callback_end_ptr = NULL;
394 struct swap_buffer *tmp_buffer = NULL;
396 /* Lock write list sync primitive */
397 if (buffer_access_lock(&buffer_write_sync)) {
401 while (queue_write_start_ptr) {
402 /* If start points to NULL => list is empty => exit */
403 if (!queue_write_start_ptr) {
405 goto get_from_write_list_unlock;
408 /* Get semaphore value. Useful only if we want buffer to write to
409 * several buffers the same time
411 * We're trying to lock semaphore, and if it is successful, unlocking
412 * it. Otherwise, going to the next step. */
/* Head cannot be locked: unlink it and defer it to the callback list. */
413 if (buffer_rw_lock(&queue_write_start_ptr->buffer_sync) != 0) {
414 // TODO HOW? HOW is it possible to get there?!
415 result = queue_write_start_ptr;
416 /* If we reached end of the list */
417 if (queue_write_start_ptr == queue_write_end_ptr) {
418 queue_write_end_ptr = NULL;
420 /* Move start write pointer */
421 queue_write_start_ptr = queue_write_start_ptr->next_in_queue;
423 /* Add to callback list */
424 if (!queue_callback_start_ptr) {
425 queue_callback_start_ptr = result;
427 if (queue_callback_end_ptr) {
428 queue_callback_end_ptr->next_in_queue = result;
430 queue_callback_end_ptr = result;
431 queue_callback_end_ptr->next_in_queue = NULL;
436 buffer_rw_unlock(&queue_write_start_ptr->buffer_sync);
/* Head has enough free room -- this is our result. */
440 if (is_buffer_enough(queue_write_start_ptr, size)) {
441 result = queue_write_start_ptr;
444 /* If size is not enough, subbuffers goes to read list */
445 result = queue_write_start_ptr;
446 /* If we reached end of the list */
447 if (queue_write_start_ptr == queue_write_end_ptr) {
448 queue_write_end_ptr = NULL;
450 /* Move start write pointer */
451 queue_write_start_ptr = queue_write_start_ptr->next_in_queue;
453 /* Add to callback list */
454 if (!queue_callback_start_ptr) {
455 queue_callback_start_ptr = result;
457 if (queue_callback_end_ptr) {
458 queue_callback_end_ptr->next_in_queue = result;
460 queue_callback_end_ptr = result;
461 queue_callback_end_ptr->next_in_queue = NULL;
467 /* Lock writing semaphore */
469 if (buffer_rw_lock(&result->buffer_sync)) {
471 goto get_from_write_list_unlock;
475 get_from_write_list_unlock:
476 /* Unlock write list sync primitive */
477 if (buffer_access_unlock(&buffer_write_sync)) {
479 buffer_rw_unlock(&result->buffer_sync);
484 /* Adding buffers to read list and calling callbacks */
485 for (tmp_buffer = NULL; queue_callback_start_ptr; ) {
487 if (queue_callback_start_ptr == queue_callback_end_ptr) {
488 queue_callback_end_ptr = NULL;
490 tmp_buffer = queue_callback_start_ptr;
491 queue_callback_start_ptr = queue_callback_start_ptr->next_in_queue;
493 add_to_read_list_with_callback(tmp_buffer);
499 /* Add subbuffer to write list */
/*
 * Return a drained subbuffer to the tail of the write list.  Its payload
 * is zeroed and full_buffer_part reset, so it is ready for new writes.
 * Returns 0 on success; lock/unlock failures otherwise (partial list below).
 */
500 int add_to_write_list(struct swap_buffer* subbuffer)
504 * -2 - cannot unlock */
506 if (buffer_access_lock(&buffer_write_sync)) {
/* Reset the subbuffer contents before re-queueing it for writers. */
511 memset(buffer_address(subbuffer->buffer), 0, queue_subbuffer_size);
512 subbuffer->full_buffer_part = 0;
/* Empty list: the new node becomes the head as well. */
514 if (!queue_write_start_ptr) {
515 queue_write_start_ptr = subbuffer;
518 if (queue_write_end_ptr) {
519 queue_write_end_ptr->next_in_queue = subbuffer;
520 queue_write_end_ptr = queue_write_end_ptr->next_in_queue;
522 queue_write_end_ptr = subbuffer;
524 queue_write_end_ptr->next_in_queue = NULL;
526 if (buffer_access_unlock(&buffer_write_sync)) {
533 /* Add subbuffer to busy list when it is read from out of the buffer */
/*
 * Record 'subbuffer' as busy (handed out to a reader) by appending it to
 * the queue_busy array under buffer_busy_sync.
 * NOTE(review): there is no bounds check against queue_subbuffer_count
 * before the store -- this relies on at most queue_subbuffer_count
 * subbuffers existing in total; confirm that invariant holds.
 */
534 int add_to_busy_list(struct swap_buffer* subbuffer)
538 * -2 - cannot unlock */
540 /* Lock busy sync primitive */
541 if (buffer_access_lock(&buffer_busy_sync)) {
/* Busy entries are not linked; detach the node from any list. */
545 subbuffer->next_in_queue = NULL;
546 queue_busy[queue_busy_last_element] = subbuffer;
547 queue_busy_last_element += 1;
549 /* Unlock busy sync primitive */
550 if (buffer_access_unlock(&buffer_busy_sync)) {
557 /* Remove subbuffer from busy list when it is released */
/*
 * Remove 'subbuffer' from the queue_busy array: the last element is moved
 * into its slot (order is not preserved) and the length is decremented.
 * 'result' stays -1 when the subbuffer is not found ("sanitization").
 */
558 int remove_from_busy_list(struct swap_buffer* subbuffer)
560 int result = -1; // For sanitization
564 * -1 - no such buffer in queue_busy list
569 /* Lock busy list sync primitive */
570 if (buffer_access_lock(&buffer_busy_sync)) {
575 /* Sanitization and removing */
576 for (i = 0; i < queue_busy_last_element; i++) {
577 if (queue_busy[i] == subbuffer) {
578 /* Last element goes here and length is down 1 */
579 queue_busy[i] = queue_busy[queue_busy_last_element - 1];
580 queue_busy_last_element -= 1;
586 /* Unlock busy list sync primitive */
587 if (buffer_access_unlock(&buffer_busy_sync)) {
595 /* Get subbuffers count in read list */
596 /* XXX Think about locks */
/*
 * Count the leading read-list subbuffers that actually contain data
 * (full_buffer_part != 0).  Walks the list without taking
 * buffer_read_sync -- see the XXX above.
 */
597 int get_full_buffers_count(void)
600 struct swap_buffer* buffer = queue_read_start_ptr;
602 /* >=0 - buffers count
605 while (buffer && buffer->full_buffer_part) {
607 buffer = buffer->next_in_queue;
613 /* Set all subbuffers in write list to read list */
/*
 * Flush the write list: for each subbuffer, wait for its writer to finish
 * by locking and immediately unlocking its rw sync, then unlink it and
 * append it to the read list.  The whole walk runs under
 * buffer_write_sync.  Returns 0 on success or a negative code below.
 */
614 int set_all_to_read_list(void)
617 struct swap_buffer *buffer = queue_write_start_ptr;
620 * -1 - sem_wait() error
621 * -2 - sem_post() error
622 * -3 - problems with locking sync primitives
623 * -4 - problems with unlocking sync primitives
626 /* Locking write sync primitive */
627 if (buffer_access_lock(&buffer_write_sync)) {
632 while (queue_write_start_ptr) {
633 /* Waiting till semaphore should be posted */
635 // TODO To think: It's not bad as it is, but maybe it would be better locking
636 // semaphore while changing its list? (Not bad now, cause buffer should have
637 // already been stopped).
/* Lock+unlock pair acts as a barrier: returns only when no writer holds it. */
639 if (buffer_rw_lock(&buffer->buffer_sync)) {
641 goto set_all_to_read_list_unlock;
644 if (buffer_rw_unlock(&buffer->buffer_sync)) {
646 goto set_all_to_read_list_unlock;
/* Detach the head of the write list and hand it to the read list. */
649 buffer = queue_write_start_ptr;
651 /* If we reached end of the list */
652 if (queue_write_start_ptr == queue_write_end_ptr) {
653 queue_write_end_ptr = NULL;
655 queue_write_start_ptr = queue_write_start_ptr->next_in_queue;
657 add_to_read_list(buffer);
660 set_all_to_read_list_unlock:
661 /* Unlocking write primitive */
662 if (buffer_access_unlock(&buffer_write_sync)) {
668 /* Get subbuffers count in busy list */
669 /* XXX Think abount lock */
670 int get_busy_buffers_count(void)
672 return queue_busy_last_element;
675 /* Get memory pages count in subbuffer */
676 int get_pages_in_subbuffer(void)
678 /* Return 1 if pages order 0, or 2 of power pages_order_in_subbuffer otherwise */
679 return (pages_order_in_subbuffer) ? 2 << (pages_order_in_subbuffer - 1) : 1;