3 * modules/buffer/buffer_queue.c
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 * Copyright (C) Samsung Electronics, 2013
21 * 2013 Alexander Aksenov <a.aksenov@samsung.com>: SWAP Buffer implementation
25 /* SWAP buffer queues implementation */
27 /* For all memory allocation/deallocation operations, except subbuffer memory
28 * allocation/deallocation, the following functions should be used:
29 * memory_allocation(size_t memory_size)
30 * memory_free(void *ptr)
32 * For subbuffer allocation/deallocation operations the following should be used:
33 * buffer_allocation(size_t subbuffer_size)
34 * buffer_free(void *ptr, size_t subbuffer_size)
35 * To get buffer pointer for any usage, EXCEPT ALLOCATION AND DEALLOCATION
36 * use the following define:
37 * buffer_pointer(void *ptr_to_buffer_element_of_swap_buffer_structure)
38 * DO NOT USE SUBBUFFER PTR IN STRUCT SWAP_BUFFER WITHOUT THIS DEFINE!
39 * It will be ok for user space, but fail in kernel space.
41 * See space_dep_types_and_def.h for details */
45 #include "buffer_queue.h"
46 #include "swap_buffer_to_buffer_queue.h"
47 #include "swap_buffer_errors.h"
49 /* Queue structure. Consist of pointers to the first and the last elements of
52 struct swap_subbuffer *start_ptr;
53 struct swap_subbuffer *end_ptr;
54 struct sync_t queue_sync;
/* Singly-linked list of subbuffers currently available for writing */
58 struct queue write_queue = {
/* Singly-linked list of filled subbuffers waiting to be read out */
67 struct queue read_queue = {
75 /* Pointers array. Points to busy buffers */
/* Flat array (allocated in buffer_queue_allocation) of subbuffers handed out
 * to the reader and not yet released; guarded by buffer_busy_sync */
76 static struct swap_subbuffer **queue_busy = NULL;
78 /* Store last busy element */
/* Also serves as the current length of the queue_busy array */
79 static unsigned int queue_busy_last_element;
81 /* Subbuffers count */
82 static unsigned int queue_subbuffer_count = 0;
84 /* One subbuffer size */
85 static size_t queue_subbuffer_size = 0;
/* Protects queue_busy and queue_busy_last_element */
88 static struct sync_t buffer_busy_sync = {
92 /* Memory pages count in one subbuffer */
/* Set by set_pages_order_in_subbuffer(); meaningful only in kernel space */
93 static int pages_order_in_subbuffer = 0;
/* Allocate the whole queue machinery: the busy-pointer array and
 * subbuffers_count swap_subbuffer structures, each owning one data buffer of
 * subbuffer_size bytes. All structures start out on the write queue.
 * Returns E_SB_SUCCESS or a negative -E_SB_* error code; on failure all
 * partially allocated memory is released via the goto cleanup chain below. */
96 int buffer_queue_allocation(size_t subbuffer_size,
97 unsigned int subbuffers_count)
101 unsigned int allocated_buffers = 0;
102 unsigned int allocated_structs = 0;
103 struct swap_subbuffer *clean_tmp_struct;
106 /* Static variables initialization */
107 queue_subbuffer_size = subbuffer_size;
108 queue_subbuffer_count = subbuffers_count;
109 queue_busy_last_element = 0;
111 /* Set variable pages_in_subbuffer. It is used for allocation and
112 * deallocation memory pages and its value is returned from
113 * swap_buffer_get() and contains page count in one subbuffer.
114 * All this useful only in kernel space. In userspace it is dummy.*/
115 set_pages_order_in_subbuffer(queue_subbuffer_size);
116 /* Sync primitives initialization */
117 sync_init(&read_queue.queue_sync);
118 sync_init(&write_queue.queue_sync);
119 sync_init(&buffer_busy_sync);
121 /* Memory allocation for queue_busy */
/* NOTE(review): the array holds pointers, so sizeof(*queue_busy) would be
 * the exact element size; sizeof(**queue_busy) allocates a full
 * struct swap_subbuffer per slot — over-allocation, not a correctness bug */
122 queue_busy = memory_allocation(sizeof(**queue_busy) * queue_subbuffer_count);
125 result = -E_SB_NO_MEM_QUEUE_BUSY;
126 goto buffer_allocation_error_ret;
129 /* Memory allocation for swap_subbuffer structures */
131 /* Allocation for first structure. */
132 write_queue.start_ptr = memory_allocation(sizeof(*write_queue.start_ptr));
134 if (!write_queue.start_ptr) {
135 result = -E_SB_NO_MEM_BUFFER_STRUCT;
/* queue_busy is the only thing allocated so far — free just that */
136 goto buffer_allocation_queue_busy_free;
141 write_queue.end_ptr = write_queue.start_ptr;
143 write_queue.end_ptr->next_in_queue = NULL;
144 write_queue.end_ptr->full_buffer_part = 0;
145 write_queue.end_ptr->data_buffer = buffer_allocation(queue_subbuffer_size);
146 if (!write_queue.end_ptr->data_buffer) {
147 print_err("Cannot allocate memory for buffer 1\n");
148 result = -E_SB_NO_MEM_DATA_BUFFER;
149 goto buffer_allocation_error_free;
153 print_msg(" Buffer allocated = 0x%p\n", write_queue.end_ptr->data_buffer);
155 sync_init(&write_queue.end_ptr->buffer_sync);
157 /* Buffer initialization */
158 memset(buffer_address(write_queue.end_ptr->data_buffer), 0, queue_subbuffer_size);
160 /* Allocation for other structures. */
/* Each iteration appends one struct + data buffer to the write queue tail */
161 for (i = 1; i < queue_subbuffer_count; i++) {
162 write_queue.end_ptr->next_in_queue =
163 memory_allocation(sizeof(*write_queue.end_ptr->next_in_queue));
164 if (!write_queue.end_ptr->next_in_queue) {
165 result = -E_SB_NO_MEM_BUFFER_STRUCT;
166 goto buffer_allocation_error_free;
170 /* Now next write_queue.end_ptr is next */
171 write_queue.end_ptr = write_queue.end_ptr->next_in_queue;
173 write_queue.end_ptr->next_in_queue = NULL;
174 write_queue.end_ptr->full_buffer_part = 0;
175 write_queue.end_ptr->data_buffer =
176 buffer_allocation(queue_subbuffer_size);
177 if (!write_queue.end_ptr->data_buffer) {
178 result = -E_SB_NO_MEM_DATA_BUFFER;
179 goto buffer_allocation_error_free;
183 print_msg(" Buffer allocated = 0x%p, pages_order = %d\n",
184 write_queue.end_ptr->data_buffer,
185 pages_order_in_subbuffer);
187 sync_init(&write_queue.end_ptr->buffer_sync);
189 /* Buffer initialization */
190 memset(buffer_address(write_queue.end_ptr->data_buffer), 0,
191 queue_subbuffer_size);
196 /* In case of errors, this code is called */
197 /* Free all previously allocated memory */
/* Walks the partially-built write list, freeing data buffers for the first
 * allocated_buffers entries and every struct up to allocated_structs */
198 buffer_allocation_error_free:
199 clean_tmp_struct = write_queue.start_ptr;
201 for (j = 0; j < allocated_structs; j++) {
202 clean_tmp_struct = write_queue.start_ptr;
203 if (allocated_buffers) {
204 buffer_free(clean_tmp_struct->data_buffer, queue_subbuffer_size);
207 if (write_queue.start_ptr != write_queue.end_ptr)
208 write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
209 memory_free(clean_tmp_struct);
211 write_queue.end_ptr = NULL;
212 write_queue.start_ptr = NULL;
214 buffer_allocation_queue_busy_free:
215 memory_free(queue_busy);
218 buffer_allocation_error_ret:
222 int buffer_queue_reset(void)
224 struct swap_subbuffer *buffer = read_queue.start_ptr;
226 /* Check if there are some subbuffers in busy list. If so - return error */
227 if (get_busy_buffers_count())
228 return -E_SB_UNRELEASED_BUFFERS;
230 /* Lock read sync primitive */
231 sync_lock(&read_queue.queue_sync);
233 /* Set all subbuffers in read list to write list and reinitialize them */
234 while (read_queue.start_ptr) {
236 /* Lock buffer sync primitive to prevent writing to buffer if it had
237 * been selected for writing, but still wasn't wrote. */
238 sync_lock(&buffer->buffer_sync);
240 buffer = read_queue.start_ptr;
242 /* If we reached end of the list */
243 if (read_queue.start_ptr == read_queue.end_ptr) {
244 read_queue.end_ptr = NULL;
246 read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
248 /* Reinit full buffer part */
249 buffer->full_buffer_part = 0;
251 add_to_write_list(buffer);
253 /* Unlock buffer sync primitive */
254 sync_unlock(&buffer->buffer_sync);
257 /* Unlock read primitive */
258 sync_unlock(&read_queue.queue_sync);
263 void buffer_queue_free(void)
265 struct swap_subbuffer *tmp = NULL;
267 /* Lock all sync primitives to prevet accessing free memory */
268 sync_lock(&write_queue.queue_sync);
269 sync_lock(&read_queue.queue_sync);
270 sync_lock(&buffer_busy_sync);
272 /* Free buffers and structures memory that are in read list */
273 while (read_queue.start_ptr) {
274 tmp = read_queue.start_ptr;
275 read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
276 buffer_free(tmp->data_buffer, queue_subbuffer_size);
277 print_msg(" Buffer free = 0x%x\n", (unsigned long)
282 /* Free buffers and structures memory that are in read list */
283 while (write_queue.start_ptr) {
284 tmp = write_queue.start_ptr;
285 write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
286 buffer_free(tmp->data_buffer, queue_subbuffer_size);
287 print_msg(" Buffer free = 0x%x\n", (unsigned long)
293 memory_free(queue_busy);
296 queue_subbuffer_size = 0;
297 queue_subbuffer_count = 0;
298 read_queue.start_ptr = NULL;
299 read_queue.end_ptr = NULL;
300 write_queue.start_ptr = NULL;
301 write_queue.end_ptr = NULL;
303 /* Unlock all sync primitives */
304 sync_unlock(&buffer_busy_sync);
305 sync_unlock(&read_queue.queue_sync);
306 sync_unlock(&write_queue.queue_sync);
309 static unsigned int is_buffer_enough(struct swap_subbuffer *subbuffer,
312 /* XXX Think about checking full_buffer_part for correctness
313 * (<queue_subbuffer_size). It should be true, but if isn't (due to sources
314 * chaning, etc.) this function should be true! */
315 return ((queue_subbuffer_size-subbuffer->full_buffer_part) >= size) ? 1 : 0;
318 /* Get first subbuffer from read list */
319 struct swap_subbuffer *get_from_read_list(void)
321 struct swap_subbuffer *result = NULL;
323 /* Lock read sync primitive */
324 sync_lock(&read_queue.queue_sync);
326 if (read_queue.start_ptr == NULL) {
328 goto get_from_read_list_unlock;
331 result = read_queue.start_ptr;
333 /* If this is the last readable buffer, read_queue.start_ptr next time will
334 * points to NULL and that case is handled in the beginning of function
336 if (read_queue.start_ptr == read_queue.end_ptr) {
337 read_queue.end_ptr = NULL;
339 read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
341 get_from_read_list_unlock:
342 /* Unlock read sync primitive */
343 sync_unlock(&read_queue.queue_sync);
348 /* Add subbuffer to read list */
349 void add_to_read_list(struct swap_subbuffer *subbuffer)
352 /* Lock read sync primitive */
353 sync_lock(&read_queue.queue_sync);
355 if (!read_queue.start_ptr)
356 read_queue.start_ptr = subbuffer;
358 if (read_queue.end_ptr) {
359 read_queue.end_ptr->next_in_queue = subbuffer;
361 read_queue.end_ptr = read_queue.end_ptr->next_in_queue;
363 read_queue.end_ptr = subbuffer;
365 read_queue.end_ptr->next_in_queue = NULL;
367 /* Unlock read sync primitive */
368 sync_unlock(&read_queue.queue_sync);
/* Put a subbuffer on the read list, then notify the driver module via its
 * callback. Returns the callback's result. */
int add_to_read_list_with_callback(struct swap_subbuffer *subbuffer)
{
	int ret;

	add_to_read_list(subbuffer);
	/* TODO Handle ret value */
	ret = swap_buffer_callback(subbuffer);

	return ret;
}
383 /* Get first writable subbuffer from write list */
/* Scans the write list for a subbuffer with at least `size` free bytes.
 * On success: returns the subbuffer with its buffer_sync held (released later
 * in swap_buffer_write()) and sets *ptr_to_write to the write position.
 * Subbuffers at the head that are too full are moved to a local callback
 * queue and, after the write-queue lock is dropped, pushed to the read list
 * with the driver callback — deferred exactly to avoid deadlocks. */
384 struct swap_subbuffer *get_from_write_list(size_t size, void **ptr_to_write)
386 struct swap_subbuffer *result = NULL;
388 /* Callbacks are called at the end of the function to prevent deadlocks */
389 struct queue callback_queue = {
396 struct swap_subbuffer *tmp_buffer = NULL;
399 *ptr_to_write = NULL;
401 /* Lock write list sync primitive */
402 sync_lock(&write_queue.queue_sync);
404 while (write_queue.start_ptr) {
405 /* If start points to NULL => list is empty => exit */
/* NOTE(review): redundant with the while condition just above; harmless */
406 if (!write_queue.start_ptr) {
408 goto get_from_write_list_unlock;
411 /* We're found subbuffer */
412 if (is_buffer_enough(write_queue.start_ptr, size)) {
414 result = write_queue.start_ptr;
/* Write position = buffer base + bytes already used */
415 *ptr_to_write = (void *)((unsigned long)
416 (buffer_address(result->data_buffer)) +
417 result->full_buffer_part);
419 /* Add data size to full_buffer_part. Very important to do it in
420 * write_queue.queue_sync spinlock */
421 write_queue.start_ptr->full_buffer_part += size;
423 /* Lock rw sync. Should be unlocked in swap_buffer_write() */
424 sync_lock(&result->buffer_sync);
426 /* This subbuffer is not enough => it goes to read list */
428 result = write_queue.start_ptr;
430 /* If we reached end of the list */
431 if (write_queue.start_ptr == write_queue.end_ptr) {
432 write_queue.end_ptr = NULL;
435 /* Move start write pointer */
436 write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
438 /* Add to callback list */
439 if (!callback_queue.start_ptr)
440 callback_queue.start_ptr = result;
442 if (callback_queue.end_ptr)
443 callback_queue.end_ptr->next_in_queue = result;
444 callback_queue.end_ptr = result;
445 callback_queue.end_ptr->next_in_queue = NULL;
450 get_from_write_list_unlock:
451 /* Unlock write list sync primitive */
452 sync_unlock(&write_queue.queue_sync);
454 /* Adding buffers to read list and calling callbacks */
/* Drain the local callback queue now that no queue lock is held */
455 for (tmp_buffer = NULL; callback_queue.start_ptr; ) {
456 if (callback_queue.start_ptr == callback_queue.end_ptr)
457 callback_queue.end_ptr = NULL;
459 tmp_buffer = callback_queue.start_ptr;
460 callback_queue.start_ptr = callback_queue.start_ptr->next_in_queue;
462 add_to_read_list_with_callback(tmp_buffer);
468 /* Add subbuffer to write list */
469 void add_to_write_list(struct swap_subbuffer *subbuffer)
471 sync_lock(&write_queue.queue_sync);
474 // TODO Useless memset
475 // memset(buffer_address(subbuffer->data_buffer), 0, queue_subbuffer_size);
476 subbuffer->full_buffer_part = 0;
478 if (!write_queue.start_ptr)
479 write_queue.start_ptr = subbuffer;
481 if (write_queue.end_ptr) {
482 write_queue.end_ptr->next_in_queue = subbuffer;
483 write_queue.end_ptr = write_queue.end_ptr->next_in_queue;
485 write_queue.end_ptr = subbuffer;
487 write_queue.end_ptr->next_in_queue = NULL;
489 sync_unlock(&write_queue.queue_sync);
492 /* Add subbuffer to busy list when it is read from out of the buffer */
493 void add_to_busy_list(struct swap_subbuffer *subbuffer)
495 /* Lock busy sync primitive */
496 sync_lock(&buffer_busy_sync);
498 subbuffer->next_in_queue = NULL;
499 queue_busy[queue_busy_last_element] = subbuffer;
500 queue_busy_last_element += 1;
502 /* Unlock busy sync primitive */
503 sync_unlock(&buffer_busy_sync);
506 /* Remove subbuffer from busy list when it is released */
507 int remove_from_busy_list(struct swap_subbuffer *subbuffer)
509 int result = -E_SB_NO_SUBBUFFER_IN_BUSY; // For sanitization
512 /* Lock busy list sync primitive */
513 sync_lock(&buffer_busy_sync);
515 /* Sanitization and removing */
516 for (i = 0; i < queue_busy_last_element; i++) {
517 if (queue_busy[i] == subbuffer) {
518 /* Last element goes here and length is down 1 */
519 queue_busy[i] = queue_busy[queue_busy_last_element - 1];
520 queue_busy_last_element -= 1;
521 result = E_SB_SUCCESS;
526 /* Unlock busy list sync primitive */
527 sync_unlock(&buffer_busy_sync);
532 /* Get subbuffers count in read list */
533 /* XXX Think about locks */
534 int get_full_buffers_count(void)
537 struct swap_subbuffer *buffer = read_queue.start_ptr;
539 while (buffer && buffer->full_buffer_part) {
541 buffer = buffer->next_in_queue;
547 /* Set all subbuffers in write list to read list */
548 void buffer_queue_flush(void)
550 struct swap_subbuffer *buffer = write_queue.start_ptr;
552 /* Locking write sync primitive */
553 sync_lock(&write_queue.queue_sync);
555 while (write_queue.start_ptr &&
556 write_queue.start_ptr->full_buffer_part) {
558 /* Lock buffer sync primitive to prevent writing to buffer if it had
559 * been selected for writing, but still wasn't wrote. */
560 sync_lock(&buffer->buffer_sync);
562 buffer = write_queue.start_ptr;
564 /* If we reached end of the list */
565 if (write_queue.start_ptr == write_queue.end_ptr) {
566 write_queue.end_ptr = NULL;
568 write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
570 add_to_read_list(buffer);
572 /* Unlock buffer sync primitive */
573 sync_unlock(&buffer->buffer_sync);
576 /* Unlock write primitive */
577 sync_unlock(&write_queue.queue_sync);
580 /* Get subbuffers count in busy list */
581 int get_busy_buffers_count(void)
585 sync_lock(&buffer_busy_sync);
586 result = queue_busy_last_element;
587 sync_unlock(&buffer_busy_sync);
592 /* Get memory pages count in subbuffer */
593 int get_pages_count_in_subbuffer(void)
595 /* Return 1 if pages order 0, or 2 of power pages_order_in_subbuffer otherwise */
596 return (pages_order_in_subbuffer) ? 2 << (pages_order_in_subbuffer - 1) : 1;