3 * modules/buffer/buffer_queue.c
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 * Copyright (C) Samsung Electronics, 2013
21 * 2013 Alexander Aksenov <a.aksenov@samsung.com>: SWAP Buffer implement
25 /* SWAP buffer queues implementation */
/* For all memory allocation/deallocation operations, except subbuffer memory
 * allocation/deallocation, the following must be used:
 *	memory_allocation(size_t memory_size)
 *	memory_free(void *ptr)
32 * For subbuffer allocation/deallocation operations should be used
33 * buffer_allocation(size_t subbuffer_size)
34 * buffer_free(void *ptr, size_t subbuffer_size)
35 * To get buffer pointer for any usage, EXCEPT ALLOCATION AND DEALLOCATION
36 * use the following define:
37 * buffer_pointer(void *ptr_to_buffer_element_of_swap_buffer_structure)
38 * DO NOT USE SUBBUFFER PTR IN STRUCT SWAP_BUFFER WITHOUT THIS DEFINE!
39 * It will be ok for user space, but fail in kernel space.
41 * See space_dep_types_and_def.h for details */
45 #include "buffer_queue.h"
46 #include "swap_buffer_to_buffer_queue.h"
47 #include "swap_buffer_errors.h"
48 #include "kernel_operations.h"
/* Queue structure. Consists of pointers to the first and the last elements of
 * the queue, plus a sync primitive that guards the list. */
	struct swap_subbuffer *start_ptr;	/* queue head */
	struct swap_subbuffer *end_ptr;		/* queue tail */
	struct sync_t queue_sync;		/* protects start_ptr/end_ptr */

/* Queue of subbuffers currently available for writing */
struct queue write_queue = {

/* Queue of filled subbuffers waiting to be handed to readers */
struct queue read_queue = {

/* Pointers array. Points to busy buffers (subbuffers handed out to readers) */
static struct swap_subbuffer **queue_busy = NULL;

/* Store last busy element: index of the first free slot in queue_busy,
 * which equals the number of busy subbuffers */
static unsigned int queue_busy_last_element;

/* Subbuffers count */
static unsigned int queue_subbuffer_count = 0;

/* One subbuffer size, in bytes */
static size_t queue_subbuffer_size = 0;

/* Protects queue_busy and queue_busy_last_element */
static struct sync_t buffer_busy_sync = {

/* Memory pages count in one subbuffer, as a power-of-two order */
static int pages_order_in_subbuffer = 0;
/*
 * buffer_queue_allocation - allocate the whole queue state: the busy-pointers
 * array and a linked list of subbuffers_count subbuffers of subbuffer_size
 * bytes each, all initially placed on the write queue.
 * Returns E_SB_SUCCESS on success or a negative -E_SB_* error code.
 * NOTE(review): some lines (declarations of result/i/j, the NULL check for
 * queue_busy, allocated_* counter updates, closing braces and returns) are
 * not visible in this excerpt.
 */
int buffer_queue_allocation(size_t subbuffer_size,
			    unsigned int subbuffers_count)
	unsigned int allocated_buffers = 0;	/* data buffers allocated so far */
	unsigned int allocated_structs = 0;	/* subbuffer structs allocated so far */
	struct swap_subbuffer *clean_tmp_struct;

	/* Static variables initialization */
	queue_subbuffer_size = subbuffer_size;
	queue_subbuffer_count = subbuffers_count;
	queue_busy_last_element = 0;

	/* Set variable pages_in_subbuffer. It is used for allocation and
	 * deallocation memory pages and its value is returned from
	 * swap_buffer_get() and contains page count in one subbuffer.
	 * All this useful only in kernel space. In userspace it is dummy. */
	set_pages_order_in_subbuffer(queue_subbuffer_size);

	/* Sync primitives initialization */
	sync_init(&read_queue.queue_sync);
	sync_init(&write_queue.queue_sync);
	sync_init(&buffer_busy_sync);

	/* Memory allocation for queue_busy: one slot per subbuffer */
	queue_busy = memory_allocation(sizeof(*queue_busy) * queue_subbuffer_count);
	/* Allocation-failure path (the NULL check itself is elided here) */
	result = -E_SB_NO_MEM_QUEUE_BUSY;
	goto buffer_allocation_error_ret;

	/* Memory allocation for swap_subbuffer structures */

	/* Allocation for first structure. */
	write_queue.start_ptr = memory_allocation(sizeof(*write_queue.start_ptr));

	if (!write_queue.start_ptr) {
		result = -E_SB_NO_MEM_BUFFER_STRUCT;
		goto buffer_allocation_queue_busy_free;

	write_queue.end_ptr = write_queue.start_ptr;

	write_queue.end_ptr->next_in_queue = NULL;
	write_queue.end_ptr->full_buffer_part = 0;	/* subbuffer starts empty */
	write_queue.end_ptr->data_buffer = buffer_allocation(queue_subbuffer_size);
	if (!write_queue.end_ptr->data_buffer) {
		print_err("Cannot allocate memory for buffer 1\n");
		result = -E_SB_NO_MEM_DATA_BUFFER;
		goto buffer_allocation_error_free;

	sync_init(&write_queue.end_ptr->buffer_sync);

	/* Buffer initialization */
	memset(buffer_address(write_queue.end_ptr->data_buffer), 0, queue_subbuffer_size);

	/* Allocation for other structures. */
	for (i = 1; i < queue_subbuffer_count; i++) {
		write_queue.end_ptr->next_in_queue =
			memory_allocation(sizeof(*write_queue.end_ptr->next_in_queue));
		if (!write_queue.end_ptr->next_in_queue) {
			result = -E_SB_NO_MEM_BUFFER_STRUCT;
			goto buffer_allocation_error_free;

		/* Now next write_queue.end_ptr is next */
		write_queue.end_ptr = write_queue.end_ptr->next_in_queue;

		write_queue.end_ptr->next_in_queue = NULL;
		write_queue.end_ptr->full_buffer_part = 0;
		write_queue.end_ptr->data_buffer =
			buffer_allocation(queue_subbuffer_size);
		if (!write_queue.end_ptr->data_buffer) {
			result = -E_SB_NO_MEM_DATA_BUFFER;
			goto buffer_allocation_error_free;

		sync_init(&write_queue.end_ptr->buffer_sync);

		/* Buffer initialization */
		memset(buffer_address(write_queue.end_ptr->data_buffer), 0,
		       queue_subbuffer_size);

/* In case of errors, this code is called */
/* Free all previously allocated memory */
buffer_allocation_error_free:
	clean_tmp_struct = write_queue.start_ptr;

	for (j = 0; j < allocated_structs; j++) {
		clean_tmp_struct = write_queue.start_ptr;
		if (allocated_buffers) {
			buffer_free(clean_tmp_struct->data_buffer, queue_subbuffer_size);
		/* Advance the head unless this is the last remaining element */
		if (write_queue.start_ptr != write_queue.end_ptr)
			write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
		memory_free(clean_tmp_struct);

	write_queue.end_ptr = NULL;
	write_queue.start_ptr = NULL;

buffer_allocation_queue_busy_free:
	memory_free(queue_busy);

buffer_allocation_error_ret:
217 int buffer_queue_reset(void)
219 struct swap_subbuffer *buffer = read_queue.start_ptr;
221 /* Check if there are some subbuffers in busy list. If so - return error */
222 if (get_busy_buffers_count())
223 return -E_SB_UNRELEASED_BUFFERS;
225 /* Lock read sync primitive */
226 sync_lock(&read_queue.queue_sync);
228 /* Set all subbuffers in read list to write list and reinitialize them */
229 while (read_queue.start_ptr) {
231 /* Lock buffer sync primitive to prevent writing to buffer if it had
232 * been selected for writing, but still wasn't wrote. */
233 sync_lock(&buffer->buffer_sync);
235 buffer = read_queue.start_ptr;
237 /* If we reached end of the list */
238 if (read_queue.start_ptr == read_queue.end_ptr) {
239 read_queue.end_ptr = NULL;
241 read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
243 /* Reinit full buffer part */
244 buffer->full_buffer_part = 0;
246 add_to_write_list(buffer);
248 /* Unlock buffer sync primitive */
249 sync_unlock(&buffer->buffer_sync);
252 /* Unlock read primitive */
253 sync_unlock(&read_queue.queue_sync);
/*
 * buffer_queue_free - tear down the whole queue: free every subbuffer's data
 * buffer in both lists, free the busy-pointers array and zero all bookkeeping
 * so a later buffer_queue_allocation() starts from a clean state.
 * NOTE(review): freeing of the swap_subbuffer structs themselves is not
 * visible in this excerpt (lines elided after each buffer_free call).
 */
void buffer_queue_free(void)
	struct swap_subbuffer *tmp = NULL;

	/* Lock all sync primitives to prevent accessing freed memory */
	sync_lock(&write_queue.queue_sync);
	sync_lock(&read_queue.queue_sync);
	sync_lock(&buffer_busy_sync);

	/* Free buffers and structures memory that are in read list */
	while (read_queue.start_ptr) {
		tmp = read_queue.start_ptr;
		read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
		buffer_free(tmp->data_buffer, queue_subbuffer_size);

	/* Free buffers and structures memory that are in write list */
	while (write_queue.start_ptr) {
		tmp = write_queue.start_ptr;
		write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
		buffer_free(tmp->data_buffer, queue_subbuffer_size);

	memory_free(queue_busy);

	/* Reset bookkeeping so stale state cannot leak into a new allocation */
	queue_subbuffer_size = 0;
	queue_subbuffer_count = 0;
	read_queue.start_ptr = NULL;
	read_queue.end_ptr = NULL;
	write_queue.start_ptr = NULL;
	write_queue.end_ptr = NULL;

	/* Unlock all sync primitives */
	sync_unlock(&buffer_busy_sync);
	sync_unlock(&read_queue.queue_sync);
	sync_unlock(&write_queue.queue_sync);
300 static unsigned int is_buffer_enough(struct swap_subbuffer *subbuffer,
303 /* XXX Think about checking full_buffer_part for correctness
304 * (<queue_subbuffer_size). It should be true, but if isn't (due to sources
305 * chaning, etc.) this function should be true! */
306 return ((queue_subbuffer_size-subbuffer->full_buffer_part) >= size) ? 1 : 0;
309 /* Get first subbuffer from read list */
310 struct swap_subbuffer *get_from_read_list(void)
312 struct swap_subbuffer *result = NULL;
314 /* Lock read sync primitive */
315 sync_lock(&read_queue.queue_sync);
317 if (read_queue.start_ptr == NULL) {
319 goto get_from_read_list_unlock;
322 result = read_queue.start_ptr;
324 /* If this is the last readable buffer, read_queue.start_ptr next time will
325 * points to NULL and that case is handled in the beginning of function
327 if (read_queue.start_ptr == read_queue.end_ptr) {
328 read_queue.end_ptr = NULL;
330 read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
332 get_from_read_list_unlock:
333 /* Unlock read sync primitive */
334 sync_unlock(&read_queue.queue_sync);
339 /* Add subbuffer to read list */
340 void add_to_read_list(struct swap_subbuffer *subbuffer)
343 /* Lock read sync primitive */
344 sync_lock(&read_queue.queue_sync);
346 if (!read_queue.start_ptr)
347 read_queue.start_ptr = subbuffer;
349 if (read_queue.end_ptr) {
350 read_queue.end_ptr->next_in_queue = subbuffer;
352 read_queue.end_ptr = read_queue.end_ptr->next_in_queue;
354 read_queue.end_ptr = subbuffer;
356 read_queue.end_ptr->next_in_queue = NULL;
358 /* Unlock read sync primitive */
359 sync_unlock(&read_queue.queue_sync);
/* Queue the subbuffer for reading, then notify the driver module */
int add_to_read_list_with_callback(struct swap_subbuffer *subbuffer)
{
	int ret;

	add_to_read_list(subbuffer);
	/* TODO Handle ret value */
	ret = swap_buffer_callback(subbuffer);

	return ret;
}
/*
 * get_from_write_list - find a writable subbuffer with at least `size` free
 * bytes. Subbuffers at the head that are too full are moved to the read list
 * (their callbacks fire after the write lock is dropped, to avoid deadlock).
 * On success, *ptr_to_write points at the write position inside the returned
 * subbuffer, whose buffer_sync stays locked for the caller.
 * NOTE(review): the else-branch brace, the loop break after a successful
 * match, closing braces and the final return are elided in this excerpt.
 */
struct swap_subbuffer *get_from_write_list(size_t size, void **ptr_to_write)
	struct swap_subbuffer *result = NULL;

	/* Callbacks are called at the end of the function to prevent deadlocks */
	struct queue callback_queue = {

	struct swap_subbuffer *tmp_buffer = NULL;

	*ptr_to_write = NULL;

	/* Lock write list sync primitive */
	sync_lock(&write_queue.queue_sync);

	while (write_queue.start_ptr) {

		/* We found a suitable subbuffer */
		if (is_buffer_enough(write_queue.start_ptr, size)) {

			result = write_queue.start_ptr;
			/* Write position = buffer base + bytes already used */
			*ptr_to_write = (void *)((unsigned long)
						 (buffer_address(result->data_buffer)) +
						 result->full_buffer_part);

			/* Add data size to full_buffer_part. Very important to do it in
			 * write_queue.queue_sync spinlock */
			write_queue.start_ptr->full_buffer_part += size;

			/* Lock rw sync. Should be unlocked in swap_buffer_write() */
			sync_lock(&result->buffer_sync);

		/* This subbuffer is not enough => it goes to read list */
			result = write_queue.start_ptr;

			/* If we reached end of the list */
			if (write_queue.start_ptr == write_queue.end_ptr) {
				write_queue.end_ptr = NULL;

			/* Move start write pointer */
			write_queue.start_ptr = write_queue.start_ptr->next_in_queue;

			/* Add to callback list (tail append, private to this call) */
			if (!callback_queue.start_ptr)
				callback_queue.start_ptr = result;

			if (callback_queue.end_ptr)
				callback_queue.end_ptr->next_in_queue = result;
			callback_queue.end_ptr = result;
			callback_queue.end_ptr->next_in_queue = NULL;

	/* Unlock write list sync primitive */
	sync_unlock(&write_queue.queue_sync);

	/* Adding buffers to read list and calling callbacks */
	for (tmp_buffer = NULL; callback_queue.start_ptr; ) {
		if (callback_queue.start_ptr == callback_queue.end_ptr)
			callback_queue.end_ptr = NULL;

		tmp_buffer = callback_queue.start_ptr;
		callback_queue.start_ptr = callback_queue.start_ptr->next_in_queue;

		add_to_read_list_with_callback(tmp_buffer);
453 /* Add subbuffer to write list */
454 void add_to_write_list(struct swap_subbuffer *subbuffer)
456 sync_lock(&write_queue.queue_sync);
459 // TODO Useless memset
460 // memset(buffer_address(subbuffer->data_buffer), 0, queue_subbuffer_size);
461 subbuffer->full_buffer_part = 0;
463 if (!write_queue.start_ptr)
464 write_queue.start_ptr = subbuffer;
466 if (write_queue.end_ptr) {
467 write_queue.end_ptr->next_in_queue = subbuffer;
468 write_queue.end_ptr = write_queue.end_ptr->next_in_queue;
470 write_queue.end_ptr = subbuffer;
472 write_queue.end_ptr->next_in_queue = NULL;
474 sync_unlock(&write_queue.queue_sync);
477 /* Add subbuffer to busy list when it is read from out of the buffer */
478 void add_to_busy_list(struct swap_subbuffer *subbuffer)
480 /* Lock busy sync primitive */
481 sync_lock(&buffer_busy_sync);
483 subbuffer->next_in_queue = NULL;
484 queue_busy[queue_busy_last_element] = subbuffer;
485 queue_busy_last_element += 1;
487 /* Unlock busy sync primitive */
488 sync_unlock(&buffer_busy_sync);
491 /* Remove subbuffer from busy list when it is released */
492 int remove_from_busy_list(struct swap_subbuffer *subbuffer)
494 int result = -E_SB_NO_SUBBUFFER_IN_BUSY; // For sanitization
497 /* Lock busy list sync primitive */
498 sync_lock(&buffer_busy_sync);
500 /* Sanitization and removing */
501 for (i = 0; i < queue_busy_last_element; i++) {
502 if (queue_busy[i] == subbuffer) {
503 /* Last element goes here and length is down 1 */
504 queue_busy[i] = queue_busy[queue_busy_last_element - 1];
505 queue_busy_last_element -= 1;
506 result = E_SB_SUCCESS;
511 /* Unlock busy list sync primitive */
512 sync_unlock(&buffer_busy_sync);
517 /* Get subbuffers count in read list */
518 /* XXX Think about locks */
519 int get_full_buffers_count(void)
522 struct swap_subbuffer *buffer = read_queue.start_ptr;
524 while (buffer && buffer->full_buffer_part) {
526 buffer = buffer->next_in_queue;
532 /* Set all subbuffers in write list to read list */
533 void buffer_queue_flush(void)
535 struct swap_subbuffer *buffer = write_queue.start_ptr;
537 /* Locking write sync primitive */
538 sync_lock(&write_queue.queue_sync);
540 while (write_queue.start_ptr &&
541 write_queue.start_ptr->full_buffer_part) {
543 /* Lock buffer sync primitive to prevent writing to buffer if it had
544 * been selected for writing, but still wasn't wrote. */
545 sync_lock(&buffer->buffer_sync);
547 buffer = write_queue.start_ptr;
549 /* If we reached end of the list */
550 if (write_queue.start_ptr == write_queue.end_ptr) {
551 write_queue.end_ptr = NULL;
553 write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
555 add_to_read_list(buffer);
557 /* Unlock buffer sync primitive */
558 sync_unlock(&buffer->buffer_sync);
561 /* Unlock write primitive */
562 sync_unlock(&write_queue.queue_sync);
565 /* Get subbuffers count in busy list */
566 int get_busy_buffers_count(void)
570 sync_lock(&buffer_busy_sync);
571 result = queue_busy_last_element;
572 sync_unlock(&buffer_busy_sync);
577 /* Get memory pages count in subbuffer */
578 int get_pages_count_in_subbuffer(void)
580 /* Return 1 if pages order 0, or 2 of power pages_order_in_subbuffer otherwise */
581 return (pages_order_in_subbuffer) ? 2 << (pages_order_in_subbuffer - 1) : 1;