/*
 * modules/buffer/buffer_queue.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) Samsung Electronics, 2013
 *
 * 2013 Alexander Aksenov <a.aksenov@samsung.com>: SWAP Buffer implement
 */
/* SWAP buffer queues implementation */

/* For all memory allocation/deallocation operations, except subbuffer data
 * memory allocation/deallocation, the following should be used:
 *     memory_allocation(size_t memory_size)
 *     memory_free(void *ptr)
 *
 * For subbuffer data allocation/deallocation operations the following should
 * be used:
 *     buffer_allocation(size_t subbuffer_size)
 *     buffer_free(void *ptr, size_t subbuffer_size)
 *
 * To get a buffer pointer for any usage, EXCEPT ALLOCATION AND DEALLOCATION,
 * use the following define:
 *     buffer_pointer(void *ptr_to_buffer_element_of_swap_buffer_structure)
 * DO NOT USE THE SUBBUFFER PTR IN STRUCT SWAP_BUFFER WITHOUT THIS DEFINE!
 * It will be ok for user space, but will fail in kernel space.
 *
 * See space_dep_types_and_def.h for details. */
#include "buffer_queue.h"
#include "swap_buffer_to_buffer_queue.h"
#include "swap_buffer_errors.h"
#include "kernel_operations.h"
50 /* Queue structure. Consist of pointers to the first and the last elements of
53 struct swap_subbuffer *start_ptr;
54 struct swap_subbuffer *end_ptr;
55 unsigned int subbuffers_count;
56 struct sync_t queue_sync;
60 struct queue_t write_queue = {
63 .subbuffers_count = 0,
70 struct queue_t read_queue = {
73 .subbuffers_count = 0,
79 /* Pointers array. Points to busy buffers */
80 static struct swap_subbuffer **queue_busy = NULL;
82 /* Store last busy element */
83 static unsigned int queue_busy_last_element;
85 /* Subbuffers count */
86 static unsigned int queue_subbuffer_count = 0;
88 /* One subbuffer size */
89 static size_t queue_subbuffer_size = 0;
92 static struct sync_t buffer_busy_sync = {
96 /* Memory pages count in one subbuffer */
97 static int pages_order_in_subbuffer = 0;
100 int buffer_queue_allocation(size_t subbuffer_size,
101 unsigned int subbuffers_count)
105 unsigned int allocated_buffers = 0;
106 unsigned int allocated_structs = 0;
107 struct swap_subbuffer *clean_tmp_struct;
110 /* Static varibles initialization */
111 queue_subbuffer_size = subbuffer_size;
112 queue_subbuffer_count = subbuffers_count;
113 queue_busy_last_element = 0;
115 /* Set variable pages_in_subbuffer. It is used for allocation and
116 * deallocation memory pages and its value is returned from
117 * swap_buffer_get() and contains page count in one subbuffer.
118 * All this useful only in kernel space. In userspace it is dummy.*/
119 set_pages_order_in_subbuffer(queue_subbuffer_size);
120 /* Sync primitives initialization */
121 sync_init(&read_queue.queue_sync);
122 sync_init(&write_queue.queue_sync);
123 sync_init(&buffer_busy_sync);
125 /* Memory allocation for queue_busy */
126 queue_busy = memory_allocation(sizeof(**queue_busy) * queue_subbuffer_count);
129 result = -E_SB_NO_MEM_QUEUE_BUSY;
130 goto buffer_allocation_error_ret;
133 /* Memory allocation for swap_subbuffer structures */
135 /* Allocation for first structure. */
136 write_queue.start_ptr = memory_allocation(sizeof(*write_queue.start_ptr));
138 if (!write_queue.start_ptr) {
139 result = -E_SB_NO_MEM_BUFFER_STRUCT;
140 goto buffer_allocation_queue_busy_free;
145 write_queue.end_ptr = write_queue.start_ptr;
147 write_queue.end_ptr->next_in_queue = NULL;
148 write_queue.end_ptr->full_buffer_part = 0;
149 write_queue.end_ptr->data_buffer = buffer_allocation(queue_subbuffer_size);
150 if (!write_queue.end_ptr->data_buffer) {
151 print_err("Cannot allocate memory for buffer 1\n");
152 result = -E_SB_NO_MEM_DATA_BUFFER;
153 goto buffer_allocation_error_free;
157 sync_init(&write_queue.end_ptr->buffer_sync);
159 /* Buffer initialization */
160 memset(buffer_address(write_queue.end_ptr->data_buffer), 0, queue_subbuffer_size);
162 /* Allocation for other structures. */
163 for (i = 1; i < queue_subbuffer_count; i++) {
164 write_queue.end_ptr->next_in_queue =
165 memory_allocation(sizeof(*write_queue.end_ptr->next_in_queue));
166 if (!write_queue.end_ptr->next_in_queue) {
167 result = -E_SB_NO_MEM_BUFFER_STRUCT;
168 goto buffer_allocation_error_free;
172 /* Now next write_queue.end_ptr is next */
173 write_queue.end_ptr = write_queue.end_ptr->next_in_queue;
175 write_queue.end_ptr->next_in_queue = NULL;
176 write_queue.end_ptr->full_buffer_part = 0;
177 write_queue.end_ptr->data_buffer =
178 buffer_allocation(queue_subbuffer_size);
179 if (!write_queue.end_ptr->data_buffer) {
180 result = -E_SB_NO_MEM_DATA_BUFFER;
181 goto buffer_allocation_error_free;
185 sync_init(&write_queue.end_ptr->buffer_sync);
187 /* Buffer initialization */
188 memset(buffer_address(write_queue.end_ptr->data_buffer), 0,
189 queue_subbuffer_size);
192 /* All subbuffers are in write list */
193 write_queue.subbuffers_count = subbuffers_count;
197 /* In case of errors, this code is called */
198 /* Free all previously allocated memory */
199 buffer_allocation_error_free:
200 clean_tmp_struct = write_queue.start_ptr;
202 for (j = 0; j < allocated_structs; j++) {
203 clean_tmp_struct = write_queue.start_ptr;
204 if (allocated_buffers) {
205 buffer_free(clean_tmp_struct->data_buffer, queue_subbuffer_size);
208 if (write_queue.start_ptr != write_queue.end_ptr)
209 write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
210 memory_free(clean_tmp_struct);
212 write_queue.end_ptr = NULL;
213 write_queue.start_ptr = NULL;
215 buffer_allocation_queue_busy_free:
216 memory_free(queue_busy);
219 buffer_allocation_error_ret:
223 int buffer_queue_reset(void)
225 struct swap_subbuffer *buffer = read_queue.start_ptr;
227 /* Check if there are some subbuffers in busy list. If so - return error */
228 if (get_busy_buffers_count())
229 return -E_SB_UNRELEASED_BUFFERS;
231 /* Lock read sync primitive */
232 sync_lock(&read_queue.queue_sync);
234 /* Set all subbuffers in read list to write list and reinitialize them */
235 while (read_queue.start_ptr) {
237 /* Lock buffer sync primitive to prevent writing to buffer if it had
238 * been selected for writing, but still wasn't wrote. */
239 sync_lock(&buffer->buffer_sync);
241 buffer = read_queue.start_ptr;
243 /* If we reached end of the list */
244 if (read_queue.start_ptr == read_queue.end_ptr) {
245 read_queue.end_ptr = NULL;
247 read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
249 /* Reinit full buffer part */
250 buffer->full_buffer_part = 0;
252 add_to_write_list(buffer);
254 /* Unlock buffer sync primitive */
255 sync_unlock(&buffer->buffer_sync);
258 /* Unlock read primitive */
259 sync_unlock(&read_queue.queue_sync);
264 void buffer_queue_free(void)
266 struct swap_subbuffer *tmp = NULL;
268 /* Lock all sync primitives to prevet accessing free memory */
269 sync_lock(&write_queue.queue_sync);
270 sync_lock(&read_queue.queue_sync);
271 sync_lock(&buffer_busy_sync);
273 /* Free buffers and structures memory that are in read list */
274 while (read_queue.start_ptr) {
275 tmp = read_queue.start_ptr;
276 read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
277 buffer_free(tmp->data_buffer, queue_subbuffer_size);
281 /* Free buffers and structures memory that are in read list */
282 while (write_queue.start_ptr) {
283 tmp = write_queue.start_ptr;
284 write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
285 buffer_free(tmp->data_buffer, queue_subbuffer_size);
290 memory_free(queue_busy);
293 queue_subbuffer_size = 0;
294 queue_subbuffer_count = 0;
295 read_queue.start_ptr = NULL;
296 read_queue.end_ptr = NULL;
297 write_queue.start_ptr = NULL;
298 write_queue.end_ptr = NULL;
300 /* Unlock all sync primitives */
301 sync_unlock(&buffer_busy_sync);
302 sync_unlock(&read_queue.queue_sync);
303 sync_unlock(&write_queue.queue_sync);
306 static unsigned int is_buffer_enough(struct swap_subbuffer *subbuffer,
309 /* XXX Think about checking full_buffer_part for correctness
310 * (<queue_subbuffer_size). It should be true, but if isn't (due to sources
311 * chaning, etc.) this function should be true! */
312 return ((queue_subbuffer_size-subbuffer->full_buffer_part) >= size) ? 1 : 0;
315 static void next_queue_element(struct queue_t *queue)
317 /* If we reached the last elemenet, end pointer should point to NULL */
318 if (queue->start_ptr == queue->end_ptr)
319 queue->end_ptr = NULL;
321 queue->start_ptr = queue->start_ptr->next_in_queue;
322 --queue->subbuffers_count;
325 /* Get first subbuffer from read list */
326 struct swap_subbuffer *get_from_read_list(void)
328 struct swap_subbuffer *result = NULL;
330 /* Lock read sync primitive */
331 sync_lock(&read_queue.queue_sync);
333 if (read_queue.start_ptr == NULL) {
335 goto get_from_read_list_unlock;
338 result = read_queue.start_ptr;
340 next_queue_element(&read_queue);
342 get_from_read_list_unlock:
343 /* Unlock read sync primitive */
344 sync_unlock(&read_queue.queue_sync);
349 /* Add subbuffer to read list */
350 void add_to_read_list(struct swap_subbuffer *subbuffer)
352 /* Lock read sync primitive */
353 sync_lock(&read_queue.queue_sync);
355 if (!read_queue.start_ptr)
356 read_queue.start_ptr = subbuffer;
358 if (read_queue.end_ptr) {
359 read_queue.end_ptr->next_in_queue = subbuffer;
361 read_queue.end_ptr = read_queue.end_ptr->next_in_queue;
363 read_queue.end_ptr = subbuffer;
365 read_queue.end_ptr->next_in_queue = NULL;
366 ++read_queue.subbuffers_count;
368 /* Unlock read sync primitive */
369 sync_unlock(&read_queue.queue_sync);
/* Add a subbuffer to the read list and invoke the driver-module callback.
 * Returns the callback's result. */
int add_to_read_list_with_callback(struct swap_subbuffer *subbuffer)
{
	int result = 0;

	add_to_read_list(subbuffer);
	/* TODO Handle return value of add_to_read_list path as well */
	result = swap_buffer_callback(subbuffer);

	return result;
}
384 /* Returns subbuffers to read count */
385 unsigned int get_readable_buf_cnt(void)
387 return read_queue.subbuffers_count;
391 /* Get first writable subbuffer from write list */
392 struct swap_subbuffer *get_from_write_list(size_t size, void **ptr_to_write)
394 struct swap_subbuffer *result = NULL;
396 /* Callbacks are called at the end of the function to prevent deadlocks */
397 struct queue_t callback_queue = {
404 struct swap_subbuffer *tmp_buffer = NULL;
407 *ptr_to_write = NULL;
409 /* Lock write list sync primitive */
410 sync_lock(&write_queue.queue_sync);
412 while (write_queue.start_ptr) {
414 /* We're found subbuffer */
415 if (is_buffer_enough(write_queue.start_ptr, size)) {
417 result = write_queue.start_ptr;
418 *ptr_to_write = (void *)((unsigned long)
419 (buffer_address(result->data_buffer)) +
420 result->full_buffer_part);
422 /* Add data size to full_buffer_part. Very important to do it in
423 * write_queue.queue_sync spinlock */
424 write_queue.start_ptr->full_buffer_part += size;
426 /* Lock rw sync. Should be unlocked in swap_buffer_write() */
427 sync_lock(&result->buffer_sync);
429 /* This subbuffer is not enough => it goes to read list */
431 result = write_queue.start_ptr;
433 next_queue_element(&write_queue);
435 /* Add to callback list */
436 if (!callback_queue.start_ptr)
437 callback_queue.start_ptr = result;
439 if (callback_queue.end_ptr)
440 callback_queue.end_ptr->next_in_queue = result;
441 callback_queue.end_ptr = result;
442 callback_queue.end_ptr->next_in_queue = NULL;
447 /* Unlock write list sync primitive */
448 sync_unlock(&write_queue.queue_sync);
450 /* Adding buffers to read list and calling callbacks */
451 for (tmp_buffer = NULL; callback_queue.start_ptr; ) {
452 if (callback_queue.start_ptr == callback_queue.end_ptr)
453 callback_queue.end_ptr = NULL;
455 tmp_buffer = callback_queue.start_ptr;
456 callback_queue.start_ptr = callback_queue.start_ptr->next_in_queue;
458 add_to_read_list_with_callback(tmp_buffer);
464 /* Add subbuffer to write list */
465 void add_to_write_list(struct swap_subbuffer *subbuffer)
467 sync_lock(&write_queue.queue_sync);
470 subbuffer->full_buffer_part = 0;
472 if (!write_queue.start_ptr)
473 write_queue.start_ptr = subbuffer;
475 if (write_queue.end_ptr) {
476 write_queue.end_ptr->next_in_queue = subbuffer;
477 write_queue.end_ptr = write_queue.end_ptr->next_in_queue;
479 write_queue.end_ptr = subbuffer;
481 write_queue.end_ptr->next_in_queue = NULL;
482 ++write_queue.subbuffers_count;
484 sync_unlock(&write_queue.queue_sync);
487 /* Returns subbuffers to write count */
488 unsigned int get_writable_buf_cnt(void)
490 return write_queue.subbuffers_count;
494 /* Add subbuffer to busy list when it is read from out of the buffer */
495 void add_to_busy_list(struct swap_subbuffer *subbuffer)
497 /* Lock busy sync primitive */
498 sync_lock(&buffer_busy_sync);
500 subbuffer->next_in_queue = NULL;
501 queue_busy[queue_busy_last_element] = subbuffer;
502 queue_busy_last_element += 1;
504 /* Unlock busy sync primitive */
505 sync_unlock(&buffer_busy_sync);
508 /* Remove subbuffer from busy list when it is released */
509 int remove_from_busy_list(struct swap_subbuffer *subbuffer)
511 int result = -E_SB_NO_SUBBUFFER_IN_BUSY; // For sanitization
514 /* Lock busy list sync primitive */
515 sync_lock(&buffer_busy_sync);
517 /* Sanitization and removing */
518 for (i = 0; i < queue_busy_last_element; i++) {
519 if (queue_busy[i] == subbuffer) {
520 /* Last element goes here and length is down 1 */
521 queue_busy[i] = queue_busy[queue_busy_last_element - 1];
522 queue_busy_last_element -= 1;
523 result = E_SB_SUCCESS;
528 /* Unlock busy list sync primitive */
529 sync_unlock(&buffer_busy_sync);
534 /* Set all subbuffers in write list to read list */
535 void buffer_queue_flush(void)
537 struct swap_subbuffer *buffer = write_queue.start_ptr;
539 /* Locking write sync primitive */
540 sync_lock(&write_queue.queue_sync);
542 while (write_queue.start_ptr &&
543 write_queue.start_ptr->full_buffer_part) {
545 /* Lock buffer sync primitive to prevent writing to buffer if it had
546 * been selected for writing, but still wasn't wrote. */
547 sync_lock(&buffer->buffer_sync);
549 buffer = write_queue.start_ptr;
550 next_queue_element(&write_queue);
551 add_to_read_list(buffer);
553 /* Unlock buffer sync primitive */
554 sync_unlock(&buffer->buffer_sync);
557 /* Unlock write primitive */
558 sync_unlock(&write_queue.queue_sync);
561 /* Get subbuffers count in busy list */
562 int get_busy_buffers_count(void)
566 sync_lock(&buffer_busy_sync);
567 result = queue_busy_last_element;
568 sync_unlock(&buffer_busy_sync);
573 /* Get memory pages count in subbuffer */
574 int get_pages_count_in_subbuffer(void)
576 /* Return 1 if pages order 0, or 2 of power pages_order_in_subbuffer otherwise */
577 return (pages_order_in_subbuffer) ? 2 << (pages_order_in_subbuffer - 1) : 1;