2 * buffer/buffer_queue.c
3 * @author Alexander Aksenov <a.aksenov@samsung.com>: SWAP Buffer implement
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * Copyright (C) Samsung Electronics, 2013
25 * @section DESCRIPTION
27 * Implements buffers queues interface
30 /* For all memory allocation/deallocation operations, except buffer memory
31 * allocation/deallocation should be used
32 * memory_allocation(size_t memory_size)
33 * memory_free(void *ptr)
35 * For subbuffer allocation/deallocation operations should be used
36 * buffer_allocation(size_t subbuffer_size)
37 * buffer_free(void *ptr, size_t subbuffer_size)
38 * To get buffer pointer for any usage, EXCEPT ALLOCATION AND DEALLOCATION
39 * use the following define:
40 * buffer_pointer(void *ptr_to_buffer_element_of_swap_buffer_structure)
41 * DO NOT USE SUBBUFFER PTR IN STRUCT SWAP_BUFFER WITHOUT THIS DEFINE!
42 * It will be ok for user space, but fail in kernel space.
44 * See space_dep_types_and_def.h for details */
48 #include "buffer_queue.h"
49 #include "swap_buffer_to_buffer_queue.h"
50 #include "swap_buffer_errors.h"
51 #include "kernel_operations.h"
 * @brief Queue structure. Consists of pointers to the first and the last
 * queue elements, a subbuffer counter and the queue access lock.
 * @var queue_t::start_ptr
 * Pointer to the first subbuffer in queue
 * @var queue_t::end_ptr
 * Pointer to the last subbuffer in queue
 * @var queue_t::subbuffers_count
 * Subbuffers count in queue
 * @var queue_t::queue_sync
 * Queue access sync primitive
	struct swap_subbuffer *start_ptr;	/* head: next subbuffer to take */
	struct swap_subbuffer *end_ptr;		/* tail: insertion point; NULL when empty */
	unsigned int subbuffers_count;		/* number of subbuffers currently queued */
	struct sync_t queue_sync;		/* protects all fields of this queue */
 * @brief Represents write queue: subbuffers available for writing.
struct queue_t write_queue = {
	.subbuffers_count = 0,
 * @brief Represents read queue: filled subbuffers waiting to be read.
struct queue_t read_queue = {
	.subbuffers_count = 0,
/* Pointers array. Points to busy buffers (handed out to readers) */
static struct swap_subbuffer **queue_busy = NULL;
/* Index one past the last occupied slot of queue_busy, i.e. busy count */
static unsigned int queue_busy_last_element;
/* Total subbuffers count; set in buffer_queue_allocation() */
static unsigned int queue_subbuffer_count = 0;
/* One subbuffer size in bytes; set in buffer_queue_allocation() */
static size_t queue_subbuffer_size = 0;
/* Protects queue_busy and queue_busy_last_element */
static struct sync_t buffer_busy_sync = {
/* Page order (log2 of memory pages count) of one subbuffer,
 * see get_pages_count_in_subbuffer() */
static int pages_order_in_subbuffer = 0;
 * @brief Allocates memory for swap_subbuffer structures and subbuffers.
 * Total allocated memory = subbuffer_size * subbuffers_count.
 * On failure everything allocated so far is released via the goto chain.
 * @param subbuffer_size Size of each subbuffer.
 * @param subbuffers_count Count of subbuffers.
 * @return 0 on success, negative error code otherwise.
int buffer_queue_allocation(size_t subbuffer_size,
			    unsigned int subbuffers_count)
	/* Progress counters consumed by the error-path cleanup below */
	unsigned int allocated_buffers = 0;
	unsigned int allocated_structs = 0;
	struct swap_subbuffer *clean_tmp_struct;
	/* Static variables initialization */
	queue_subbuffer_size = subbuffer_size;
	queue_subbuffer_count = subbuffers_count;
	queue_busy_last_element = 0;
	/* Set pages_order_in_subbuffer. It is used for allocating and
	 * deallocating memory pages; its value is returned from
	 * swap_buffer_get() as the page count in one subbuffer.
	 * All this is useful only in kernel space; in user space it is a
	 * dummy. */
	set_pages_order_in_subbuffer(queue_subbuffer_size);
	/* Sync primitives initialization */
	sync_init(&read_queue.queue_sync);
	sync_init(&write_queue.queue_sync);
	sync_init(&buffer_busy_sync);
	/* Memory allocation for queue_busy (busy-subbuffer pointer array) */
	queue_busy = memory_allocation(sizeof(*queue_busy) * queue_subbuffer_count);
		result = -E_SB_NO_MEM_QUEUE_BUSY;
		goto buffer_allocation_error_ret;
	/* Memory allocation for swap_subbuffer structures */
	/* Allocation for first structure. */
	write_queue.start_ptr = memory_allocation(sizeof(*write_queue.start_ptr));
	if (!write_queue.start_ptr) {
		result = -E_SB_NO_MEM_BUFFER_STRUCT;
		goto buffer_allocation_queue_busy_free;
	write_queue.end_ptr = write_queue.start_ptr;
	write_queue.end_ptr->next_in_queue = NULL;
	write_queue.end_ptr->full_buffer_part = 0;
	write_queue.end_ptr->data_buffer = buffer_allocation(queue_subbuffer_size);
	if (!write_queue.end_ptr->data_buffer) {
		print_err("Cannot allocate memory for buffer 1\n");
		result = -E_SB_NO_MEM_DATA_BUFFER;
		goto buffer_allocation_error_free;
	sync_init(&write_queue.end_ptr->buffer_sync);
	/* Buffer initialization: zero-fill the fresh subbuffer */
	memset(buffer_address(write_queue.end_ptr->data_buffer), 0, queue_subbuffer_size);
	/* Allocation for other structures. */
	for (i = 1; i < queue_subbuffer_count; i++) {
		write_queue.end_ptr->next_in_queue =
			memory_allocation(sizeof(*write_queue.end_ptr->next_in_queue));
		if (!write_queue.end_ptr->next_in_queue) {
			result = -E_SB_NO_MEM_BUFFER_STRUCT;
			goto buffer_allocation_error_free;
		/* Now next write_queue.end_ptr is next */
		write_queue.end_ptr = write_queue.end_ptr->next_in_queue;
		write_queue.end_ptr->next_in_queue = NULL;
		write_queue.end_ptr->full_buffer_part = 0;
		write_queue.end_ptr->data_buffer =
			buffer_allocation(queue_subbuffer_size);
		if (!write_queue.end_ptr->data_buffer) {
			result = -E_SB_NO_MEM_DATA_BUFFER;
			goto buffer_allocation_error_free;
		sync_init(&write_queue.end_ptr->buffer_sync);
		/* Buffer initialization: zero-fill the fresh subbuffer */
		memset(buffer_address(write_queue.end_ptr->data_buffer), 0,
		       queue_subbuffer_size);
	/* All subbuffers are in write list */
	write_queue.subbuffers_count = subbuffers_count;
	/* In case of errors, this code is called */
	/* Free all previously allocated memory, walking from the list head */
	buffer_allocation_error_free:
	clean_tmp_struct = write_queue.start_ptr;
	for (j = 0; j < allocated_structs; j++) {
		clean_tmp_struct = write_queue.start_ptr;
		if (allocated_buffers) {
			buffer_free(clean_tmp_struct->data_buffer, queue_subbuffer_size);
		if (write_queue.start_ptr != write_queue.end_ptr)
			write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
		memory_free(clean_tmp_struct);
	write_queue.end_ptr = NULL;
	write_queue.start_ptr = NULL;
	buffer_allocation_queue_busy_free:
	memory_free(queue_busy);
	buffer_allocation_error_ret:
251 * @brief Resets all subbuffers for writing.
253 * @return 0 on success, negative error code otherwise.
255 int buffer_queue_reset(void)
257 struct swap_subbuffer *buffer = read_queue.start_ptr;
259 /* Check if there are some subbuffers in busy list. If so - return error */
260 if (get_busy_buffers_count())
261 return -E_SB_UNRELEASED_BUFFERS;
263 /* Lock read sync primitive */
264 sync_lock(&read_queue.queue_sync);
266 /* Set all subbuffers in read list to write list and reinitialize them */
267 while (read_queue.start_ptr) {
269 /* Lock buffer sync primitive to prevent writing to buffer if it had
270 * been selected for writing, but still wasn't wrote. */
271 sync_lock(&buffer->buffer_sync);
273 buffer = read_queue.start_ptr;
275 /* If we reached end of the list */
276 if (read_queue.start_ptr == read_queue.end_ptr) {
277 read_queue.end_ptr = NULL;
279 read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
281 /* Reinit full buffer part */
282 buffer->full_buffer_part = 0;
284 add_to_write_list(buffer);
286 /* Unlock buffer sync primitive */
287 sync_unlock(&buffer->buffer_sync);
290 /* Unlock read primitive */
291 sync_unlock(&read_queue.queue_sync);
 * @brief Free all allocated subbuffers and the busy-pointer array, and
 * reset the module state so the queue can be re-allocated later.
void buffer_queue_free(void)
	struct swap_subbuffer *tmp = NULL;
	/* Lock all sync primitives to prevent accessing freed memory */
	sync_lock(&write_queue.queue_sync);
	sync_lock(&read_queue.queue_sync);
	sync_lock(&buffer_busy_sync);
	/* Free buffers and structures memory that are in read list */
	while (read_queue.start_ptr) {
		tmp = read_queue.start_ptr;
		read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
		buffer_free(tmp->data_buffer, queue_subbuffer_size);
	/* Free buffers and structures memory that are in write list */
	while (write_queue.start_ptr) {
		tmp = write_queue.start_ptr;
		write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
		buffer_free(tmp->data_buffer, queue_subbuffer_size);
	memory_free(queue_busy);
	/* Reset module state for a possible future re-allocation */
	queue_subbuffer_size = 0;
	queue_subbuffer_count = 0;
	read_queue.start_ptr = NULL;
	read_queue.end_ptr = NULL;
	write_queue.start_ptr = NULL;
	write_queue.end_ptr = NULL;
	/* Unlock all sync primitives */
	sync_unlock(&buffer_busy_sync);
	sync_unlock(&read_queue.queue_sync);
	sync_unlock(&write_queue.queue_sync);
343 static unsigned int is_buffer_enough(struct swap_subbuffer *subbuffer,
346 /* XXX Think about checking full_buffer_part for correctness
347 * (<queue_subbuffer_size). It should be true, but if isn't (due to sources
348 * chaning, etc.) this function should be true! */
349 return ((queue_subbuffer_size-subbuffer->full_buffer_part) >= size) ? 1 : 0;
352 static void next_queue_element(struct queue_t *queue)
354 /* If we reached the last elemenet, end pointer should point to NULL */
355 if (queue->start_ptr == queue->end_ptr)
356 queue->end_ptr = NULL;
358 queue->start_ptr = queue->start_ptr->next_in_queue;
359 --queue->subbuffers_count;
363 * @brief Get first subbuffer from read list.
365 * @return Pointer to swap_subbuffer
367 struct swap_subbuffer *get_from_read_list(void)
369 struct swap_subbuffer *result = NULL;
371 /* Lock read sync primitive */
372 sync_lock(&read_queue.queue_sync);
374 if (read_queue.start_ptr == NULL) {
376 goto get_from_read_list_unlock;
379 result = read_queue.start_ptr;
381 next_queue_element(&read_queue);
383 get_from_read_list_unlock:
384 /* Unlock read sync primitive */
385 sync_unlock(&read_queue.queue_sync);
391 * @brief Add subbuffer to read list.
393 * @param subbuffer Pointer to the subbuffer to add.
396 void add_to_read_list(struct swap_subbuffer *subbuffer)
398 /* Lock read sync primitive */
399 sync_lock(&read_queue.queue_sync);
401 if (!read_queue.start_ptr)
402 read_queue.start_ptr = subbuffer;
404 if (read_queue.end_ptr) {
405 read_queue.end_ptr->next_in_queue = subbuffer;
407 read_queue.end_ptr = read_queue.end_ptr->next_in_queue;
409 read_queue.end_ptr = subbuffer;
411 read_queue.end_ptr->next_in_queue = NULL;
412 ++read_queue.subbuffers_count;
414 /* Unlock read sync primitive */
415 sync_unlock(&read_queue.queue_sync);
/**
 * @brief Adds the subbuffer to the read list, then notifies the driver
 * module through swap_buffer_callback().
 *
 * @param subbuffer Pointer to the subbuffer to add.
 * @return swap_buffer_callback result.
 */
int add_to_read_list_with_callback(struct swap_subbuffer *subbuffer)
{
	add_to_read_list(subbuffer);
	/* TODO Handle the callback return value */
	return swap_buffer_callback(subbuffer);
}
436 * @brief Returns subbuffers to read count.
438 * @return Count of subbuffers in read_queue.
440 unsigned int get_readable_buf_cnt(void)
442 return read_queue.subbuffers_count;
 * @brief Get first writable subbuffer from write list. Subbuffers too full
 * to hold @size bytes are migrated to the read list; their callbacks are
 * invoked only after the write-queue lock is released.
 * @param size Minimum amount of free space in subbuffer.
 * @param[out] ptr_to_write Pointer to the variable where pointer to the beginning
 * of memory for writing should be stored.
 * @return Found swap_subbuffer, or NULL if none has enough room.
struct swap_subbuffer *get_from_write_list(size_t size, void **ptr_to_write)
	struct swap_subbuffer *result = NULL;
	/* Callbacks are called at the end of the function to prevent deadlocks */
	struct queue_t callback_queue = {
	struct swap_subbuffer *tmp_buffer = NULL;
	*ptr_to_write = NULL;
	/* Lock write list sync primitive */
	sync_lock(&write_queue.queue_sync);
	while (write_queue.start_ptr) {
		/* We have found a suitable subbuffer */
		if (is_buffer_enough(write_queue.start_ptr, size)) {
			result = write_queue.start_ptr;
			/* Write position = buffer base + already-filled part */
			*ptr_to_write = (void *)((unsigned long)
				(buffer_address(result->data_buffer)) +
				result->full_buffer_part);
			/* Add data size to full_buffer_part. Very important to do it in
			 * write_queue.queue_sync spinlock */
			write_queue.start_ptr->full_buffer_part += size;
			/* Lock rw sync. Should be unlocked in swap_buffer_write() */
			sync_lock_no_flags(&result->buffer_sync);
		/* This subbuffer is not enough => it goes to read list */
			result = write_queue.start_ptr;
			next_queue_element(&write_queue);
			/* Append to the deferred-callback list */
			if (!callback_queue.start_ptr)
				callback_queue.start_ptr = result;
			if (callback_queue.end_ptr)
				callback_queue.end_ptr->next_in_queue = result;
			callback_queue.end_ptr = result;
			callback_queue.end_ptr->next_in_queue = NULL;
	/* Unlock write list sync primitive */
	sync_unlock(&write_queue.queue_sync);
	/* Adding buffers to read list and calling callbacks (lock released) */
	for (tmp_buffer = NULL; callback_queue.start_ptr; ) {
		if (callback_queue.start_ptr == callback_queue.end_ptr)
			callback_queue.end_ptr = NULL;
		tmp_buffer = callback_queue.start_ptr;
		callback_queue.start_ptr = callback_queue.start_ptr->next_in_queue;
		add_to_read_list_with_callback(tmp_buffer);
527 * @brief Add subbuffer to write list.
529 * @param subbuffer Pointer to the swap_subbuffer that should be stored.
532 void add_to_write_list(struct swap_subbuffer *subbuffer)
534 sync_lock(&write_queue.queue_sync);
537 subbuffer->full_buffer_part = 0;
539 if (!write_queue.start_ptr)
540 write_queue.start_ptr = subbuffer;
542 if (write_queue.end_ptr) {
543 write_queue.end_ptr->next_in_queue = subbuffer;
544 write_queue.end_ptr = write_queue.end_ptr->next_in_queue;
546 write_queue.end_ptr = subbuffer;
548 write_queue.end_ptr->next_in_queue = NULL;
549 ++write_queue.subbuffers_count;
551 sync_unlock(&write_queue.queue_sync);
555 * @brief Returns subbuffers to write count.
557 * @return Count of subbuffers in write queue.
559 unsigned int get_writable_buf_cnt(void)
561 return write_queue.subbuffers_count;
566 * @brief Add subbuffer to busy list when it is read from out of the buffer.
568 * @param subbuffer Pointer to the swap_subbuffer that should be added.
571 void add_to_busy_list(struct swap_subbuffer *subbuffer)
573 /* Lock busy sync primitive */
574 sync_lock(&buffer_busy_sync);
576 subbuffer->next_in_queue = NULL;
577 queue_busy[queue_busy_last_element] = subbuffer;
578 queue_busy_last_element += 1;
580 /* Unlock busy sync primitive */
581 sync_unlock(&buffer_busy_sync);
585 * @brief Remove subbuffer from busy list when it is released.
587 * @param subbuffer Pointer to the swap_subbuffer that should be removed.
588 * @return 0 on success, negative error code otherwise.
590 int remove_from_busy_list(struct swap_subbuffer *subbuffer)
592 int result = -E_SB_NO_SUBBUFFER_IN_BUSY; // For sanitization
595 /* Lock busy list sync primitive */
596 sync_lock(&buffer_busy_sync);
598 /* Sanitization and removing */
599 for (i = 0; i < queue_busy_last_element; i++) {
600 if (queue_busy[i] == subbuffer) {
601 /* Last element goes here and length is down 1 */
602 queue_busy[i] = queue_busy[queue_busy_last_element - 1];
603 queue_busy_last_element -= 1;
604 result = E_SB_SUCCESS;
609 /* Unlock busy list sync primitive */
610 sync_unlock(&buffer_busy_sync);
616 * @brief Set all subbuffers in write list to read list.
620 void buffer_queue_flush(void)
622 struct swap_subbuffer *buffer = write_queue.start_ptr;
624 /* Locking write sync primitive */
625 sync_lock(&write_queue.queue_sync);
627 while (write_queue.start_ptr &&
628 write_queue.start_ptr->full_buffer_part) {
630 /* Lock buffer sync primitive to prevent writing to buffer if it had
631 * been selected for writing, but still wasn't wrote. */
632 sync_lock(&buffer->buffer_sync);
634 buffer = write_queue.start_ptr;
635 next_queue_element(&write_queue);
636 add_to_read_list(buffer);
638 /* Unlock buffer sync primitive */
639 sync_unlock(&buffer->buffer_sync);
642 /* Unlock write primitive */
643 sync_unlock(&write_queue.queue_sync);
647 * @brief Get subbuffers count in busy list.
649 * @return Count of swap_subbuffers in busy list.
651 int get_busy_buffers_count(void)
655 sync_lock(&buffer_busy_sync);
656 result = queue_busy_last_element;
657 sync_unlock(&buffer_busy_sync);
663 * @brief Get memory pages count in subbuffer.
665 * @return Pages count in subbuffer.
667 int get_pages_count_in_subbuffer(void)
669 /* Return 1 if pages order 0, or 2 of power pages_order_in_subbuffer otherwise */
670 return (pages_order_in_subbuffer) ? 2 << (pages_order_in_subbuffer - 1) : 1;