/*
 * modules/buffer/buffer_queue.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) Samsung Electronics, 2013
 *
 * 2013 Alexander Aksenov <a.aksenov@samsung.com>: SWAP Buffer implement
 */
25 /* SWAP buffer queues implementation */
/* For all memory allocation/deallocation operations, except buffer memory
 * allocation/deallocation, the following should be used:
 *   memory_allocation(size_t memory_size)
 *   memory_free(void *ptr)
 *
 * For subbuffer allocation/deallocation operations the following should be
 * used:
 *   buffer_allocation(size_t subbuffer_size)
 *   buffer_free(void *ptr, size_t subbuffer_size)
 *
 * To get a buffer pointer for any usage, EXCEPT ALLOCATION AND DEALLOCATION,
 * use the following define:
 *   buffer_pointer(void *ptr_to_buffer_element_of_swap_buffer_structure)
 * DO NOT USE THE SUBBUFFER PTR IN STRUCT SWAP_BUFFER WITHOUT THIS DEFINE!
 * It will be ok for user space, but will fail in kernel space.
 *
 * See space_dep_types_and_def.h for details */
45 #include "buffer_queue.h"
46 #include "swap_buffer_to_buffer_queue.h"
47 #include "swap_buffer_errors.h"
49 /* Queue structure. Consist of pointers to the first and the last elements of
52 struct swap_subbuffer *start_ptr;
53 struct swap_subbuffer *end_ptr;
58 struct queue write_queue = {
64 struct queue read_queue = {
69 /* Pointers array. Points to busy buffers */
70 static struct swap_buffer **queue_busy = NULL;
72 /* Store last busy element */
73 static unsigned int queue_busy_last_element;
75 /* Subbuffers count */
76 static unsigned int queue_subbuffer_count = 0;
78 /* One subbuffer size */
79 static size_t queue_subbuffer_size = 0;
82 static sync_t buffer_busy_sync;
84 /* Memory pages count in one subbuffer */
85 static int pages_order_in_subbuffer = 0;
88 int buffer_queue_allocation(size_t subbuffer_size,
89 unsigned int subbuffers_count)
93 unsigned int allocated_buffers = 0;
94 unsigned int allocated_structs = 0;
95 struct swap_subbuffer *clean_tmp_struct;
98 /* Static varibles initialization */
99 queue_subbuffer_size = subbuffer_size;
100 queue_subbuffer_count = subbuffers_count;
101 queue_busy_last_element = 0;
103 /* Set variable pages_in_subbuffer. It is used for allocation and
104 * deallocation memory pages and its value is returned from
105 * swap_buffer_get() and contains page count in one subbuffer.
106 * All this useful only in kernel space. In userspace it is dummy.*/
107 set_pages_order_in_subbuffer(queue_subbuffer_size);
108 /* Sync primitives initialization */
109 sync_init(&read_queue.queue_sync);
110 sync_init(&write_queue.queue_sync);
111 sync_init(&buffer_busy_sync);
113 /* Memory allocation for queue_busy */
114 queue_busy = memory_allocation(sizeof(&queue_busy) * queue_subbuffer_count);
117 result = E_SB_NO_MEM_QUEUE_BUSY;
118 goto buffer_allocation_error_ret;
121 /* Memory allocation for swap_subbuffer structures */
123 /* Allocation for first structure. */
124 write_queue.start_ptr = memory_allocation(sizeof(&write_queue.start_ptr));
126 if (!write_queue.start_ptr) {
127 result = E_SB_NO_MEM_BUFFER_STRUCT;
128 goto buffer_allocation_queue_busy_free;
133 write_queue.end_ptr = write_queue.start_ptr;
135 write_queue.end_ptr->next_in_queue = NULL;
136 write_queue.end_ptr->full_buffer_part = 0;
137 write_queue.end_ptr->data_buffer = buffer_allocation(queue_subbuffer_size);
138 if (!write_queue.end_ptr->data_buffer) {
139 print_err("Cannot allocate memory for buffer 1\n");
140 result = E_SB_NO_MEM_DATA_BUFFER;
141 goto buffer_allocation_error_free;
145 print_msg(" Buffer allocated = 0x%x\n", (unsigned long)write_queue.end_ptr->data_buffer);
147 sync_init(&write_queue.end_ptr->buffer_sync);
149 /* Buffer initialization */
150 memset(buffer_address(write_queue.end_ptr->data_buffer), 0, queue_subbuffer_size);
152 /* Allocation for other structures. */
153 for (i = 1; i < queue_subbuffer_count; i++) {
154 write_queue.end_ptr->next_in_queue =
155 memory_allocation(sizeof(write_queue.end_ptr->next_in_queue));
156 if (!write_queue.end_ptr->next_in_queue) {
157 result = E_SB_NO_MEM_BUFFER_STRUCT;
158 goto buffer_allocation_error_free;
162 /* Now next write_queue.end_ptr is next */
163 write_queue.end_ptr = write_queue.end_ptr->next_in_queue;
165 write_queue.end_ptr->next_in_queue = NULL;
166 write_queue.end_ptr->full_buffer_part = 0;
167 write_queue.end_ptr->data_buffer =
168 buffer_allocation(queue_subbuffer_size);
169 if (!write_queue.end_ptr->data_buffer) {
170 result = E_SB_NO_MEM_DATA_BUFFER;
171 goto buffer_allocation_error_free;
175 print_msg(" Buffer allocated = 0x%x, pages_order = %d\n",
176 (unsigned long)buffer_address(write_queue.end_ptr->data_buffer),
177 pages_order_in_subbuffer);
179 sync_init(&write_queue.end_ptr->buffer_sync);
181 /* Buffer initialization */
182 memset(buffer_address(write_queue.end_ptr->data_buffer), 0,
183 queue_subbuffer_size);
188 /* In case of errors, this code is called */
189 /* Free all previously allocated memory */
190 buffer_allocation_error_free:
191 clean_tmp_struct = write_queue.start_ptr;
193 for (j = 0; j < allocated_structs; j++) {
194 clean_tmp_struct = write_queue.start_ptr;
195 if (allocated_buffers) {
196 buffer_free(clean_tmp_struct->data_buffer, queue_subbuffer_size);
199 if (write_queue.start_ptr != write_queue.end_ptr)
200 write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
201 memory_free(clean_tmp_struct);
203 write_queue.end_ptr = NULL;
204 write_queue.start_ptr = NULL;
206 buffer_allocation_queue_busy_free:
207 memory_free(queue_busy);
210 buffer_allocation_error_ret:
214 void buffer_queue_free(void)
216 struct swap_subbuffer *tmp = NULL;
218 //TODO Lock read list semaphore to prevent getting subbuffer from read list
219 /* Set all write buffers to read list */
220 set_all_to_read_list();
222 /* Free buffers and structures memory that are in read list */
223 while (read_queue.start_ptr) {
224 tmp = read_queue.start_ptr;
225 read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
226 buffer_free(tmp->data_buffer, queue_subbuffer_size);
227 print_msg(" Buffer free = 0x%x\n", (unsigned long)
228 buffer_address(tmp->data_buffer));
233 memory_free(queue_busy);
236 queue_subbuffer_size = 0;
237 queue_subbuffer_count = 0;
238 read_queue.start_ptr = NULL;
239 read_queue.end_ptr = NULL;
240 write_queue.start_ptr = NULL;
241 write_queue.end_ptr = NULL;
244 static unsigned int is_buffer_enough(struct swap_subbuffer *subbuffer,
247 /* XXX Think about checking full_buffer_part for correctness
248 * (<queue_subbuffer_size). It should be true, but if isn't (due to sources
249 * chaning, etc.) this function should be true! */
250 return ((queue_subbuffer_size-subbuffer->full_buffer_part) >= size) ? 1 : 0;
253 /* Get first subbuffer from read list */
254 struct swap_subbuffer *get_from_read_list(void)
256 struct swap_subbuffer *result = NULL;
258 /* Lock read sync primitive */
259 sync_lock(&read_queue.queue_sync);
261 if (read_queue.start_ptr == NULL) {
263 goto get_from_read_list_unlock;
266 result = read_queue.start_ptr;
268 /* If this is the last readable buffer, read_queue.start_ptr next time will
269 * points to NULL and that case is handled in the beginning of function
271 if (read_queue.start_ptr == read_queue.end_ptr) {
272 read_queue.end_ptr = NULL;
274 read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
276 get_from_read_list_unlock:
277 /* Unlock read sync primitive */
278 sync_unlock(&read_queue.queue_sync);
283 /* Add subbuffer to read list */
284 void add_to_read_list(struct swap_subbuffer *subbuffer)
287 /* Lock read sync primitive */
288 sync_lock(&read_queue.queue_sync);
290 if (!read_queue.start_ptr)
291 read_queue.start_ptr = subbuffer;
293 if (read_queue.end_ptr) {
294 read_queue.end_ptr->next_in_queue = subbuffer;
296 read_queue.end_ptr = read_queue.end_ptr->next_in_queue;
298 read_queue.end_ptr = subbuffer;
300 read_queue.end_ptr->next_in_queue = NULL;
302 /* Unlock read sync primitive */
303 sync_unlock(&read_queue.queue_sync);
/* Append the subbuffer to the read list, then invoke the driver-module
 * callback for it. Returns the callback's result. */
int add_to_read_list_with_callback(struct swap_subbuffer *subbuffer)
{
	int ret;

	add_to_read_list(subbuffer);
	/* TODO Handle ret value */
	ret = swap_buffer_callback(subbuffer);

	return ret;
}
318 /* Get first writable subbuffer from write list */
319 struct swap_subbuffer *get_from_write_list(size_t size, void **ptr_to_write)
321 struct swap_subbuffer *result = NULL;
323 /* Callbacks are called at the end of the function to prevent deadlocks */
324 struct queue callback_queue = {
328 struct swap_subbuffer *tmp_buffer = NULL;
331 *ptr_to_write = NULL;
333 /* Lock write list sync primitive */
334 sync_lock(&write_queue.queue_sync);
336 while (write_queue.start_ptr) {
337 /* If start points to NULL => list is empty => exit */
338 if (!write_queue.start_ptr) {
340 goto get_from_write_list_unlock;
343 /* We're found subbuffer */
344 if (is_buffer_enough(write_queue.start_ptr, size)) {
346 result = write_queue.start_ptr;
347 *ptr_to_write = (void *)((unsigned long)
348 (buffer_address(result->data_buffer)) +
349 result->full_buffer_part);
351 /* Add data size to full_buffer_part. Very important to do it in
352 * write_queue.queue_sync spinlock */
353 write_queue.start_ptr->full_buffer_part += size;
355 /* Lock rw sync. Should be unlocked in swap_buffer_write() */
356 sync_lock(&result->buffer_sync);
358 /* This subbuffer is not enough => it goes to read list */
361 result = write_queue.start_ptr;
363 /* If we reached end of the list */
364 if (write_queue.start_ptr == write_queue.end_ptr) {
365 write_queue.end_ptr = NULL;
368 /* Move start write pointer */
369 write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
371 /* Add to callback list */
372 if (!callback_queue.start_ptr)
373 callback_queue.start_ptr = result;
375 if (callback_queue.end_ptr)
376 callback_queue.end_ptr->next_in_queue = result;
377 callback_queue.end_ptr = result;
378 callback_queue.end_ptr->next_in_queue = NULL;
383 get_from_write_list_unlock:
384 /* Unlock write list sync primitive */
385 sync_unlock(&write_queue.queue_sync);
387 /* Adding buffers to read list and calling callbacks */
388 for (tmp_buffer = NULL; callback_queue.start_ptr; ) {
389 if (callback_queue.start_ptr == callback_queue.end_ptr)
390 callback_queue.end_ptr = NULL;
392 tmp_buffer = callback_queue.start_ptr;
393 callback_queue.start_ptr = callback_queue.start_ptr->next_in_queue;
395 add_to_read_list_with_callback(tmp_buffer);
401 /* Add subbuffer to write list */
402 void add_to_write_list(struct swap_subbuffer *subbuffer)
404 sync_lock(&write_queue.queue_sync);
407 // TODO Useless memset
408 // memset(buffer_address(subbuffer->data_buffer), 0, queue_subbuffer_size);
409 subbuffer->full_buffer_part = 0;
411 if (!write_queue.start_ptr)
412 write_queue.start_ptr = subbuffer;
414 if (write_queue.end_ptr) {
415 write_queue.end_ptr->next_in_queue = subbuffer;
416 write_queue.end_ptr = write_queue.end_ptr->next_in_queue;
418 write_queue.end_ptr = subbuffer;
420 write_queue.end_ptr->next_in_queue = NULL;
422 sync_unlock(&write_queue.queue_sync);
425 /* Add subbuffer to busy list when it is read from out of the buffer */
426 void add_to_busy_list(struct swap_subbuffer *subbuffer)
428 /* Lock busy sync primitive */
429 sync_lock(&buffer_busy_sync);
431 subbuffer->next_in_queue = NULL;
432 queue_busy[queue_busy_last_element] = subbuffer;
433 queue_busy_last_element += 1;
435 /* Unlock busy sync primitive */
436 sync_unlock(&buffer_busy_sync);
439 /* Remove subbuffer from busy list when it is released */
440 int remove_from_busy_list(struct swap_subbuffer *subbuffer)
442 int result = E_SB_NO_SUBBUFFER_IN_BUSY; // For sanitization
445 /* Lock busy list sync primitive */
446 sync_lock(&buffer_busy_sync);
448 /* Sanitization and removing */
449 for (i = 0; i < queue_busy_last_element; i++) {
450 if (queue_busy[i] == subbuffer) {
451 /* Last element goes here and length is down 1 */
452 queue_busy[i] = queue_busy[queue_busy_last_element - 1];
453 queue_busy_last_element -= 1;
454 result = E_SB_SUCCESS;
459 /* Unlock busy list sync primitive */
460 sync_unlock(&buffer_busy_sync);
465 /* Get subbuffers count in read list */
466 /* XXX Think about locks */
467 int get_full_buffers_count(void)
470 struct swap_subbuffer *buffer = read_queue.start_ptr;
472 while (buffer && buffer->full_buffer_part) {
474 buffer = buffer->next_in_queue;
480 /* Set all subbuffers in write list to read list */
481 void set_all_to_read_list(void)
483 struct swap_subbuffer *buffer = write_queue.start_ptr;
485 /* Locking write sync primitive */
486 sync_lock(&write_queue.queue_sync);
488 while (write_queue.start_ptr) {
489 /* Waiting till semaphore should be posted */
491 // TODO To think: It's not bad as it is, but maybe it would be better locking
492 // semaphore while changing its list? (Not bad now, cause buffer should have
493 // already been stopped).
495 sync_lock(&buffer->buffer_sync);
497 sync_unlock(&buffer->buffer_sync);
499 buffer = write_queue.start_ptr;
501 /* If we reached end of the list */
502 if (write_queue.start_ptr == write_queue.end_ptr) {
503 write_queue.end_ptr = NULL;
505 write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
507 add_to_read_list(buffer);
510 /* Unlocking write primitive */
511 sync_unlock(&write_queue.queue_sync);
514 /* Get subbuffers count in busy list */
515 /* XXX Think abount lock */
516 int get_busy_buffers_count(void)
518 return queue_busy_last_element;
521 /* Get memory pages count in subbuffer */
522 int get_pages_count_in_subbuffer(void)
524 /* Return 1 if pages order 0, or 2 of power pages_order_in_subbuffer otherwise */
525 return (pages_order_in_subbuffer) ? 2 << (pages_order_in_subbuffer - 1) : 1;