[FIX] Buffer: fix to prevent compiler warnings
[kernel/swap-modules.git] / buffer / buffer_queue.c
1 /*
2  *  SWAP Buffer Module
3  *  modules/buffer/buffer_queue.c
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18  *
19  * Copyright (C) Samsung Electronics, 2013
20  *
21  * 2013  Alexander Aksenov <a.aksenov@samsung.com>: SWAP Buffer implement
22  *
23  */
24
25 /* SWAP buffer queues implementation */
26
27 /* For all memory allocation/deallocation operations, except buffer memory
28  * allocation/deallocation should be used 
29  *  memory_allocation(size_t memory_size)
30  *  memory_free(void *ptr)
31  * defines.
32  * For subbuffer allocation/deallocation operations should be used
33  *  buffer_allocation(size_t subbuffer_size)
34  *  buffer_free(void *ptr, size_t subbuffer_size)
35  * To get buffer pointer for any usage, EXCEPT ALLOCATION AND DEALLOCATION
36  * use the following define:
37  *  buffer_pointer(void *ptr_to_buffer_element_of_swap_buffer_structure)
38  * DO NOT USE SUBBUFFER PTR IN STRUCT SWAP_BUFFER WITHOUT THIS DEFINE!
39  * It will be ok for user space, but fail in kernel space.
40  *
41  * See space_dep_types_and_def.h for details */
42
43
44
45 #include "buffer_queue.h"
46 #include "swap_buffer_to_buffer_queue.h"
47 #include "swap_buffer_errors.h"
48 #include "kernel_operations.h"
49
50 /* Queue structure. Consist of pointers to the first and the last elements of
51  * queue. */
52 struct queue {
53         struct swap_subbuffer *start_ptr;
54         struct swap_subbuffer *end_ptr;
55         struct sync_t queue_sync;
56 };
57
58 /* Write queue */
59 struct queue write_queue = {
60         .start_ptr = NULL,
61         .end_ptr = NULL,
62         .queue_sync = {
63                 .flags = 0x0
64         }
65 };
66
67 /* Read queue */
68 struct queue read_queue = {
69         .start_ptr = NULL,
70         .end_ptr = NULL,
71         .queue_sync = {
72                 .flags = 0x0
73         }
74 };
75
76 /* Pointers array. Points to busy buffers */
77 static struct swap_subbuffer **queue_busy = NULL;
78
79 /* Store last busy element */
80 static unsigned int queue_busy_last_element;
81
82 /* Subbuffers count */
83 static unsigned int queue_subbuffer_count = 0;
84
85 /* One subbuffer size */
86 static size_t queue_subbuffer_size = 0;
87
88 /* Busy list sync */
89 static struct sync_t buffer_busy_sync = {
90         .flags = 0x0
91 };
92
93 /* Memory pages count in one subbuffer */
94 static int pages_order_in_subbuffer = 0;
95
96
97 int buffer_queue_allocation(size_t subbuffer_size,
98                             unsigned int subbuffers_count)
99 {
100         unsigned int i = 0;
101         unsigned int j = 0;
102         unsigned int allocated_buffers = 0;
103         unsigned int allocated_structs = 0;
104         struct swap_subbuffer *clean_tmp_struct;
105         int result;
106
107         /* Static varibles initialization */
108         queue_subbuffer_size = subbuffer_size;
109         queue_subbuffer_count = subbuffers_count;
110         queue_busy_last_element = 0;
111
112         /* Set variable pages_in_subbuffer. It is used for allocation and
113          * deallocation memory pages and its value is returned from
114          * swap_buffer_get() and contains page count in one subbuffer.
115          * All this useful only in kernel space. In userspace it is dummy.*/
116         set_pages_order_in_subbuffer(queue_subbuffer_size);
117         /* Sync primitives initialization */
118         sync_init(&read_queue.queue_sync);
119         sync_init(&write_queue.queue_sync);
120         sync_init(&buffer_busy_sync);
121
122         /* Memory allocation for queue_busy */
123         queue_busy = memory_allocation(sizeof(*queue_busy) * queue_subbuffer_count);
124
125         if (!queue_busy) {
126                 result = -E_SB_NO_MEM_QUEUE_BUSY;
127                 goto buffer_allocation_error_ret;
128         }
129
130         /* Memory allocation for swap_subbuffer structures */
131
132         /* Allocation for first structure. */
133         write_queue.start_ptr = memory_allocation(sizeof(*write_queue.start_ptr));
134
135         if (!write_queue.start_ptr) {
136                 result = -E_SB_NO_MEM_BUFFER_STRUCT;
137                 goto buffer_allocation_queue_busy_free;
138         }
139         allocated_structs++;
140
141
142         write_queue.end_ptr = write_queue.start_ptr;
143
144         write_queue.end_ptr->next_in_queue = NULL;
145         write_queue.end_ptr->full_buffer_part = 0;
146         write_queue.end_ptr->data_buffer = buffer_allocation(queue_subbuffer_size);
147         if (!write_queue.end_ptr->data_buffer) {
148                 print_err("Cannot allocate memory for buffer 1\n");
149                 result = -E_SB_NO_MEM_DATA_BUFFER;
150                 goto buffer_allocation_error_free;
151         }
152         allocated_buffers++;
153
154         sync_init(&write_queue.end_ptr->buffer_sync);
155
156         /* Buffer initialization */
157         memset(buffer_address(write_queue.end_ptr->data_buffer), 0, queue_subbuffer_size);
158
159         /* Allocation for other structures. */
160         for (i = 1; i < queue_subbuffer_count; i++) {
161                 write_queue.end_ptr->next_in_queue =
162                     memory_allocation(sizeof(*write_queue.end_ptr->next_in_queue));
163                 if (!write_queue.end_ptr->next_in_queue) {
164                         result = -E_SB_NO_MEM_BUFFER_STRUCT;
165                         goto buffer_allocation_error_free;
166                 }
167                 allocated_structs++;
168
169                 /* Now next write_queue.end_ptr is next */
170                 write_queue.end_ptr = write_queue.end_ptr->next_in_queue;
171
172                 write_queue.end_ptr->next_in_queue = NULL;
173                 write_queue.end_ptr->full_buffer_part = 0;
174                 write_queue.end_ptr->data_buffer = 
175                         buffer_allocation(queue_subbuffer_size);
176                 if (!write_queue.end_ptr->data_buffer) {
177                         result = -E_SB_NO_MEM_DATA_BUFFER;
178                         goto buffer_allocation_error_free;
179                 }
180                 allocated_buffers++;
181
182                 sync_init(&write_queue.end_ptr->buffer_sync);
183
184                 /* Buffer initialization */
185                 memset(buffer_address(write_queue.end_ptr->data_buffer), 0,
186                        queue_subbuffer_size);
187         }
188
189         return E_SB_SUCCESS;
190
191         /* In case of errors, this code is called */
192         /* Free all previously allocated memory */
193 buffer_allocation_error_free:
194         clean_tmp_struct = write_queue.start_ptr;
195
196         for (j = 0; j < allocated_structs; j++) {
197                 clean_tmp_struct = write_queue.start_ptr;
198                 if (allocated_buffers) {
199                         buffer_free(clean_tmp_struct->data_buffer, queue_subbuffer_size);
200                         allocated_buffers--;
201                 }
202                 if (write_queue.start_ptr != write_queue.end_ptr)
203                         write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
204                 memory_free(clean_tmp_struct);
205         }
206         write_queue.end_ptr = NULL;
207         write_queue.start_ptr = NULL;
208
209 buffer_allocation_queue_busy_free:
210         memory_free(queue_busy);
211         queue_busy = NULL;
212
213 buffer_allocation_error_ret:
214         return result;
215 }
216
217 int buffer_queue_reset(void)
218 {
219         struct swap_subbuffer *buffer = read_queue.start_ptr;
220
221         /* Check if there are some subbuffers in busy list. If so - return error */
222         if (get_busy_buffers_count())
223                 return -E_SB_UNRELEASED_BUFFERS;
224
225         /* Lock read sync primitive */
226         sync_lock(&read_queue.queue_sync);
227
228         /* Set all subbuffers in read list to write list and reinitialize them */
229         while (read_queue.start_ptr) {
230
231                 /* Lock buffer sync primitive to prevent writing to buffer if it had
232                  * been selected for writing, but still wasn't wrote. */
233                 sync_lock(&buffer->buffer_sync);
234
235                 buffer = read_queue.start_ptr;
236
237                 /* If we reached end of the list */
238                 if (read_queue.start_ptr == read_queue.end_ptr) {
239                         read_queue.end_ptr = NULL;
240                 }
241                 read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
242
243                 /* Reinit full buffer part */
244                 buffer->full_buffer_part = 0;
245
246                 add_to_write_list(buffer);
247
248                 /* Unlock buffer sync primitive */
249                 sync_unlock(&buffer->buffer_sync);
250         }
251
252         /* Unlock read primitive */
253         sync_unlock(&read_queue.queue_sync);
254
255         return E_SB_SUCCESS;
256 }
257
258 void buffer_queue_free(void)
259 {
260         struct swap_subbuffer *tmp = NULL;
261
262         /* Lock all sync primitives to prevet accessing free memory */
263         sync_lock(&write_queue.queue_sync);
264         sync_lock(&read_queue.queue_sync);
265         sync_lock(&buffer_busy_sync);
266
267         /* Free buffers and structures memory that are in read list */
268         while (read_queue.start_ptr) {
269                 tmp = read_queue.start_ptr;
270                 read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
271                 buffer_free(tmp->data_buffer, queue_subbuffer_size);
272                 memory_free(tmp);
273         }
274
275         /* Free buffers and structures memory that are in read list */
276         while (write_queue.start_ptr) {
277                 tmp = write_queue.start_ptr;
278                 write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
279                 buffer_free(tmp->data_buffer, queue_subbuffer_size);
280                 memory_free(tmp);
281         }
282
283         /* Free busy_list */
284         memory_free(queue_busy);
285         queue_busy = NULL;
286
287         queue_subbuffer_size = 0;
288         queue_subbuffer_count = 0;
289         read_queue.start_ptr = NULL;
290         read_queue.end_ptr = NULL;
291         write_queue.start_ptr = NULL;
292         write_queue.end_ptr = NULL;
293
294         /* Unlock all sync primitives */
295         sync_unlock(&buffer_busy_sync);
296         sync_unlock(&read_queue.queue_sync);
297         sync_unlock(&write_queue.queue_sync);
298 }
299
300 static unsigned int is_buffer_enough(struct swap_subbuffer *subbuffer,
301                                      size_t size)
302 {
303         /* XXX Think about checking full_buffer_part for correctness 
304          * (<queue_subbuffer_size). It should be true, but if isn't (due to sources
305          * chaning, etc.) this function should be true! */
306         return ((queue_subbuffer_size-subbuffer->full_buffer_part) >= size) ? 1 : 0;
307 }
308
309 /* Get first subbuffer from read list */
310 struct swap_subbuffer *get_from_read_list(void)
311 {
312         struct swap_subbuffer *result = NULL;
313
314         /* Lock read sync primitive */
315         sync_lock(&read_queue.queue_sync);
316
317         if (read_queue.start_ptr == NULL) {
318                 result = NULL;
319                 goto get_from_read_list_unlock;
320         }
321
322         result = read_queue.start_ptr;
323
324         /* If this is the last readable buffer, read_queue.start_ptr next time will 
325          * points to NULL and that case is handled in the beginning of function
326          */
327         if (read_queue.start_ptr == read_queue.end_ptr) {
328                 read_queue.end_ptr = NULL;
329         }
330         read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
331
332 get_from_read_list_unlock:
333         /* Unlock read sync primitive */
334         sync_unlock(&read_queue.queue_sync);
335
336         return result;
337 }
338
339 /* Add subbuffer to read list */
340 void add_to_read_list(struct swap_subbuffer *subbuffer)
341 {
342
343         /* Lock read sync primitive */
344         sync_lock(&read_queue.queue_sync);
345
346         if (!read_queue.start_ptr)
347                 read_queue.start_ptr = subbuffer;
348
349         if (read_queue.end_ptr) {
350                 read_queue.end_ptr->next_in_queue = subbuffer;
351
352                 read_queue.end_ptr = read_queue.end_ptr->next_in_queue;
353         } else {
354                 read_queue.end_ptr = subbuffer;
355         }
356         read_queue.end_ptr->next_in_queue = NULL;
357
358         /* Unlock read sync primitive */
359         sync_unlock(&read_queue.queue_sync);
360 }
361
362 /* Call add to read list and callback function from driver module */
363 int add_to_read_list_with_callback(struct swap_subbuffer *subbuffer)
364 {
365         int result = 0;
366
367         add_to_read_list(subbuffer);
368         // TODO Handle ret value
369         result = swap_buffer_callback(subbuffer);
370
371         return result;
372 }
373
374 /* Get first writable subbuffer from write list */
375 struct swap_subbuffer *get_from_write_list(size_t size, void **ptr_to_write)
376 {
377         struct swap_subbuffer *result = NULL;
378
379         /* Callbacks are called at the end of the function to prevent deadlocks */
380         struct queue callback_queue = {
381                 .start_ptr = NULL,
382                 .end_ptr = NULL,
383                 .queue_sync = {
384                         .flags = 0x0
385                 }
386         };
387         struct swap_subbuffer *tmp_buffer = NULL;
388
389         /* Init pointer */
390         *ptr_to_write = NULL;
391
392         /* Lock write list sync primitive */
393         sync_lock(&write_queue.queue_sync);
394
395         while (write_queue.start_ptr) {
396
397                 /* We're found subbuffer */
398                 if (is_buffer_enough(write_queue.start_ptr, size)) {
399
400                         result = write_queue.start_ptr;
401                         *ptr_to_write = (void *)((unsigned long)
402                                                  (buffer_address(result->data_buffer)) +
403                                                  result->full_buffer_part);
404
405                         /* Add data size to full_buffer_part. Very important to do it in
406                          * write_queue.queue_sync spinlock */
407                         write_queue.start_ptr->full_buffer_part += size;
408
409                         /* Lock rw sync. Should be unlocked in swap_buffer_write() */
410                         sync_lock(&result->buffer_sync);
411                         break;
412                 /* This subbuffer is not enough => it goes to read list */
413                 } else {
414                         result = write_queue.start_ptr;
415
416                         /* If we reached end of the list */
417                         if (write_queue.start_ptr == write_queue.end_ptr) {
418                                 write_queue.end_ptr = NULL;
419                         }
420
421                         /* Move start write pointer */
422                         write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
423
424                         /* Add to callback list */
425                         if (!callback_queue.start_ptr)
426                                 callback_queue.start_ptr = result;
427
428                         if (callback_queue.end_ptr)
429                                 callback_queue.end_ptr->next_in_queue = result;
430                         callback_queue.end_ptr = result;
431                         callback_queue.end_ptr->next_in_queue = NULL;
432                         result = NULL;
433                 }
434         }
435
436         /* Unlock write list sync primitive */
437         sync_unlock(&write_queue.queue_sync);
438
439         /* Adding buffers to read list and calling callbacks */
440         for (tmp_buffer = NULL; callback_queue.start_ptr; ) {
441                 if (callback_queue.start_ptr == callback_queue.end_ptr)
442                         callback_queue.end_ptr = NULL;
443
444                 tmp_buffer = callback_queue.start_ptr;
445                 callback_queue.start_ptr = callback_queue.start_ptr->next_in_queue;
446
447                 add_to_read_list_with_callback(tmp_buffer);
448         }
449
450         return result;
451 }
452
453 /* Add subbuffer to write list */
454 void add_to_write_list(struct swap_subbuffer *subbuffer)
455 {
456         sync_lock(&write_queue.queue_sync);
457
458         /* Reinitialize */
459         // TODO Useless memset
460 //      memset(buffer_address(subbuffer->data_buffer), 0, queue_subbuffer_size);
461         subbuffer->full_buffer_part = 0;
462
463         if (!write_queue.start_ptr)
464                 write_queue.start_ptr = subbuffer;
465
466         if (write_queue.end_ptr) {
467                 write_queue.end_ptr->next_in_queue = subbuffer;
468                 write_queue.end_ptr = write_queue.end_ptr->next_in_queue;
469         } else {
470                 write_queue.end_ptr = subbuffer;
471         }
472         write_queue.end_ptr->next_in_queue = NULL;
473
474         sync_unlock(&write_queue.queue_sync);
475 }
476
477 /* Add subbuffer to busy list when it is read from out of the buffer */
478 void add_to_busy_list(struct swap_subbuffer *subbuffer)
479 {
480         /* Lock busy sync primitive */
481         sync_lock(&buffer_busy_sync);
482
483         subbuffer->next_in_queue = NULL;
484         queue_busy[queue_busy_last_element] = subbuffer;
485         queue_busy_last_element += 1;
486
487         /* Unlock busy sync primitive */
488         sync_unlock(&buffer_busy_sync);
489 }
490
491 /* Remove subbuffer from busy list when it is released */
492 int remove_from_busy_list(struct swap_subbuffer *subbuffer)
493 {
494         int result = -E_SB_NO_SUBBUFFER_IN_BUSY; // For sanitization
495         int i;
496
497         /* Lock busy list sync primitive */
498         sync_lock(&buffer_busy_sync);
499
500         /* Sanitization and removing */
501         for (i = 0; i < queue_busy_last_element; i++) {
502                 if (queue_busy[i] == subbuffer) {
503                         /* Last element goes here and length is down 1 */
504                         queue_busy[i] = queue_busy[queue_busy_last_element - 1];
505                         queue_busy_last_element -= 1;
506                         result = E_SB_SUCCESS;
507                         break;
508                 }
509         }
510
511         /* Unlock busy list sync primitive */
512         sync_unlock(&buffer_busy_sync);
513
514         return result;
515 }
516
517 /* Get subbuffers count in read list */
518 /* XXX Think about locks */
519 int get_full_buffers_count(void)
520 {
521         int result = 0;
522         struct swap_subbuffer *buffer = read_queue.start_ptr;
523
524         while (buffer && buffer->full_buffer_part) {
525                 result += 1;
526                 buffer = buffer->next_in_queue;
527         }
528
529         return result;
530 }
531
532 /* Set all subbuffers in write list to read list */
533 void buffer_queue_flush(void)
534 {
535         struct swap_subbuffer *buffer = write_queue.start_ptr;
536
537         /* Locking write sync primitive */
538         sync_lock(&write_queue.queue_sync);
539
540         while (write_queue.start_ptr &&
541                write_queue.start_ptr->full_buffer_part) {
542
543                 /* Lock buffer sync primitive to prevent writing to buffer if it had
544                  * been selected for writing, but still wasn't wrote. */
545                 sync_lock(&buffer->buffer_sync);
546
547                 buffer = write_queue.start_ptr;
548
549                 /* If we reached end of the list */
550                 if (write_queue.start_ptr == write_queue.end_ptr) {
551                         write_queue.end_ptr = NULL;
552                 }
553                 write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
554
555                 add_to_read_list(buffer);
556
557                 /* Unlock buffer sync primitive */
558                 sync_unlock(&buffer->buffer_sync);
559         }
560
561         /* Unlock write primitive */
562         sync_unlock(&write_queue.queue_sync);
563 }
564
565 /* Get subbuffers count in busy list */
566 int get_busy_buffers_count(void)
567 {
568         int result;
569
570         sync_lock(&buffer_busy_sync);
571         result = queue_busy_last_element;
572         sync_unlock(&buffer_busy_sync);
573
574         return result;
575 }
576
577 /* Get memory pages count in subbuffer */
578 int get_pages_count_in_subbuffer(void)
579 {
580 /* Return 1 if pages order 0, or 2 of power pages_order_in_subbuffer otherwise */
581         return (pages_order_in_subbuffer) ? 2 << (pages_order_in_subbuffer - 1) : 1;
582 }