[REFACTOR] Buffer: struct queue -> struct queue_t
[kernel/swap-modules.git] / buffer / buffer_queue.c
/*
 *  SWAP Buffer Module
 *  modules/buffer/buffer_queue.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) Samsung Electronics, 2013
 *
 * 2013  Alexander Aksenov <a.aksenov@samsung.com>: SWAP Buffer implementation
 *
 */

/* SWAP buffer queues implementation */

/* For all memory allocation/deallocation operations, except subbuffer memory
 * allocation/deallocation, the
 *  memory_allocation(size_t memory_size)
 *  memory_free(void *ptr)
 * defines should be used.
 * For subbuffer allocation/deallocation the
 *  buffer_allocation(size_t subbuffer_size)
 *  buffer_free(void *ptr, size_t subbuffer_size)
 * defines should be used.
 * To get the buffer pointer for any use EXCEPT ALLOCATION AND DEALLOCATION,
 * use the following define:
 *  buffer_address(void *ptr_to_buffer_element_of_swap_buffer_structure)
 * DO NOT USE THE SUBBUFFER PTR IN STRUCT SWAP_BUFFER WITHOUT THIS DEFINE!
 * It will work in user space, but fail in kernel space.
 *
 * See space_dep_types_and_def.h for details */
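
/* Illustrative sketch (not part of the original sources) of how the defines
 * above are meant to combine; the size argument is just an example:
 *
 *      void *sub = buffer_allocation(queue_subbuffer_size);
 *      if (sub) {
 *              memset(buffer_address(sub), 0, queue_subbuffer_size);
 *              ...     use buffer_address(sub) for all accesses
 *              buffer_free(sub, queue_subbuffer_size);
 *      }
 */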

#include "buffer_queue.h"
#include "swap_buffer_to_buffer_queue.h"
#include "swap_buffer_errors.h"
#include "kernel_operations.h"

/* Queue structure. Consists of pointers to the first and the last elements
 * of the queue. */
struct queue_t {
        struct swap_subbuffer *start_ptr;
        struct swap_subbuffer *end_ptr;
        unsigned int subbuffers_count;
        struct sync_t queue_sync;
};

/* Write queue */
struct queue_t write_queue = {
        .start_ptr = NULL,
        .end_ptr = NULL,
        .subbuffers_count = 0,
        .queue_sync = {
                .flags = 0x0
        }
};

/* Read queue */
struct queue_t read_queue = {
        .start_ptr = NULL,
        .end_ptr = NULL,
        .subbuffers_count = 0,
        .queue_sync = {
                .flags = 0x0
        }
};

/* Array of pointers to busy subbuffers */
static struct swap_subbuffer **queue_busy = NULL;

/* Busy list length, i.e. the index of the first free slot in queue_busy */
static unsigned int queue_busy_last_element;

/* Subbuffers count */
static unsigned int queue_subbuffer_count = 0;

/* One subbuffer size */
static size_t queue_subbuffer_size = 0;

/* Busy list sync */
static struct sync_t buffer_busy_sync = {
        .flags = 0x0
};

/* Page order of one subbuffer: a subbuffer spans 2^order memory pages */
static int pages_order_in_subbuffer = 0;

int buffer_queue_allocation(size_t subbuffer_size,
                            unsigned int subbuffers_count)
{
        unsigned int i = 0;
        unsigned int j = 0;
        unsigned int allocated_buffers = 0;
        unsigned int allocated_structs = 0;
        struct swap_subbuffer *clean_tmp_struct;
        int result;

        /* Static variables initialization */
        queue_subbuffer_size = subbuffer_size;
        queue_subbuffer_count = subbuffers_count;
        queue_busy_last_element = 0;

        /* Set pages_order_in_subbuffer. It is used for allocating and
         * deallocating memory pages, and the page count in one subbuffer
         * derived from it is returned by swap_buffer_get().
         * All this is useful only in kernel space; in user space it is a
         * dummy. */
        set_pages_order_in_subbuffer(queue_subbuffer_size);

        /* Sync primitives initialization */
        sync_init(&read_queue.queue_sync);
        sync_init(&write_queue.queue_sync);
        sync_init(&buffer_busy_sync);

        /* Memory allocation for queue_busy. It is an array of pointers, so
         * the element size is sizeof(*queue_busy), not sizeof(**queue_busy) */
        queue_busy = memory_allocation(sizeof(*queue_busy) * queue_subbuffer_count);

        if (!queue_busy) {
                result = -E_SB_NO_MEM_QUEUE_BUSY;
                goto buffer_allocation_error_ret;
        }

        /* Memory allocation for swap_subbuffer structures */

        /* Allocation of the first structure */
        write_queue.start_ptr = memory_allocation(sizeof(*write_queue.start_ptr));

        if (!write_queue.start_ptr) {
                result = -E_SB_NO_MEM_BUFFER_STRUCT;
                goto buffer_allocation_queue_busy_free;
        }
        allocated_structs++;

        write_queue.end_ptr = write_queue.start_ptr;

        write_queue.end_ptr->next_in_queue = NULL;
        write_queue.end_ptr->full_buffer_part = 0;
        write_queue.end_ptr->data_buffer = buffer_allocation(queue_subbuffer_size);
        if (!write_queue.end_ptr->data_buffer) {
                print_err("Cannot allocate memory for buffer 1\n");
                result = -E_SB_NO_MEM_DATA_BUFFER;
                goto buffer_allocation_error_free;
        }
        allocated_buffers++;

        sync_init(&write_queue.end_ptr->buffer_sync);

        /* Buffer initialization */
        memset(buffer_address(write_queue.end_ptr->data_buffer), 0,
               queue_subbuffer_size);

        /* Allocation of the remaining structures */
        for (i = 1; i < queue_subbuffer_count; i++) {
                write_queue.end_ptr->next_in_queue =
                    memory_allocation(sizeof(*write_queue.end_ptr->next_in_queue));
                if (!write_queue.end_ptr->next_in_queue) {
                        result = -E_SB_NO_MEM_BUFFER_STRUCT;
                        goto buffer_allocation_error_free;
                }
                allocated_structs++;

                /* Advance write_queue.end_ptr to the newly added element */
                write_queue.end_ptr = write_queue.end_ptr->next_in_queue;

                write_queue.end_ptr->next_in_queue = NULL;
                write_queue.end_ptr->full_buffer_part = 0;
                write_queue.end_ptr->data_buffer =
                        buffer_allocation(queue_subbuffer_size);
                if (!write_queue.end_ptr->data_buffer) {
                        result = -E_SB_NO_MEM_DATA_BUFFER;
                        goto buffer_allocation_error_free;
                }
                allocated_buffers++;

                sync_init(&write_queue.end_ptr->buffer_sync);

                /* Buffer initialization */
                memset(buffer_address(write_queue.end_ptr->data_buffer), 0,
                       queue_subbuffer_size);
        }

        /* All subbuffers are in the write list */
        write_queue.subbuffers_count = subbuffers_count;

        return E_SB_SUCCESS;

        /* Error path: free all previously allocated memory */
buffer_allocation_error_free:
        clean_tmp_struct = write_queue.start_ptr;

        for (j = 0; j < allocated_structs; j++) {
                clean_tmp_struct = write_queue.start_ptr;
                if (allocated_buffers) {
                        buffer_free(clean_tmp_struct->data_buffer,
                                    queue_subbuffer_size);
                        allocated_buffers--;
                }
                if (write_queue.start_ptr != write_queue.end_ptr)
                        write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
                memory_free(clean_tmp_struct);
        }
        write_queue.end_ptr = NULL;
        write_queue.start_ptr = NULL;

buffer_allocation_queue_busy_free:
        memory_free(queue_busy);
        queue_busy = NULL;

buffer_allocation_error_ret:
        return result;
}
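
/* Illustrative usage sketch (not part of the original sources): a driver
 * would typically call this once at init time with a chosen geometry, e.g.
 * ten 4 KiB subbuffers, and bail out on the negative -E_SB_* code. On
 * failure nothing is left allocated, so no extra cleanup is needed:
 *
 *      int ret = buffer_queue_allocation(4096, 10);
 *      if (ret != E_SB_SUCCESS)
 *              return ret;
 */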

int buffer_queue_reset(void)
{
        struct swap_subbuffer *buffer;

        /* If there are subbuffers in the busy list, return an error */
        if (get_busy_buffers_count())
                return -E_SB_UNRELEASED_BUFFERS;

        /* Lock read sync primitive */
        sync_lock(&read_queue.queue_sync);

        /* Move all subbuffers from the read list to the write list and
         * reinitialize them */
        while (read_queue.start_ptr) {
                buffer = read_queue.start_ptr;

                /* Lock the buffer sync primitive to prevent writing to the
                 * buffer if it had been selected for writing but wasn't
                 * written yet. Note: the current head must be assigned to
                 * buffer before locking, otherwise the lock/unlock pair
                 * would hit two different buffers. */
                sync_lock(&buffer->buffer_sync);

                /* If we reached the end of the list */
                if (read_queue.start_ptr == read_queue.end_ptr)
                        read_queue.end_ptr = NULL;
                read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
                --read_queue.subbuffers_count;

                /* Reinit the full buffer part */
                buffer->full_buffer_part = 0;

                add_to_write_list(buffer);

                /* Unlock the buffer sync primitive */
                sync_unlock(&buffer->buffer_sync);
        }

        /* Unlock read primitive */
        sync_unlock(&read_queue.queue_sync);

        return E_SB_SUCCESS;
}

void buffer_queue_free(void)
{
        struct swap_subbuffer *tmp = NULL;

        /* Lock all sync primitives to prevent accessing freed memory */
        sync_lock(&write_queue.queue_sync);
        sync_lock(&read_queue.queue_sync);
        sync_lock(&buffer_busy_sync);

        /* Free the buffers and structures that are in the read list */
        while (read_queue.start_ptr) {
                tmp = read_queue.start_ptr;
                read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
                buffer_free(tmp->data_buffer, queue_subbuffer_size);
                memory_free(tmp);
        }

        /* Free the buffers and structures that are in the write list */
        while (write_queue.start_ptr) {
                tmp = write_queue.start_ptr;
                write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
                buffer_free(tmp->data_buffer, queue_subbuffer_size);
                memory_free(tmp);
        }

        /* Free the busy list */
        memory_free(queue_busy);
        queue_busy = NULL;

        queue_subbuffer_size = 0;
        queue_subbuffer_count = 0;
        read_queue.start_ptr = NULL;
        read_queue.end_ptr = NULL;
        read_queue.subbuffers_count = 0;
        write_queue.start_ptr = NULL;
        write_queue.end_ptr = NULL;
        write_queue.subbuffers_count = 0;

        /* Unlock all sync primitives */
        sync_unlock(&buffer_busy_sync);
        sync_unlock(&read_queue.queue_sync);
        sync_unlock(&write_queue.queue_sync);
}
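
/* Illustrative consumer sketch (hypothetical, pieced together from the
 * helpers in this file): a reader would drain a full subbuffer via the read
 * list, park it in the busy list while its data is handed out, then recycle
 * it into the write list:
 *
 *      struct swap_subbuffer *sub = get_from_read_list();
 *      if (sub) {
 *              add_to_busy_list(sub);
 *              ...     copy out sub->data_buffer contents here
 *              remove_from_busy_list(sub);
 *              add_to_write_list(sub);
 *      }
 */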

static unsigned int is_buffer_enough(struct swap_subbuffer *subbuffer,
                                     size_t size)
{
        /* XXX Think about checking full_buffer_part for correctness
         * (< queue_subbuffer_size). It should always hold, but if it doesn't
         * (due to source changes, etc.) the unsigned subtraction below
         * underflows and wrongly reports free space. */
        return ((queue_subbuffer_size - subbuffer->full_buffer_part) >= size) ? 1 : 0;
}
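
/* Worked example (illustrative numbers): with queue_subbuffer_size == 4096
 * and full_buffer_part == 4000, a request of size 96 still fits
 * (4096 - 4000 >= 96), while a request of size 100 does not, so
 * get_from_write_list() would move that subbuffer to the read list. */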

/* Get the first subbuffer from the read list */
struct swap_subbuffer *get_from_read_list(void)
{
        struct swap_subbuffer *result = NULL;

        /* Lock read sync primitive */
        sync_lock(&read_queue.queue_sync);

        if (read_queue.start_ptr == NULL) {
                result = NULL;
                goto get_from_read_list_unlock;
        }

        result = read_queue.start_ptr;

        /* If this is the last readable buffer, read_queue.start_ptr will
         * point to NULL next time; that case is handled at the beginning of
         * the function */
        if (read_queue.start_ptr == read_queue.end_ptr)
                read_queue.end_ptr = NULL;
        read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
        --read_queue.subbuffers_count;

get_from_read_list_unlock:
        /* Unlock read sync primitive */
        sync_unlock(&read_queue.queue_sync);

        return result;
}

/* Add a subbuffer to the read list */
void add_to_read_list(struct swap_subbuffer *subbuffer)
{
        /* Lock read sync primitive */
        sync_lock(&read_queue.queue_sync);

        if (!read_queue.start_ptr)
                read_queue.start_ptr = subbuffer;

        if (read_queue.end_ptr) {
                read_queue.end_ptr->next_in_queue = subbuffer;
                read_queue.end_ptr = read_queue.end_ptr->next_in_queue;
        } else {
                read_queue.end_ptr = subbuffer;
        }
        read_queue.end_ptr->next_in_queue = NULL;
        ++read_queue.subbuffers_count;

        /* Unlock read sync primitive */
        sync_unlock(&read_queue.queue_sync);
}

/* Add to the read list and call the driver module's callback */
int add_to_read_list_with_callback(struct swap_subbuffer *subbuffer)
{
        int result = 0;

        add_to_read_list(subbuffer);
        /* TODO: handle the return value */
        result = swap_buffer_callback(subbuffer);

        return result;
}

/* Returns the number of subbuffers ready to be read */
unsigned int get_readable_buf_cnt(void)
{
        return read_queue.subbuffers_count;
}

/* Get the first writable subbuffer from the write list */
struct swap_subbuffer *get_from_write_list(size_t size, void **ptr_to_write)
{
        struct swap_subbuffer *result = NULL;

        /* Callbacks are called at the end of the function to prevent
         * deadlocks */
        struct queue_t callback_queue = {
                .start_ptr = NULL,
                .end_ptr = NULL,
                .subbuffers_count = 0,
                .queue_sync = {
                        .flags = 0x0
                }
        };
        struct swap_subbuffer *tmp_buffer = NULL;

        /* Init pointer */
        *ptr_to_write = NULL;

        /* Lock write list sync primitive */
        sync_lock(&write_queue.queue_sync);

        while (write_queue.start_ptr) {
                /* We've found a suitable subbuffer */
                if (is_buffer_enough(write_queue.start_ptr, size)) {
                        result = write_queue.start_ptr;
                        *ptr_to_write = (void *)((unsigned long)
                                                 (buffer_address(result->data_buffer)) +
                                                 result->full_buffer_part);

                        /* Add the data size to full_buffer_part. It is very
                         * important to do this under the
                         * write_queue.queue_sync spinlock */
                        write_queue.start_ptr->full_buffer_part += size;

                        /* Lock rw sync. Should be unlocked in
                         * swap_buffer_write() */
                        sync_lock(&result->buffer_sync);
                        break;
                /* This subbuffer does not have enough room left => it goes
                 * to the read list */
                } else {
                        result = write_queue.start_ptr;

                        /* If we reached the end of the list */
                        if (write_queue.start_ptr == write_queue.end_ptr)
                                write_queue.end_ptr = NULL;

                        /* Move the start write pointer */
                        write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
                        --write_queue.subbuffers_count;

                        /* Add to the callback list */
                        if (!callback_queue.start_ptr)
                                callback_queue.start_ptr = result;

                        if (callback_queue.end_ptr)
                                callback_queue.end_ptr->next_in_queue = result;
                        callback_queue.end_ptr = result;
                        callback_queue.end_ptr->next_in_queue = NULL;
                        result = NULL;
                }
        }

        /* Unlock write list sync primitive */
        sync_unlock(&write_queue.queue_sync);

        /* Add the drained buffers to the read list and call the callbacks */
        for (tmp_buffer = NULL; callback_queue.start_ptr; ) {
                if (callback_queue.start_ptr == callback_queue.end_ptr)
                        callback_queue.end_ptr = NULL;

                tmp_buffer = callback_queue.start_ptr;
                callback_queue.start_ptr = callback_queue.start_ptr->next_in_queue;

                add_to_read_list_with_callback(tmp_buffer);
        }

        return result;
}
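
/* Illustrative writer sketch (hypothetical, based on the comment above that
 * buffer_sync is released in swap_buffer_write()): a writer reserves space,
 * copies its record, then drops the per-buffer lock. record and len are
 * placeholder names:
 *
 *      void *dst;
 *      struct swap_subbuffer *sub = get_from_write_list(len, &dst);
 *      if (sub) {
 *              memcpy(dst, record, len);
 *              sync_unlock(&sub->buffer_sync);
 *      }
 */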

/* Add a subbuffer to the write list */
void add_to_write_list(struct swap_subbuffer *subbuffer)
{
        sync_lock(&write_queue.queue_sync);

        /* Reinitialize */
        subbuffer->full_buffer_part = 0;

        if (!write_queue.start_ptr)
                write_queue.start_ptr = subbuffer;

        if (write_queue.end_ptr) {
                write_queue.end_ptr->next_in_queue = subbuffer;
                write_queue.end_ptr = write_queue.end_ptr->next_in_queue;
        } else {
                write_queue.end_ptr = subbuffer;
        }
        write_queue.end_ptr->next_in_queue = NULL;
        ++write_queue.subbuffers_count;

        sync_unlock(&write_queue.queue_sync);
}

/* Returns the number of subbuffers available for writing */
unsigned int get_writable_buf_cnt(void)
{
        return write_queue.subbuffers_count;
}


/* Add a subbuffer to the busy list when it is handed out for reading from
 * outside the buffer */
void add_to_busy_list(struct swap_subbuffer *subbuffer)
{
        /* Lock busy sync primitive */
        sync_lock(&buffer_busy_sync);

        subbuffer->next_in_queue = NULL;
        queue_busy[queue_busy_last_element] = subbuffer;
        queue_busy_last_element += 1;

        /* Unlock busy sync primitive */
        sync_unlock(&buffer_busy_sync);
}

/* Remove a subbuffer from the busy list when it is released */
int remove_from_busy_list(struct swap_subbuffer *subbuffer)
{
        int result = -E_SB_NO_SUBBUFFER_IN_BUSY;        /* For sanitization */
        unsigned int i;

        /* Lock busy list sync primitive */
        sync_lock(&buffer_busy_sync);

        /* Sanitization and removal */
        for (i = 0; i < queue_busy_last_element; i++) {
                if (queue_busy[i] == subbuffer) {
                        /* The last element takes this slot and the length
                         * goes down by 1 */
                        queue_busy[i] = queue_busy[queue_busy_last_element - 1];
                        queue_busy_last_element -= 1;
                        result = E_SB_SUCCESS;
                        break;
                }
        }

        /* Unlock busy list sync primitive */
        sync_unlock(&buffer_busy_sync);

        return result;
}
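
/* Worked example (illustrative): the busy list is an unordered array, so
 * removal is an O(1) swap-with-last. With queue_busy == [A, B, C] and
 * queue_busy_last_element == 3, removing B yields [A, C, C] with length 2;
 * the trailing C is dead and is overwritten by the next add_to_busy_list(). */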

/* Move filled subbuffers from the head of the write list to the read list */
void buffer_queue_flush(void)
{
        struct swap_subbuffer *buffer;

        /* Lock write sync primitive */
        sync_lock(&write_queue.queue_sync);

        while (write_queue.start_ptr &&
               write_queue.start_ptr->full_buffer_part) {
                buffer = write_queue.start_ptr;

                /* Lock the buffer sync primitive to prevent writing to the
                 * buffer if it had been selected for writing but wasn't
                 * written yet. Note: the current head must be assigned to
                 * buffer before locking, otherwise the lock/unlock pair
                 * would hit two different buffers. */
                sync_lock(&buffer->buffer_sync);

                /* If we reached the end of the list */
                if (write_queue.start_ptr == write_queue.end_ptr)
                        write_queue.end_ptr = NULL;
                write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
                --write_queue.subbuffers_count;

                add_to_read_list(buffer);

                /* Unlock the buffer sync primitive */
                sync_unlock(&buffer->buffer_sync);
        }

        /* Unlock write primitive */
        sync_unlock(&write_queue.queue_sync);
}

/* Get the subbuffers count in the busy list */
int get_busy_buffers_count(void)
{
        int result;

        sync_lock(&buffer_busy_sync);
        result = queue_busy_last_element;
        sync_unlock(&buffer_busy_sync);

        return result;
}

/* Get the memory pages count in one subbuffer */
int get_pages_count_in_subbuffer(void)
{
        /* Return 1 if the page order is 0, or 2 to the power of
         * pages_order_in_subbuffer otherwise; e.g. order 3 gives
         * 2 << 2 == 8 pages. Equivalent to 1 << pages_order_in_subbuffer. */
        return (pages_order_in_subbuffer) ? 2 << (pages_order_in_subbuffer - 1) : 1;
}