/*
 *  SWAP Buffer Module
 *  modules/buffer/buffer_queue.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) Samsung Electronics, 2013
 *
 * 2013         Alexander Aksenov <a.aksenov@samsung.com>: SWAP Buffer implementation
 *
 */

/* SWAP buffer queues implementation */

/* All memory allocation/deallocation in this file, except subbuffer memory,
 * must go through the
 *  memory_allocation(size_t memory_size)
 *  memory_free(void *ptr)
 * defines. Subbuffer allocation/deallocation must go through the
 *  buffer_allocation(size_t subbuffer_size)
 *  buffer_free(void *ptr, size_t subbuffer_size)
 * defines. To get a usable pointer to a subbuffer for anything EXCEPT
 * ALLOCATION AND DEALLOCATION, use the
 *  buffer_address(void *ptr_to_buffer_element_of_swap_buffer_structure)
 * define. DO NOT USE THE SUBBUFFER POINTER STORED IN STRUCT SWAP_BUFFER
 * WITHOUT THIS DEFINE! Using it directly happens to work in user space,
 * but fails in kernel space.
 *
 * See space_dep_types_and_def.h for details */
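
/* A minimal usage sketch of these defines, assuming only what this file
 * itself relies on (struct swap_buffer with a `buffer` field, and
 * queue_subbuffer_size as the subbuffer size):
 *
 *  struct swap_buffer *sb = memory_allocation(sizeof(struct swap_buffer));
 *
 *  sb->buffer = buffer_allocation(queue_subbuffer_size);
 *  // Touch the subbuffer contents only through buffer_address():
 *  memset(buffer_address(sb->buffer), 0, queue_subbuffer_size);
 *  ...
 *  buffer_free(sb->buffer, queue_subbuffer_size);
 *  memory_free(sb);
 */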

#include "buffer_queue.h"
#include "buffer_description.h"
#include "swap_buffer_to_buffer_queue.h"
#include "space_dep_operations.h"

typedef struct swap_buffer *write_start_ptr_type;
typedef struct swap_buffer *write_end_ptr_type;
typedef struct swap_buffer *read_start_ptr_type;
typedef struct swap_buffer *read_end_ptr_type;

static write_start_ptr_type queue_write_start_ptr = NULL;  //First element of
                                                            //the write queue
static write_end_ptr_type queue_write_end_ptr = NULL;       //Last element of
                                                             //the write queue
static read_start_ptr_type queue_read_start_ptr = NULL;     //First element of
                                                             //the read queue
static read_end_ptr_type queue_read_end_ptr = NULL;         //Last element of
                                                             //the read queue
static struct swap_buffer **queue_busy = NULL;          //Array of pointers to
                                                        //occupied subbuffers
static unsigned int queue_busy_last_element;            //Number of occupied
                                                        //elements in queue_busy
static unsigned int queue_subbuffer_count = 0;          //Subbuffers count
static size_t queue_subbuffer_size = 0;                 //Subbuffers size
static buffer_access_sync_type buffer_read_sync;        //add_to_read_list and
                                                        //get_from_read_list
                                                        //sync
static buffer_access_sync_type buffer_write_sync;       //add_to_write_list and
                                                        //get_from_write_list
                                                        //sync
static buffer_access_sync_type buffer_busy_sync;        //add_to_busy_list and
                                                        //remove_from_busy_list
                                                        //sync
static int pages_order_in_subbuffer = 0;                //Page order (log2 of
                                                        //the page count) in
                                                        //one subbuffer

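/* Overview of the three lists managed below, pieced together from the
 * functions in this file: subbuffers start in the write list, where
 * get_from_write_list() hands them to writers; once full (or flushed by
 * set_all_to_read_list()) they move to the read list; while an external
 * reader holds one it sits in the busy list; add_to_write_list() finally
 * zeroes and recycles it:
 *
 *  write list --(full / flush)--> read list --(reader takes)--> busy list
 *      ^                                                            |
 *      +-----------------(released and reinitialized)--------------+
 */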

int buffer_queue_allocation(const size_t subbuffer_size,
                            const unsigned int subbuffers_count)
{
    int result = 0;
    int i;

    /*  0 - ok
     * -1 - memory for queue_busy wasn't allocated
     * -2 - memory for swap_buffer structure wasn't allocated
     * -3 - memory for buffer wasn't allocated
     * -4 - semaphore cannot be inited
     * -5 - sync primitives cannot be inited
     */

    /* Static variables initialization */
    queue_subbuffer_size = subbuffer_size;
    queue_subbuffer_count = subbuffers_count;
    queue_busy_last_element = 0;

    /* Set the pages_order_in_subbuffer variable. It is used for allocating
     * and deallocating memory pages, and its value is returned from
     * swap_buffer_get() as the page count in one subbuffer.
     * All this is useful only in kernel space; in user space it is a dummy. */
    set_pages_order_in_subbuffer(queue_subbuffer_size);

    /* Sync primitives initialization */
    if (buffer_access_init(&buffer_read_sync)) {
        result = -5;
        return result;
    }
    if (buffer_access_init(&buffer_write_sync)) {
        result = -5;
        return result;
    }
    if (buffer_access_init(&buffer_busy_sync)) {
        result = -5;
        return result;
    }

    /* Memory allocation for queue_busy */
    queue_busy = memory_allocation(sizeof(struct swap_buffer *) *
                                   queue_subbuffer_count);

    if (!queue_busy) {
        result = -1;
        return result;
    }

    /* Memory allocation for swap_buffer structures */
    /* Allocation for the first structure */

    queue_write_start_ptr = memory_allocation(sizeof(struct swap_buffer));

    if (!queue_write_start_ptr) {
        result = -2;
        memory_free(queue_busy);
        queue_busy = NULL;
        return result;
    }
    queue_write_end_ptr = queue_write_start_ptr;

    queue_write_end_ptr->next_in_queue = NULL;
    queue_write_end_ptr->full_buffer_part = 0;
    queue_write_end_ptr->buffer = buffer_allocation(queue_subbuffer_size);
    if (!queue_write_end_ptr->buffer) {
        print_err("Cannot allocate memory for buffer 1\n");
        result = -3;
        memory_free(queue_busy);
        memory_free(queue_write_start_ptr);
        queue_write_start_ptr = NULL;
        queue_busy = NULL;

        return result;
    }

    print_msg(" Buffer allocated = 0x%lx\n",
              (unsigned long)queue_write_end_ptr->buffer);

    if (buffer_rw_init(&queue_write_end_ptr->buffer_sync) != 0) {
        result = -4;
        /* The first subbuffer was already allocated: free it too */
        buffer_free(queue_write_end_ptr->buffer, queue_subbuffer_size);
        memory_free(queue_busy);
        queue_busy = NULL;
        memory_free(queue_write_start_ptr);
        queue_write_start_ptr = NULL;
        return result;
    }

    /* Buffer initialization */
    memset(buffer_address(queue_write_end_ptr->buffer), 0, queue_subbuffer_size);

    /* Allocation for the remaining structures */
    for (i = 1; i < queue_subbuffer_count; i++) {
        queue_write_end_ptr->next_in_queue =
            memory_allocation(sizeof(struct swap_buffer));
        if (!queue_write_end_ptr->next_in_queue) {
            /* Free all previously allocated memory (i structures,
             * i subbuffers) */
            int j;
            struct swap_buffer *clean_tmp_struct = queue_write_start_ptr;

            result = -2;
            for (j = 0; j < i; j++) {
                clean_tmp_struct = queue_write_start_ptr;
                if (queue_write_start_ptr != queue_write_end_ptr) {
                    queue_write_start_ptr = queue_write_start_ptr->next_in_queue;
                }
                buffer_free(clean_tmp_struct->buffer, queue_subbuffer_size);
                memory_free(clean_tmp_struct);
            }
            queue_write_end_ptr = NULL;
            queue_write_start_ptr = NULL;
            memory_free(queue_busy);
            queue_busy = NULL;
            return result;
        }

        /* Move queue_write_end_ptr to the new element */
        queue_write_end_ptr = queue_write_end_ptr->next_in_queue;

        queue_write_end_ptr->next_in_queue = NULL;
        queue_write_end_ptr->full_buffer_part = 0;
        queue_write_end_ptr->buffer = buffer_allocation(queue_subbuffer_size);
        if (!queue_write_end_ptr->buffer) {
            /* Free all previously allocated memory, including the structure
             * whose subbuffer allocation just failed (i + 1 structures,
             * i subbuffers) */
            int j;
            struct swap_buffer *clean_tmp_struct = queue_write_start_ptr;

            result = -3;
            print_err("Cannot allocate memory for buffer %d\n", i + 1);

            for (j = 0; j <= i; j++) {
                clean_tmp_struct = queue_write_start_ptr;
                if (queue_write_start_ptr != queue_write_end_ptr) {
                    queue_write_start_ptr = queue_write_start_ptr->next_in_queue;
                    /* The last structure has no subbuffer: its allocation
                     * just failed */
                    buffer_free(clean_tmp_struct->buffer, queue_subbuffer_size);
                }
                memory_free(clean_tmp_struct);
            }
            queue_write_end_ptr = NULL;
            queue_write_start_ptr = NULL;
            memory_free(queue_busy);
            queue_busy = NULL;
            return result;
        }

        print_msg(" Buffer allocated = 0x%lx, pages_order = %d\n",
                  (unsigned long)queue_write_end_ptr->buffer,
                  pages_order_in_subbuffer);

        if (buffer_rw_init(&queue_write_end_ptr->buffer_sync) != 0) {
            /* Free all previously allocated memory (i + 1 structures and
             * subbuffers) */
            int j;
            struct swap_buffer *clean_tmp_struct = queue_write_start_ptr;

            result = -4;
            for (j = 0; j <= i; j++) {
                clean_tmp_struct = queue_write_start_ptr;
                if (queue_write_start_ptr != queue_write_end_ptr) {
                    queue_write_start_ptr = queue_write_start_ptr->next_in_queue;
                }
                buffer_free(clean_tmp_struct->buffer, queue_subbuffer_size);
                memory_free(clean_tmp_struct);
            }
            queue_write_end_ptr = NULL;
            queue_write_start_ptr = NULL;
            memory_free(queue_busy);
            queue_busy = NULL;
            return result;
        }

        /* Buffer initialization */
        memset(buffer_address(queue_write_end_ptr->buffer), 0,
               queue_subbuffer_size);
    }

    return result;
}

int buffer_queue_free(void)
{
    int result = 0;
    struct swap_buffer *tmp = NULL;

    /*  0 - ok
     * <0 - set_all_to_read_list() error
     */

    //TODO Lock the read list sync primitive to prevent getting a subbuffer
    //     from the read list while freeing
    /* Move all write buffers to the read list */
    result = set_all_to_read_list();

    if (result < 0) {
        return result;
    }

    /* Free the subbuffers and structures that are in the read list */
    while (queue_read_start_ptr) {
        tmp = queue_read_start_ptr;
        queue_read_start_ptr = queue_read_start_ptr->next_in_queue;
        buffer_free(tmp->buffer, queue_subbuffer_size);
        memory_free(tmp);
    }

    /* Free the busy list */
    memory_free(queue_busy);
    queue_busy = NULL;

    queue_subbuffer_size = 0;
    queue_subbuffer_count = 0;
    queue_read_start_ptr = NULL;
    queue_read_end_ptr = NULL;
    queue_write_start_ptr = NULL;
    queue_write_end_ptr = NULL;

    return result;
}
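
/* A hedged example of how a caller might drive the two functions above
 * (the 8 KB / 64 values are illustrative only, not module defaults):
 *
 *  if (buffer_queue_allocation(8 * 1024, 64) != 0) {
 *      // -1..-5: memory or sync-primitive initialization failed
 *      return;
 *  }
 *  ...
 *  buffer_queue_free();
 */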

/* Check whether the free part of the subbuffer can hold 'size' bytes */
static unsigned int is_buffer_enough(struct swap_buffer *subbuffer, size_t size)
{
    return ((queue_subbuffer_size - subbuffer->full_buffer_part) >= size) ? 1 : 0;
}

/* Get the first subbuffer from the read list */
struct swap_buffer *get_from_read_list(void)
{
    struct swap_buffer *result = NULL;

    /* Lock the read sync primitive */
    if (buffer_access_lock(&buffer_read_sync)) {
        return NULL;
    }

    if (queue_read_start_ptr == NULL) {
        result = NULL;
        goto get_from_read_list_unlock;
    }

    result = queue_read_start_ptr;

    /* If this is the last readable subbuffer, queue_read_start_ptr will point
     * to NULL next time; that case is handled at the beginning of the
     * function */
    if (queue_read_start_ptr == queue_read_end_ptr) {
        queue_read_end_ptr = NULL;
    }
    queue_read_start_ptr = queue_read_start_ptr->next_in_queue;

get_from_read_list_unlock:
    /* Unlock the read sync primitive */
    if (buffer_access_unlock(&buffer_read_sync)) {
        return NULL;
    }

    return result;
}

/* Add a subbuffer to the read list */
int add_to_read_list(struct swap_buffer *subbuffer)
{
    int result = 0;

    /* 0 - ok
     * 1 - cannot lock
     * 2 - cannot unlock */

    /* Lock the read sync primitive */
    if (buffer_access_lock(&buffer_read_sync)) {
        result = 1;
        return result;
    }

    // TODO Sanitization?
    if (!queue_read_start_ptr) {
        queue_read_start_ptr = subbuffer;
    }

    if (queue_read_end_ptr) {
        queue_read_end_ptr->next_in_queue = subbuffer;
        queue_read_end_ptr = queue_read_end_ptr->next_in_queue;
    } else {
        queue_read_end_ptr = subbuffer;
    }
    queue_read_end_ptr->next_in_queue = NULL;

    /* Unlock the read sync primitive */
    if (buffer_access_unlock(&buffer_read_sync)) {
        result = 2;
        return result;
    }

    return result;
}

/* Add to the read list and call the driver module's callback */
int add_to_read_list_with_callback(struct swap_buffer *subbuffer)
{
    int result = 0;

    result = add_to_read_list(subbuffer);
    // TODO Handle the return value
    swap_buffer_callback(subbuffer);

    return result;
}

/* Get the first writable subbuffer from the write list */
struct swap_buffer *get_from_write_list(size_t size)
{
    struct swap_buffer *result = NULL;

    /* Callbacks are called at the end of the function to prevent deadlocks */
    struct swap_buffer *queue_callback_start_ptr = NULL;
    struct swap_buffer *queue_callback_end_ptr = NULL;
    struct swap_buffer *tmp_buffer = NULL;

    /* Lock the write list sync primitive */
    if (buffer_access_lock(&buffer_write_sync)) {
        return NULL;
    }

    /* If the loop exits without a break, the list is empty and result
     * stays NULL */
    while (queue_write_start_ptr) {
        /* Check the semaphore state. Useful only if we want to allow writing
         * to several subbuffers at the same time.
         *
         * We try to lock the semaphore and, on success, unlock it right away.
         * Otherwise the subbuffer is moved to the callback list. */
        if (buffer_rw_lock(&queue_write_start_ptr->buffer_sync) != 0) {
            // TODO HOW? HOW is it possible to get here?!
            result = queue_write_start_ptr;
            /* If we reached the end of the list */
            if (queue_write_start_ptr == queue_write_end_ptr) {
                queue_write_end_ptr = NULL;
            }
            /* Move the write list start pointer */
            queue_write_start_ptr = queue_write_start_ptr->next_in_queue;

            /* Add to the callback list */
            if (!queue_callback_start_ptr) {
                queue_callback_start_ptr = result;
            }
            if (queue_callback_end_ptr) {
                queue_callback_end_ptr->next_in_queue = result;
            }
            queue_callback_end_ptr = result;
            queue_callback_end_ptr->next_in_queue = NULL;

            result = NULL;
            continue;
        }
        buffer_rw_unlock(&queue_write_start_ptr->buffer_sync);

// TODO Do something

        if (is_buffer_enough(queue_write_start_ptr, size)) {
            result = queue_write_start_ptr;
            break;
        } else {
            /* If there is not enough room, the subbuffer goes to the read
             * list */
            result = queue_write_start_ptr;
            /* If we reached the end of the list */
            if (queue_write_start_ptr == queue_write_end_ptr) {
                queue_write_end_ptr = NULL;
            }
            /* Move the write list start pointer */
            queue_write_start_ptr = queue_write_start_ptr->next_in_queue;

            /* Add to the callback list */
            if (!queue_callback_start_ptr) {
                queue_callback_start_ptr = result;
            }
            if (queue_callback_end_ptr) {
                queue_callback_end_ptr->next_in_queue = result;
            }
            queue_callback_end_ptr = result;
            queue_callback_end_ptr->next_in_queue = NULL;

            result = NULL;
        }
    }

    /* Lock the writing semaphore of the returned subbuffer */
    if (result) {
        if (buffer_rw_lock(&result->buffer_sync)) {
            result = NULL;
            goto get_from_write_list_unlock;
        }
    }

get_from_write_list_unlock:
    /* Unlock the write list sync primitive */
    if (buffer_access_unlock(&buffer_write_sync)) {
        if (result) {
            buffer_rw_unlock(&result->buffer_sync);
        }
        return NULL;
    }

    /* Add the collected subbuffers to the read list and call the callbacks */
    for (tmp_buffer = NULL; queue_callback_start_ptr; ) {
        if (queue_callback_start_ptr == queue_callback_end_ptr) {
            queue_callback_end_ptr = NULL;
        }
        tmp_buffer = queue_callback_start_ptr;
        queue_callback_start_ptr = queue_callback_start_ptr->next_in_queue;

        add_to_read_list_with_callback(tmp_buffer);
    }

    return result;
}
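
/* A hedged sketch of a writer using the function above. The only contract
 * assumed is what is visible in this file: the returned subbuffer comes back
 * with buffer_sync locked, and full_buffer_part counts the bytes already
 * used. `record` and `record_size` are hypothetical:
 *
 *  struct swap_buffer *sb = get_from_write_list(record_size);
 *
 *  if (sb) {
 *      memcpy((char *)buffer_address(sb->buffer) + sb->full_buffer_part,
 *             record, record_size);
 *      sb->full_buffer_part += record_size;
 *      buffer_rw_unlock(&sb->buffer_sync);
 *  }
 */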

/* Add a subbuffer to the write list */
int add_to_write_list(struct swap_buffer *subbuffer)
{
    /*  0 - ok
     * -1 - cannot lock
     * -2 - cannot unlock */

    if (buffer_access_lock(&buffer_write_sync)) {
        return -1;
    }

    /* Reinitialize the subbuffer */
    memset(buffer_address(subbuffer->buffer), 0, queue_subbuffer_size);
    subbuffer->full_buffer_part = 0;

    if (!queue_write_start_ptr) {
        queue_write_start_ptr = subbuffer;
    }

    if (queue_write_end_ptr) {
        queue_write_end_ptr->next_in_queue = subbuffer;
        queue_write_end_ptr = queue_write_end_ptr->next_in_queue;
    } else {
        queue_write_end_ptr = subbuffer;
    }
    queue_write_end_ptr->next_in_queue = NULL;

    if (buffer_access_unlock(&buffer_write_sync)) {
        return -2;
    }

    return 0;
}

/* Add a subbuffer to the busy list while it is being read from outside the
 * buffer */
int add_to_busy_list(struct swap_buffer *subbuffer)
{
    /*  0 - ok
     * -1 - cannot lock
     * -2 - cannot unlock */

    /* Lock the busy sync primitive */
    if (buffer_access_lock(&buffer_busy_sync)) {
        return -1;
    }

    subbuffer->next_in_queue = NULL;
    queue_busy[queue_busy_last_element] = subbuffer;
    queue_busy_last_element += 1;

    /* Unlock the busy sync primitive */
    if (buffer_access_unlock(&buffer_busy_sync)) {
        return -2;
    }

    return 0;
}

/* Remove a subbuffer from the busy list when it is released */
int remove_from_busy_list(struct swap_buffer *subbuffer)
{
    int result = -1;    /* Stays -1 if the subbuffer is not found */
    int i;

    /*  0 - ok
     * -1 - no such buffer in the queue_busy list
     * -2 - cannot lock
     * -3 - cannot unlock
     */

    /* Lock the busy list sync primitive */
    if (buffer_access_lock(&buffer_busy_sync)) {
        result = -2;
        return result;
    }

    /* Find the subbuffer and remove it by swapping in the last element */
    for (i = 0; i < queue_busy_last_element; i++) {
        if (queue_busy[i] == subbuffer) {
            /* The last element goes here and the length shrinks by 1 */
            queue_busy[i] = queue_busy[queue_busy_last_element - 1];
            queue_busy_last_element -= 1;
            result = 0;
            break;
        }
    }

    /* Unlock the busy list sync primitive */
    if (buffer_access_unlock(&buffer_busy_sync)) {
        result = -3;
        return result;
    }

    return result;
}
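
/* A hedged sketch of the reader-side lifecycle the busy list supports,
 * pieced together from the functions in this file (a real consumer may
 * differ):
 *
 *  struct swap_buffer *sb = get_from_read_list();
 *
 *  if (sb) {
 *      add_to_busy_list(sb);    // subbuffer is being read externally
 *      // ... consume full_buffer_part bytes at buffer_address(sb->buffer)
 *      remove_from_busy_list(sb);
 *      add_to_write_list(sb);   // zeroed and recycled for new writes
 *  }
 */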

/* Get the count of subbuffers in the read list */
/* XXX Think about locks */
int get_full_buffers_count(void)
{
    int result = 0;
    struct swap_buffer *buffer = queue_read_start_ptr;

    /* >=0 - buffers count
     */

    while (buffer && buffer->full_buffer_part) {
        result += 1;
        buffer = buffer->next_in_queue;
    }

    return result;
}

/* Move all subbuffers from the write list to the read list */
int set_all_to_read_list(void)
{
    int result = 0;
    struct swap_buffer *buffer = queue_write_start_ptr;

    /*  0 - ok
     * -1 - sem_wait() error
     * -2 - sem_post() error
     * -3 - problems with locking sync primitives
     * -4 - problems with unlocking sync primitives
     */

    /* Lock the write sync primitive */
    if (buffer_access_lock(&buffer_write_sync)) {
        result = -3;
        return result;
    }

    while (queue_write_start_ptr) {
        buffer = queue_write_start_ptr;

        /* Wait until the subbuffer's semaphore is released */

// TODO To think: it's not bad as it is, but maybe it would be better to keep
// the semaphore locked while moving the subbuffer to the other list? (Not bad
// now, because the buffer should have already been stopped.)

        if (buffer_rw_lock(&buffer->buffer_sync)) {
            result = -1;
            goto set_all_to_read_list_unlock;
        }

        if (buffer_rw_unlock(&buffer->buffer_sync)) {
            result = -2;
            goto set_all_to_read_list_unlock;
        }

        /* If we reached the end of the list */
        if (queue_write_start_ptr == queue_write_end_ptr) {
            queue_write_end_ptr = NULL;
        }
        queue_write_start_ptr = queue_write_start_ptr->next_in_queue;

        add_to_read_list(buffer);
    }

set_all_to_read_list_unlock:
    /* Unlock the write sync primitive */
    if (buffer_access_unlock(&buffer_write_sync)) {
        result = -4;
    }
    return result;
}

/* Get the count of subbuffers in the busy list */
/* XXX Think about locks */
int get_busy_buffers_count(void)
{
    return queue_busy_last_element;
}

/* Get the count of memory pages in one subbuffer */
int get_pages_in_subbuffer(void)
{
    /* Returns 2 to the power of pages_order_in_subbuffer */
    return 1 << pages_order_in_subbuffer;
}