db82a2c96d5b372c3dd07e6db03d8d3e11d01d1d
[kernel/swap-modules.git] / buffer / buffer_queue.c
1 /*
2  *  SWAP Buffer Module
3  *  modules/buffer/buffer_queue.c
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18  *
19  * Copyright (C) Samsung Electronics, 2013
20  *
21  * 2013  Alexander Aksenov <a.aksenov@samsung.com>: SWAP Buffer implement
22  *
23  */
24
25 /* SWAP buffer queues implementation */
26
/* For all memory allocation/deallocation operations, except subbuffer memory
 * allocation/deallocation, the
 *  memory_allocation(size_t memory_size)
 *  memory_free(void *ptr)
 * defines should be used.
32  * For subbuffer allocation/deallocation operations should be used
33  *  buffer_allocation(size_t subbuffer_size)
34  *  buffer_free(void *ptr, size_t subbuffer_size)
35  * To get buffer pointer for any usage, EXCEPT ALLOCATION AND DEALLOCATION
36  * use the following define:
37  *  buffer_pointer(void *ptr_to_buffer_element_of_swap_buffer_structure)
38  * DO NOT USE SUBBUFFER PTR IN STRUCT SWAP_BUFFER WITHOUT THIS DEFINE!
39  * It will be ok for user space, but fail in kernel space.
40  *
41  * See space_dep_types_and_def.h for details */
42
43
44
45 #include "buffer_queue.h"
46 #include "swap_buffer_to_buffer_queue.h"
47 #include "swap_buffer_errors.h"
48
49 /* Queue structure. Consist of pointers to the first and the last elements of
50  * queue. */
51 struct queue {
52         struct swap_subbuffer *start_ptr;
53         struct swap_subbuffer *end_ptr;
54         struct sync_t queue_sync;
55 };
56
57 /* Write queue */
58 struct queue write_queue = {
59         .start_ptr = NULL,
60         .end_ptr = NULL,
61         .queue_sync = {
62                 .flags = 0x0
63         }
64 };
65
66 /* Read queue */
67 struct queue read_queue = {
68         .start_ptr = NULL,
69         .end_ptr = NULL,
70         .queue_sync = {
71                 .flags = 0x0
72         }
73 };
74
75 /* Pointers array. Points to busy buffers */
76 static struct swap_subbuffer **queue_busy = NULL;
77
78 /* Store last busy element */
79 static unsigned int queue_busy_last_element;
80
81 /* Subbuffers count */
82 static unsigned int queue_subbuffer_count = 0;
83
84 /* One subbuffer size */
85 static size_t queue_subbuffer_size = 0;
86
87 /* Busy list sync */
88 static struct sync_t buffer_busy_sync = {
89         .flags = 0x0
90 };
91
92 /* Memory pages count in one subbuffer */
93 static int pages_order_in_subbuffer = 0;
94
95
96 int buffer_queue_allocation(size_t subbuffer_size,
97                             unsigned int subbuffers_count)
98 {
99         unsigned int i = 0;
100         unsigned int j = 0;
101         unsigned int allocated_buffers = 0;
102         unsigned int allocated_structs = 0;
103         struct swap_subbuffer *clean_tmp_struct;
104         int result;
105
106         /* Static varibles initialization */
107         queue_subbuffer_size = subbuffer_size;
108         queue_subbuffer_count = subbuffers_count;
109         queue_busy_last_element = 0;
110
111         /* Set variable pages_in_subbuffer. It is used for allocation and
112          * deallocation memory pages and its value is returned from
113          * swap_buffer_get() and contains page count in one subbuffer.
114          * All this useful only in kernel space. In userspace it is dummy.*/
115         set_pages_order_in_subbuffer(queue_subbuffer_size);
116         /* Sync primitives initialization */
117         sync_init(&read_queue.queue_sync);
118         sync_init(&write_queue.queue_sync);
119         sync_init(&buffer_busy_sync);
120
121         /* Memory allocation for queue_busy */
122         queue_busy = memory_allocation(sizeof(**queue_busy) * queue_subbuffer_count);
123
124         if (!queue_busy) {
125                 result = -E_SB_NO_MEM_QUEUE_BUSY;
126                 goto buffer_allocation_error_ret;
127         }
128
129         /* Memory allocation for swap_subbuffer structures */
130
131         /* Allocation for first structure. */
132         write_queue.start_ptr = memory_allocation(sizeof(*write_queue.start_ptr));
133
134         if (!write_queue.start_ptr) {
135                 result = -E_SB_NO_MEM_BUFFER_STRUCT;
136                 goto buffer_allocation_queue_busy_free;
137         }
138         allocated_structs++;
139
140
141         write_queue.end_ptr = write_queue.start_ptr;
142
143         write_queue.end_ptr->next_in_queue = NULL;
144         write_queue.end_ptr->full_buffer_part = 0;
145         write_queue.end_ptr->data_buffer = buffer_allocation(queue_subbuffer_size);
146         if (!write_queue.end_ptr->data_buffer) {
147                 print_err("Cannot allocate memory for buffer 1\n");
148                 result = -E_SB_NO_MEM_DATA_BUFFER;
149                 goto buffer_allocation_error_free;
150         }
151         allocated_buffers++;
152
153         print_msg(" Buffer allocated = 0x%p\n", write_queue.end_ptr->data_buffer);
154
155         sync_init(&write_queue.end_ptr->buffer_sync);
156
157         /* Buffer initialization */
158         memset(buffer_address(write_queue.end_ptr->data_buffer), 0, queue_subbuffer_size);
159
160         /* Allocation for other structures. */
161         for (i = 1; i < queue_subbuffer_count; i++) {
162                 write_queue.end_ptr->next_in_queue =
163                     memory_allocation(sizeof(*write_queue.end_ptr->next_in_queue));
164                 if (!write_queue.end_ptr->next_in_queue) {
165                         result = -E_SB_NO_MEM_BUFFER_STRUCT;
166                         goto buffer_allocation_error_free;
167                 }
168                 allocated_structs++;
169
170                 /* Now next write_queue.end_ptr is next */
171                 write_queue.end_ptr = write_queue.end_ptr->next_in_queue;
172
173                 write_queue.end_ptr->next_in_queue = NULL;
174                 write_queue.end_ptr->full_buffer_part = 0;
175                 write_queue.end_ptr->data_buffer = 
176                         buffer_allocation(queue_subbuffer_size);
177                 if (!write_queue.end_ptr->data_buffer) {
178                         result = -E_SB_NO_MEM_DATA_BUFFER;
179                         goto buffer_allocation_error_free;
180                 }
181                 allocated_buffers++;
182
183                 print_msg(" Buffer allocated = 0x%p, pages_order = %d\n",
184                           write_queue.end_ptr->data_buffer,
185                           pages_order_in_subbuffer);
186
187                 sync_init(&write_queue.end_ptr->buffer_sync);
188
189                 /* Buffer initialization */
190                 memset(buffer_address(write_queue.end_ptr->data_buffer), 0,
191                        queue_subbuffer_size);
192         }
193
194         return E_SB_SUCCESS;
195
196         /* In case of errors, this code is called */
197         /* Free all previously allocated memory */
198 buffer_allocation_error_free:
199         clean_tmp_struct = write_queue.start_ptr;
200
201         for (j = 0; j < allocated_structs; j++) {
202                 clean_tmp_struct = write_queue.start_ptr;
203                 if (allocated_buffers) {
204                         buffer_free(clean_tmp_struct->data_buffer, queue_subbuffer_size);
205                         allocated_buffers--;
206                 }
207                 if (write_queue.start_ptr != write_queue.end_ptr)
208                         write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
209                 memory_free(clean_tmp_struct);
210         }
211         write_queue.end_ptr = NULL;
212         write_queue.start_ptr = NULL;
213
214 buffer_allocation_queue_busy_free:
215         memory_free(queue_busy);
216         queue_busy = NULL;
217
218 buffer_allocation_error_ret:
219         return result;
220 }
221
222 int buffer_queue_reset(void)
223 {
224         struct swap_subbuffer *buffer = read_queue.start_ptr;
225
226         /* Check if there are some subbuffers in busy list. If so - return error */
227         if (get_busy_buffers_count())
228                 return -E_SB_UNRELEASED_BUFFERS;
229
230         /* Lock read sync primitive */
231         sync_lock(&read_queue.queue_sync);
232
233         /* Set all subbuffers in read list to write list and reinitialize them */
234         while (read_queue.start_ptr) {
235
236                 /* Lock buffer sync primitive to prevent writing to buffer if it had
237                  * been selected for writing, but still wasn't wrote. */
238                 sync_lock(&buffer->buffer_sync);
239
240                 buffer = read_queue.start_ptr;
241
242                 /* If we reached end of the list */
243                 if (read_queue.start_ptr == read_queue.end_ptr) {
244                         read_queue.end_ptr = NULL;
245                 }
246                 read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
247
248                 /* Reinit full buffer part */
249                 buffer->full_buffer_part = 0;
250
251                 add_to_write_list(buffer);
252
253                 /* Unlock buffer sync primitive */
254                 sync_unlock(&buffer->buffer_sync);
255         }
256
257         /* Unlock read primitive */
258         sync_unlock(&read_queue.queue_sync);
259
260         return E_SB_SUCCESS;
261 }
262
263 void buffer_queue_free(void)
264 {
265         struct swap_subbuffer *tmp = NULL;
266
267         /* Lock all sync primitives to prevet accessing free memory */
268         sync_lock(&write_queue.queue_sync);
269         sync_lock(&read_queue.queue_sync);
270         sync_lock(&buffer_busy_sync);
271
272         /* Free buffers and structures memory that are in read list */
273         while (read_queue.start_ptr) {
274                 tmp = read_queue.start_ptr;
275                 read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
276                 buffer_free(tmp->data_buffer, queue_subbuffer_size);
277                 print_msg(" Buffer free = 0x%x\n", (unsigned long)
278                            tmp->data_buffer);
279                 memory_free(tmp);
280         }
281
282         /* Free buffers and structures memory that are in read list */
283         while (write_queue.start_ptr) {
284                 tmp = write_queue.start_ptr;
285                 write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
286                 buffer_free(tmp->data_buffer, queue_subbuffer_size);
287                 print_msg(" Buffer free = 0x%x\n", (unsigned long)
288                                                                                    tmp->data_buffer);
289                 memory_free(tmp);
290         }
291
292         /* Free busy_list */
293         memory_free(queue_busy);
294         queue_busy = NULL;
295
296         queue_subbuffer_size = 0;
297         queue_subbuffer_count = 0;
298         read_queue.start_ptr = NULL;
299         read_queue.end_ptr = NULL;
300         write_queue.start_ptr = NULL;
301         write_queue.end_ptr = NULL;
302
303         /* Unlock all sync primitives */
304         sync_unlock(&buffer_busy_sync);
305         sync_unlock(&read_queue.queue_sync);
306         sync_unlock(&write_queue.queue_sync);
307 }
308
309 static unsigned int is_buffer_enough(struct swap_subbuffer *subbuffer,
310                                      size_t size)
311 {
312         /* XXX Think about checking full_buffer_part for correctness 
313          * (<queue_subbuffer_size). It should be true, but if isn't (due to sources
314          * chaning, etc.) this function should be true! */
315         return ((queue_subbuffer_size-subbuffer->full_buffer_part) >= size) ? 1 : 0;
316 }
317
318 /* Get first subbuffer from read list */
319 struct swap_subbuffer *get_from_read_list(void)
320 {
321         struct swap_subbuffer *result = NULL;
322
323         /* Lock read sync primitive */
324         sync_lock(&read_queue.queue_sync);
325
326         if (read_queue.start_ptr == NULL) {
327                 result = NULL;
328                 goto get_from_read_list_unlock;
329         }
330
331         result = read_queue.start_ptr;
332
333         /* If this is the last readable buffer, read_queue.start_ptr next time will 
334          * points to NULL and that case is handled in the beginning of function
335          */
336         if (read_queue.start_ptr == read_queue.end_ptr) {
337                 read_queue.end_ptr = NULL;
338         }
339         read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
340
341 get_from_read_list_unlock:
342         /* Unlock read sync primitive */
343         sync_unlock(&read_queue.queue_sync);
344
345         return result;
346 }
347
348 /* Add subbuffer to read list */
349 void add_to_read_list(struct swap_subbuffer *subbuffer)
350 {
351
352         /* Lock read sync primitive */
353         sync_lock(&read_queue.queue_sync);
354
355         if (!read_queue.start_ptr)
356                 read_queue.start_ptr = subbuffer;
357
358         if (read_queue.end_ptr) {
359                 read_queue.end_ptr->next_in_queue = subbuffer;
360
361                 read_queue.end_ptr = read_queue.end_ptr->next_in_queue;
362         } else {
363                 read_queue.end_ptr = subbuffer;
364         }
365         read_queue.end_ptr->next_in_queue = NULL;
366
367         /* Unlock read sync primitive */
368         sync_unlock(&read_queue.queue_sync);
369 }
370
/* Appends 'subbuffer' to the read list, then notifies the driver module via
 * its callback; returns the callback's result. */
int add_to_read_list_with_callback(struct swap_subbuffer *subbuffer)
{
        add_to_read_list(subbuffer);
        /* TODO Handle ret value */
        return swap_buffer_callback(subbuffer);
}
382
383 /* Get first writable subbuffer from write list */
384 struct swap_subbuffer *get_from_write_list(size_t size, void **ptr_to_write)
385 {
386         struct swap_subbuffer *result = NULL;
387
388         /* Callbacks are called at the end of the function to prevent deadlocks */
389         struct queue callback_queue = {
390                 .start_ptr = NULL,
391                 .end_ptr = NULL,
392                 .queue_sync = {
393                         .flags = 0x0
394                 }
395         };
396         struct swap_subbuffer *tmp_buffer = NULL;
397
398         /* Init pointer */
399         *ptr_to_write = NULL;
400
401         /* Lock write list sync primitive */
402         sync_lock(&write_queue.queue_sync);
403
404         while (write_queue.start_ptr) {
405                 /* If start points to NULL => list is empty => exit */
406                 if (!write_queue.start_ptr) {
407                         result = NULL;
408                         goto get_from_write_list_unlock;
409                 }
410
411                 /* We're found subbuffer */
412                 if (is_buffer_enough(write_queue.start_ptr, size)) {
413
414                         result = write_queue.start_ptr;
415                         *ptr_to_write = (void *)((unsigned long)
416                                                  (buffer_address(result->data_buffer)) +
417                                                  result->full_buffer_part);
418
419                         /* Add data size to full_buffer_part. Very important to do it in
420                          * write_queue.queue_sync spinlock */
421                         write_queue.start_ptr->full_buffer_part += size;
422
423                         /* Lock rw sync. Should be unlocked in swap_buffer_write() */
424                         sync_lock(&result->buffer_sync);
425                         break;
426                 /* This subbuffer is not enough => it goes to read list */
427                 } else {
428                         result = write_queue.start_ptr;
429
430                         /* If we reached end of the list */
431                         if (write_queue.start_ptr == write_queue.end_ptr) {
432                                 write_queue.end_ptr = NULL;
433                         }
434
435                         /* Move start write pointer */
436                         write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
437
438                         /* Add to callback list */
439                         if (!callback_queue.start_ptr)
440                                 callback_queue.start_ptr = result;
441
442                         if (callback_queue.end_ptr)
443                                 callback_queue.end_ptr->next_in_queue = result;
444                         callback_queue.end_ptr = result;
445                         callback_queue.end_ptr->next_in_queue = NULL;
446                         result = NULL;
447                 }
448         }
449
450 get_from_write_list_unlock:
451         /* Unlock write list sync primitive */
452         sync_unlock(&write_queue.queue_sync);
453
454         /* Adding buffers to read list and calling callbacks */
455         for (tmp_buffer = NULL; callback_queue.start_ptr; ) {
456                 if (callback_queue.start_ptr == callback_queue.end_ptr)
457                         callback_queue.end_ptr = NULL;
458
459                 tmp_buffer = callback_queue.start_ptr;
460                 callback_queue.start_ptr = callback_queue.start_ptr->next_in_queue;
461
462                 add_to_read_list_with_callback(tmp_buffer);
463         }
464
465         return result;
466 }
467
468 /* Add subbuffer to write list */
469 void add_to_write_list(struct swap_subbuffer *subbuffer)
470 {
471         sync_lock(&write_queue.queue_sync);
472
473         /* Reinitialize */
474         // TODO Useless memset
475 //      memset(buffer_address(subbuffer->data_buffer), 0, queue_subbuffer_size);
476         subbuffer->full_buffer_part = 0;
477
478         if (!write_queue.start_ptr)
479                 write_queue.start_ptr = subbuffer;
480
481         if (write_queue.end_ptr) {
482                 write_queue.end_ptr->next_in_queue = subbuffer;
483                 write_queue.end_ptr = write_queue.end_ptr->next_in_queue;
484         } else {
485                 write_queue.end_ptr = subbuffer;
486         }
487         write_queue.end_ptr->next_in_queue = NULL;
488
489         sync_unlock(&write_queue.queue_sync);
490 }
491
492 /* Add subbuffer to busy list when it is read from out of the buffer */
493 void add_to_busy_list(struct swap_subbuffer *subbuffer)
494 {
495         /* Lock busy sync primitive */
496         sync_lock(&buffer_busy_sync);
497
498         subbuffer->next_in_queue = NULL;
499         queue_busy[queue_busy_last_element] = subbuffer;
500         queue_busy_last_element += 1;
501
502         /* Unlock busy sync primitive */
503         sync_unlock(&buffer_busy_sync);
504 }
505
506 /* Remove subbuffer from busy list when it is released */
507 int remove_from_busy_list(struct swap_subbuffer *subbuffer)
508 {
509         int result = -E_SB_NO_SUBBUFFER_IN_BUSY; // For sanitization
510         int i;
511
512         /* Lock busy list sync primitive */
513         sync_lock(&buffer_busy_sync);
514
515         /* Sanitization and removing */
516         for (i = 0; i < queue_busy_last_element; i++) {
517                 if (queue_busy[i] == subbuffer) {
518                         /* Last element goes here and length is down 1 */
519                         queue_busy[i] = queue_busy[queue_busy_last_element - 1];
520                         queue_busy_last_element -= 1;
521                         result = E_SB_SUCCESS;
522                         break;
523                 }
524         }
525
526         /* Unlock busy list sync primitive */
527         sync_unlock(&buffer_busy_sync);
528
529         return result;
530 }
531
532 /* Get subbuffers count in read list */
533 /* XXX Think about locks */
534 int get_full_buffers_count(void)
535 {
536         int result = 0;
537         struct swap_subbuffer *buffer = read_queue.start_ptr;
538
539         while (buffer && buffer->full_buffer_part) {
540                 result += 1;
541                 buffer = buffer->next_in_queue;
542         }
543
544         return result;
545 }
546
547 /* Set all subbuffers in write list to read list */
548 void buffer_queue_flush(void)
549 {
550         struct swap_subbuffer *buffer = write_queue.start_ptr;
551
552         /* Locking write sync primitive */
553         sync_lock(&write_queue.queue_sync);
554
555         while (write_queue.start_ptr &&
556                write_queue.start_ptr->full_buffer_part) {
557
558                 /* Lock buffer sync primitive to prevent writing to buffer if it had
559                  * been selected for writing, but still wasn't wrote. */
560                 sync_lock(&buffer->buffer_sync);
561
562                 buffer = write_queue.start_ptr;
563
564                 /* If we reached end of the list */
565                 if (write_queue.start_ptr == write_queue.end_ptr) {
566                         write_queue.end_ptr = NULL;
567                 }
568                 write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
569
570                 add_to_read_list(buffer);
571
572                 /* Unlock buffer sync primitive */
573                 sync_unlock(&buffer->buffer_sync);
574         }
575
576         /* Unlock write primitive */
577         sync_unlock(&write_queue.queue_sync);
578 }
579
580 /* Get subbuffers count in busy list */
581 int get_busy_buffers_count(void)
582 {
583         int result;
584
585         sync_lock(&buffer_busy_sync);
586         result = queue_busy_last_element;
587         sync_unlock(&buffer_busy_sync);
588
589         return result;
590 }
591
592 /* Get memory pages count in subbuffer */
593 int get_pages_count_in_subbuffer(void)
594 {
595 /* Return 1 if pages order 0, or 2 of power pages_order_in_subbuffer otherwise */
596         return (pages_order_in_subbuffer) ? 2 << (pages_order_in_subbuffer - 1) : 1;
597 }