/*
 *  SWAP Buffer Module
 *  modules/buffer/buffer_queue.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) Samsung Electronics, 2013
 *
 * 2013  Alexander Aksenov <a.aksenov@samsung.com>: SWAP Buffer implementation
 *
 */

/* SWAP buffer queues implementation */

/* All memory allocation/deallocation in this file, except for subbuffer
 * memory, must go through the
 *  memory_allocation(size_t memory_size)
 *  memory_free(void *ptr)
 * defines. Subbuffer memory must be allocated/deallocated with the
 *  buffer_allocation(size_t subbuffer_size)
 *  buffer_free(void *ptr, size_t subbuffer_size)
 * defines. To get a usable pointer to a subbuffer for anything EXCEPT
 * ALLOCATION AND DEALLOCATION, use the
 *  buffer_address(void *ptr_to_buffer_element_of_swap_buffer_structure)
 * define. DO NOT USE THE SUBBUFFER POINTER STORED IN STRUCT SWAP_BUFFER
 * WITHOUT THIS DEFINE! Dereferencing it directly happens to work in user
 * space, but fails in kernel space.
 *
 * See space_dep_types_and_def.h for details */
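
/* As an orientation aid, a minimal sketch of what these defines could expand
 * to in a kernel-space build (hypothetical; the real definitions live in
 * space_dep_types_and_def.h and may differ):
 *
 *      #define memory_allocation(size) kmalloc((size), GFP_KERNEL)
 *      #define memory_free(ptr)        kfree(ptr)
 *      #define buffer_allocation(size) \
 *              ((void *)__get_free_pages(GFP_KERNEL, pages_order_in_subbuffer))
 *      #define buffer_free(ptr, size) \
 *              free_pages((unsigned long)(ptr), pages_order_in_subbuffer)
 *
 * A user-space build could map memory_allocation()/buffer_allocation() to
 * plain malloc() and buffer_address() to an identity cast, which is why raw
 * subbuffer pointers happen to work there but not in kernel space. */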


#include "buffer_queue.h"
#include "swap_buffer_to_buffer_queue.h"
#include "swap_buffer_errors.h"

/* Queue structure. Consists of pointers to the first and the last elements
 * of the queue. */
struct queue {
        struct swap_subbuffer *start_ptr;
        struct swap_subbuffer *end_ptr;
        sync_t queue_sync;
};
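
/* Illustrative invariants (not spelled out in the original sources): an
 * empty queue has start_ptr == end_ptr == NULL; a one-element queue has
 * start_ptr == end_ptr; elements are appended at end_ptr and removed at
 * start_ptr, so a pop looks roughly like
 *
 *      struct swap_subbuffer *sb = q->start_ptr;
 *      if (q->start_ptr == q->end_ptr)
 *              q->end_ptr = NULL;
 *      q->start_ptr = q->start_ptr->next_in_queue;
 *
 * as done under queue_sync in get_from_read_list() below. */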

/* Write queue */
struct queue write_queue = {
        .start_ptr = NULL,
        .end_ptr = NULL
};

/* Read queue */
struct queue read_queue = {
        .start_ptr = NULL,
        .end_ptr = NULL
};

/* Pointer array of the subbuffers that are currently busy (being read from
 * outside the buffer) */
static struct swap_subbuffer **queue_busy = NULL;

/* Index one past the last busy element (== busy element count) */
static unsigned int queue_busy_last_element;

/* Subbuffers count */
static unsigned int queue_subbuffer_count = 0;

/* One subbuffer size */
static size_t queue_subbuffer_size = 0;

/* Busy list sync */
static sync_t buffer_busy_sync;

/* Page order of one subbuffer (a subbuffer spans 2^order memory pages) */
static int pages_order_in_subbuffer = 0;


int buffer_queue_allocation(size_t subbuffer_size,
                            unsigned int subbuffers_count)
{
        unsigned int i = 0;
        unsigned int j = 0;
        unsigned int allocated_buffers = 0;
        unsigned int allocated_structs = 0;
        struct swap_subbuffer *clean_tmp_struct;
        int result;

        /* Static variables initialization */
        queue_subbuffer_size = subbuffer_size;
        queue_subbuffer_count = subbuffers_count;
        queue_busy_last_element = 0;

        /* Set the variable pages_order_in_subbuffer. It is used for
         * allocating and deallocating memory pages, and its value is
         * returned from swap_buffer_get() as the page count of one
         * subbuffer. All this is useful only in kernel space; in user space
         * it is a dummy. */
        set_pages_order_in_subbuffer(queue_subbuffer_size);

        /* Sync primitives initialization */
        sync_init(&read_queue.queue_sync);
        sync_init(&write_queue.queue_sync);
        sync_init(&buffer_busy_sync);

        /* Memory allocation for queue_busy (an array of subbuffer
         * pointers) */
        queue_busy = memory_allocation(sizeof(*queue_busy) *
                                       queue_subbuffer_count);

        if (!queue_busy) {
                result = E_SB_NO_MEM_QUEUE_BUSY;
                goto buffer_allocation_error_ret;
        }

        /* Memory allocation for swap_subbuffer structures */

        /* Allocation for the first structure. Note: allocate the size of
         * the structure itself, not of a pointer to it. */
        write_queue.start_ptr = memory_allocation(sizeof(*write_queue.start_ptr));

        if (!write_queue.start_ptr) {
                result = E_SB_NO_MEM_BUFFER_STRUCT;
                goto buffer_allocation_queue_busy_free;
        }
        allocated_structs++;

        write_queue.end_ptr = write_queue.start_ptr;

        write_queue.end_ptr->next_in_queue = NULL;
        write_queue.end_ptr->full_buffer_part = 0;
        write_queue.end_ptr->data_buffer = buffer_allocation(queue_subbuffer_size);
        if (!write_queue.end_ptr->data_buffer) {
                print_err("Cannot allocate memory for buffer 1\n");
                result = E_SB_NO_MEM_DATA_BUFFER;
                goto buffer_allocation_error_free;
        }
        allocated_buffers++;

        print_msg(" Buffer allocated = 0x%lx\n",
                  (unsigned long)buffer_address(write_queue.end_ptr->data_buffer));

        sync_init(&write_queue.end_ptr->buffer_sync);

        /* Buffer initialization */
        memset(buffer_address(write_queue.end_ptr->data_buffer), 0,
               queue_subbuffer_size);

        /* Allocation for the other structures. */
        for (i = 1; i < queue_subbuffer_count; i++) {
                write_queue.end_ptr->next_in_queue =
                    memory_allocation(sizeof(*write_queue.end_ptr->next_in_queue));
                if (!write_queue.end_ptr->next_in_queue) {
                        result = E_SB_NO_MEM_BUFFER_STRUCT;
                        goto buffer_allocation_error_free;
                }
                allocated_structs++;

                /* Move write_queue.end_ptr to the new element */
                write_queue.end_ptr = write_queue.end_ptr->next_in_queue;

                write_queue.end_ptr->next_in_queue = NULL;
                write_queue.end_ptr->full_buffer_part = 0;
                write_queue.end_ptr->data_buffer =
                        buffer_allocation(queue_subbuffer_size);
                if (!write_queue.end_ptr->data_buffer) {
                        result = E_SB_NO_MEM_DATA_BUFFER;
                        goto buffer_allocation_error_free;
                }
                allocated_buffers++;

                print_msg(" Buffer allocated = 0x%lx, pages_order = %d\n",
                          (unsigned long)buffer_address(write_queue.end_ptr->data_buffer),
                          pages_order_in_subbuffer);

                sync_init(&write_queue.end_ptr->buffer_sync);

                /* Buffer initialization */
                memset(buffer_address(write_queue.end_ptr->data_buffer), 0,
                       queue_subbuffer_size);
        }

        return E_SB_SUCCESS;

        /* In case of errors this code is reached. */
        /* Free all previously allocated memory. */
buffer_allocation_error_free:
        for (j = 0; j < allocated_structs; j++) {
                clean_tmp_struct = write_queue.start_ptr;
                if (allocated_buffers) {
                        buffer_free(clean_tmp_struct->data_buffer,
                                    queue_subbuffer_size);
                        allocated_buffers--;
                }
                if (write_queue.start_ptr != write_queue.end_ptr)
                        write_queue.start_ptr = write_queue.start_ptr->next_in_queue;
                memory_free(clean_tmp_struct);
        }
        write_queue.end_ptr = NULL;
        write_queue.start_ptr = NULL;

buffer_allocation_queue_busy_free:
        memory_free(queue_busy);
        queue_busy = NULL;

buffer_allocation_error_ret:
        return result;
}
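
/* Usage sketch (illustrative, with made-up sizes; the real caller is
 * whatever initializes the SWAP buffer, e.g. a swap_buffer_init()-style
 * entry point):
 *
 *      int ret = buffer_queue_allocation(4096, 8);
 *      if (ret != E_SB_SUCCESS)
 *              return ret;     // e.g. E_SB_NO_MEM_QUEUE_BUSY
 *      ...
 *      buffer_queue_free();
 *
 * On failure the function unwinds everything it has allocated itself, so no
 * extra cleanup call is needed for a failed allocation. */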

void buffer_queue_free(void)
{
        struct swap_subbuffer *tmp = NULL;

        /* TODO Lock the read list sync primitive to prevent getting a
         * subbuffer from the read list while freeing */
        /* Move all write buffers to the read list */
        set_all_to_read_list();

        /* Free the buffer and structure memory of everything in the read
         * list */
        while (read_queue.start_ptr) {
                tmp = read_queue.start_ptr;
                read_queue.start_ptr = read_queue.start_ptr->next_in_queue;
                print_msg(" Buffer free = 0x%lx\n",
                          (unsigned long)buffer_address(tmp->data_buffer));
                buffer_free(tmp->data_buffer, queue_subbuffer_size);
                memory_free(tmp);
        }

        /* Free the busy list */
        memory_free(queue_busy);
        queue_busy = NULL;

        queue_subbuffer_size = 0;
        queue_subbuffer_count = 0;
        read_queue.start_ptr = NULL;
        read_queue.end_ptr = NULL;
        write_queue.start_ptr = NULL;
        write_queue.end_ptr = NULL;
}

static unsigned int is_buffer_enough(struct swap_subbuffer *subbuffer,
                                     size_t size)
{
        /* XXX Consider checking full_buffer_part for correctness
         * (< queue_subbuffer_size). It should always hold, but if it ever
         * doesn't (due to source changes, etc.), the unsigned subtraction
         * below wraps around and this function wrongly returns true! */
        return ((queue_subbuffer_size - subbuffer->full_buffer_part) >= size) ?
                1 : 0;
}

/* Get the first subbuffer from the read list */
struct swap_subbuffer *get_from_read_list(void)
{
        struct swap_subbuffer *result = NULL;

        /* Lock read sync primitive */
        sync_lock(&read_queue.queue_sync);

        if (read_queue.start_ptr == NULL) {
                result = NULL;
                goto get_from_read_list_unlock;
        }

        result = read_queue.start_ptr;

        /* If this is the last readable buffer, read_queue.start_ptr will
         * point to NULL next time; that case is handled at the beginning of
         * the function */
        if (read_queue.start_ptr == read_queue.end_ptr)
                read_queue.end_ptr = NULL;
        read_queue.start_ptr = read_queue.start_ptr->next_in_queue;

get_from_read_list_unlock:
        /* Unlock read sync primitive */
        sync_unlock(&read_queue.queue_sync);

        return result;
}
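
/* Read-side usage sketch (illustrative; deliver() is a hypothetical consumer
 * standing in for whatever hands the data to the driver or to user space):
 *
 *      struct swap_subbuffer *sb;
 *
 *      while ((sb = get_from_read_list())) {
 *              add_to_busy_list(sb);           // mark as read from outside
 *              deliver(buffer_address(sb->data_buffer),
 *                      sb->full_buffer_part);
 *              remove_from_busy_list(sb);
 *              add_to_write_list(sb);          // recycle for writers
 *      }
 */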

/* Add a subbuffer to the read list */
void add_to_read_list(struct swap_subbuffer *subbuffer)
{
        /* Lock read sync primitive */
        sync_lock(&read_queue.queue_sync);

        if (!read_queue.start_ptr)
                read_queue.start_ptr = subbuffer;

        if (read_queue.end_ptr) {
                read_queue.end_ptr->next_in_queue = subbuffer;
                read_queue.end_ptr = read_queue.end_ptr->next_in_queue;
        } else {
                read_queue.end_ptr = subbuffer;
        }
        read_queue.end_ptr->next_in_queue = NULL;

        /* Unlock read sync primitive */
        sync_unlock(&read_queue.queue_sync);
}

/* Add a subbuffer to the read list and call the callback function from the
 * driver module */
int add_to_read_list_with_callback(struct swap_subbuffer *subbuffer)
{
        int result = 0;

        add_to_read_list(subbuffer);
        /* TODO Handle the return value */
        result = swap_buffer_callback(subbuffer);

        return result;
}

/* Get the first writable subbuffer from the write list */
struct swap_subbuffer *get_from_write_list(size_t size, void **ptr_to_write)
{
        struct swap_subbuffer *result = NULL;

        /* Callbacks are called at the end of the function to prevent
         * deadlocks */
        struct queue callback_queue = {
                .start_ptr = NULL,
                .end_ptr = NULL
        };
        struct swap_subbuffer *tmp_buffer = NULL;

        /* Init pointer */
        *ptr_to_write = NULL;

        /* Lock write list sync primitive */
        sync_lock(&write_queue.queue_sync);

        while (write_queue.start_ptr) {
                /* We have found a subbuffer with enough room */
                if (is_buffer_enough(write_queue.start_ptr, size)) {
                        result = write_queue.start_ptr;
                        *ptr_to_write = (void *)((unsigned long)
                                                 (buffer_address(result->data_buffer)) +
                                                 result->full_buffer_part);

                        /* Add the data size to full_buffer_part. It is very
                         * important to do this while holding
                         * write_queue.queue_sync */
                        write_queue.start_ptr->full_buffer_part += size;

                        /* Lock rw sync. Should be unlocked in
                         * swap_buffer_write() */
                        sync_lock(&result->buffer_sync);
                        break;
                /* This subbuffer is too small => it goes to the read list */
                } else {
                        result = write_queue.start_ptr;

                        /* If we reached the end of the list */
                        if (write_queue.start_ptr == write_queue.end_ptr)
                                write_queue.end_ptr = NULL;

                        /* Move the start write pointer */
                        write_queue.start_ptr = write_queue.start_ptr->next_in_queue;

                        /* Add to the callback list */
                        if (!callback_queue.start_ptr)
                                callback_queue.start_ptr = result;

                        if (callback_queue.end_ptr)
                                callback_queue.end_ptr->next_in_queue = result;
                        callback_queue.end_ptr = result;
                        callback_queue.end_ptr->next_in_queue = NULL;
                        result = NULL;
                }
        }

        /* Unlock write list sync primitive. If the write list ran empty,
         * result is still NULL here. */
        sync_unlock(&write_queue.queue_sync);

        /* Add buffers to the read list and call the callbacks */
        while (callback_queue.start_ptr) {
                if (callback_queue.start_ptr == callback_queue.end_ptr)
                        callback_queue.end_ptr = NULL;

                tmp_buffer = callback_queue.start_ptr;
                callback_queue.start_ptr = callback_queue.start_ptr->next_in_queue;

                add_to_read_list_with_callback(tmp_buffer);
        }

        return result;
}
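
/* Write-side usage sketch (illustrative; the comment above names
 * swap_buffer_write() as the function expected to release buffer_sync,
 * and data/len stand for caller-supplied payload):
 *
 *      void *dst;
 *      struct swap_subbuffer *sb = get_from_write_list(len, &dst);
 *
 *      if (sb) {
 *              memcpy(dst, data, len); // room was reserved under queue_sync
 *              sync_unlock(&sb->buffer_sync);
 *      }
 *
 * Full subbuffers encountered along the way have already been moved to the
 * read list and reported through swap_buffer_callback(). */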

/* Add a subbuffer to the write list */
void add_to_write_list(struct swap_subbuffer *subbuffer)
{
        sync_lock(&write_queue.queue_sync);

        /* Reinitialize. Zeroing the data is not needed; resetting the fill
         * counter is enough. */
        subbuffer->full_buffer_part = 0;

        if (!write_queue.start_ptr)
                write_queue.start_ptr = subbuffer;

        if (write_queue.end_ptr) {
                write_queue.end_ptr->next_in_queue = subbuffer;
                write_queue.end_ptr = write_queue.end_ptr->next_in_queue;
        } else {
                write_queue.end_ptr = subbuffer;
        }
        write_queue.end_ptr->next_in_queue = NULL;

        sync_unlock(&write_queue.queue_sync);
}

/* Add a subbuffer to the busy list when it is read from outside the
 * buffer */
void add_to_busy_list(struct swap_subbuffer *subbuffer)
{
        /* Lock busy sync primitive */
        sync_lock(&buffer_busy_sync);

        subbuffer->next_in_queue = NULL;
        queue_busy[queue_busy_last_element] = subbuffer;
        queue_busy_last_element += 1;

        /* Unlock busy sync primitive */
        sync_unlock(&buffer_busy_sync);
}

/* Remove a subbuffer from the busy list when it is released */
int remove_from_busy_list(struct swap_subbuffer *subbuffer)
{
        int result = E_SB_NO_SUBBUFFER_IN_BUSY; /* Default: not found */
        unsigned int i;

        /* Lock busy list sync primitive */
        sync_lock(&buffer_busy_sync);

        /* Sanity check and removal */
        for (i = 0; i < queue_busy_last_element; i++) {
                if (queue_busy[i] == subbuffer) {
                        /* The last element goes here and the length drops
                         * by 1 */
                        queue_busy[i] = queue_busy[queue_busy_last_element - 1];
                        queue_busy_last_element -= 1;
                        result = E_SB_SUCCESS;
                        break;
                }
        }

        /* Unlock busy list sync primitive */
        sync_unlock(&buffer_busy_sync);

        return result;
}
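
/* Design note: removal swaps the found entry with the last one instead of
 * shifting the tail down, making removal O(1) at the cost of element order,
 * which the busy list does not need. For example (illustrative), removing B
 * from [A, B, C, D] yields [A, D, C]. */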

/* Get the count of non-empty subbuffers at the head of the read list */
/* XXX Think about locks: the read list is traversed here without taking
 * read_queue.queue_sync */
int get_full_buffers_count(void)
{
        int result = 0;
        struct swap_subbuffer *buffer = read_queue.start_ptr;

        while (buffer && buffer->full_buffer_part) {
                result += 1;
                buffer = buffer->next_in_queue;
        }

        return result;
}

/* Move all subbuffers in the write list to the read list */
void set_all_to_read_list(void)
{
        struct swap_subbuffer *buffer = NULL;

        /* Lock write sync primitive */
        sync_lock(&write_queue.queue_sync);

        while (write_queue.start_ptr) {
                buffer = write_queue.start_ptr;

                /* Wait until the buffer's sync primitive is released */

                /* TODO To think: it is not bad as it is, but maybe it would
                 * be better to keep the primitive locked while changing
                 * lists? (Not bad now, because the buffer should already
                 * have been stopped.) */

                sync_lock(&buffer->buffer_sync);
                sync_unlock(&buffer->buffer_sync);

                /* If we reached the end of the list */
                if (write_queue.start_ptr == write_queue.end_ptr)
                        write_queue.end_ptr = NULL;
                write_queue.start_ptr = write_queue.start_ptr->next_in_queue;

                add_to_read_list(buffer);
        }

        /* Unlock write sync primitive */
        sync_unlock(&write_queue.queue_sync);
}

/* Get the subbuffers count in the busy list */
/* XXX Think about locking buffer_busy_sync */
int get_busy_buffers_count(void)
{
        return queue_busy_last_element;
}

/* Get the count of memory pages in one subbuffer */
int get_pages_count_in_subbuffer(void)
{
        /* A subbuffer spans 2^pages_order_in_subbuffer pages, so the count
         * is 1 shifted left by the page order */
        return 1 << pages_order_in_subbuffer;
}
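
/* For reference, a plausible shape for the order computation done by
 * set_pages_order_in_subbuffer() in kernel space (hypothetical; the real
 * definition lives in the space-dependent headers and may differ):
 *
 *      // Smallest order such that (PAGE_SIZE << order) >= size
 *      void set_pages_order_in_subbuffer(size_t size)
 *      {
 *              pages_order_in_subbuffer = 0;
 *              while ((PAGE_SIZE << pages_order_in_subbuffer) < size)
 *                      pages_order_in_subbuffer++;
 *      }
 *
 * With PAGE_SIZE == 4096 and a 16 KB subbuffer this gives order 2, so
 * get_pages_count_in_subbuffer() returns 4. */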