0a9b40dcf9ae5f4abf0d4231e5b87bad878ed779
[platform/core/system/resourced.git] / src / resource-optimizer / memory / compaction / compaction.c
1 /*
2  * resourced:compaction
3  *
4  * Copyright (c) 2015 Samsung Electronics Co., Ltd. All rights reserved.
5  *
6  * Licensed under the Apache License, Version 2.0 (the "License");
7  * you may not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  * http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an "AS IS" BASIS,
14  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  */
18
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>	/* ffs() */
#include <errno.h>
#include <fcntl.h>
#include <glib.h>
#include <time.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/types.h>

#include <sys/syscall.h>

#include "config-parser.h"
#include "module.h"
#include "macro.h"
#include "memory-cgroup.h"
#include "notifier.h"
#include "procfs.h"
#include "resourced.h"
#include "trace.h"
#include "util.h"
#include "compact-common.h"
42
43 /**
44  * State bit for zone's fragmentation warning
45  * ZONE_FRAG_WARN_RAISE bit is set for each zone
46  * when the fragmentation level reaches specified
47  * value for at least one of supported page orders.
48  */
49 #define ZONE_FRAG_WARN_RAISE    (0x1 << 0)
50
51 /* Internal compaction module states */
52 #define COMPACT_IDLE            (0)
53 /* State NOTIFIED is to eliminate spurious thread wakeups */
54 #define COMPACT_NOTIFIED        (1 << 0)
55 /* Failed to write to procfs entry */
56 #define COMPACT_FAILURE         (1 << 1)
57 /**
58  * Failed to perform one of the basic operations:
59  * like reading /proc/zoneinfo or /proc/pagetypeinfo
60  * Set to indicate that there is no point of return
61  * and that the compaction thread should/can safely
62  * clean things up and call it a day.
63  **/
64 #define COMPACT_WITHDRAW        (1 << 2)
65 /**
66  * Set when compaction module has been explicitly
67  * requested to quit
68  */
69 #define COMPACT_CANCEL          (1 << 3)
70 #define COMPACT_SKIP            (COMPACT_WITHDRAW | COMPACT_CANCEL)
71
/* Highest page order tracked; buddyinfo columns cover orders 0..MAX_PAGE_ORDER. */
#define MAX_PAGE_ORDER          0xa
/* Maximum number of zones tracked per system (see struct memory_info). */
#define ZONE_MAX_NR             4

/* Writing to this procfs entry asks the kernel to compact memory. */
#define PROC_COMPACT_ENTRY      "/proc/sys/vm/compact_memory"
#define COMPACT_CONF_FILE                   RD_CONFIG_FILE(optimizer)

/* Configuration section/key names in the optimizer config file. */
#define COMPACT_CONFIG_SECTION  "Compaction"
#define COMPACT_CONFIG_ENABLE   "CompactEnable"
#define COMPACT_CONFIG_FRAG     "Fraglevel"

/**
 * Default frag level (per-mille, order-based) which determines
 * when to trigger compaction.
 */
#define COMPACT_DEF_FRAG_LEVEL  800 /* 80% */
87
88 /*
89  * Note: Tightly coupled with corresponding ids.
90  * Mind while modifying.
91  */
92 static const char *zone_names[] = {"Normal", "DMA", "HighMem", "DMA32"};
93
/* Cursor state shared between the procfs parser callbacks. */
struct parser_data {
	struct memory_info	*mem_info;	/* memory info being filled in */
	struct zone_info	*zone;		/* zone the parser is currently at */
};
98
/* Per-zone snapshot built from /proc/zoneinfo and /proc/buddyinfo. */
struct zone_info {
	unsigned int		id;		/* bit flag, see get_zone_id() */
	unsigned long		pages_per_order[MAX_PAGE_ORDER +1];	/* buddyinfo free-block counts */
	unsigned long		free_pages;	/* total free pages (all orders) */
	unsigned long		wm_min;		/* min watermark, in pages */
	unsigned long		wm_low;		/* low watermark, in pages */
	unsigned long		wm_high;	/* high watermark, in pages */
	unsigned long		managed;	/* pages managed by the allocator */
	unsigned int		frag_map:MAX_PAGE_ORDER+1;	/* per-order fragmentation bits */
	unsigned int		frag_warn:2;	/* ZONE_FRAG_WARN_* state */
};
110
/* All zones discovered on this system. */
struct memory_info {
	unsigned int		zone_count;	/* valid entries in zones[] */
	struct zone_info	zones[ZONE_MAX_NR];
};
115
/* Compaction thread control block. */
struct compact_control {
	struct memory_info	*mem_info;	/* owned; freed in compact_cleanup() */
	pthread_t		compact_thread;
	pthread_mutex_t		lock;		/* guards status */
	int		frag_level;		/* per-mille threshold, see COMPACT_DEF_FRAG_LEVEL */
	unsigned int		status;		/* COMPACT_* state bits */
	unsigned int		compact_type;
};
124
/*
 * Build a struct parse_arg entry binding a regex to a callback
 * and a PARSE_TAG_* id.
 */
#define PARSE_TAG(exp, fn, id)          \
	{                               \
		.re_exp  = exp,         \
		.callback = fn,         \
		.tag = PARSE_TAG_##id,  \
	}

/* Sentinel terminating a parse_arg array. */
#define PARSE_TAG_EMPTY()  {0,}
133
134 /*
135  * @TODO: This should be attached to module ops
136  */
137 struct compact_data {
138         struct compact_control *compact;
139         pthread_mutex_t notify_lock;
140         pthread_cond_t  notify;
141         pthread_mutex_t drained_lock;
142         pthread_cond_t  drained;
143 };
144
145 static struct compact_data compact_data = {
146         .notify_lock    = PTHREAD_MUTEX_INITIALIZER,
147         .notify         = PTHREAD_COND_INITIALIZER,
148         .drained_lock   = PTHREAD_MUTEX_INITIALIZER,
149         .drained        = PTHREAD_COND_INITIALIZER
150 };
151
152 static inline unsigned int get_zone_id(const char *zone_name, size_t len)
153 {
154         int i;
155
156         for (i = 0; i < ARRAY_SIZE(zone_names); ++i) {
157                 if (!strncmp(zone_name, zone_names[i], len))
158                         return 1 << i;
159         }
160         return 0;
161 }
162
163 static inline const char *get_zone_name(unsigned int zone_id)
164 {
165         unsigned int i = ffs(zone_id) - 1;
166
167         return (i < ARRAY_SIZE(zone_names)) ? zone_names[i] : NULL;
168 }
169
170 /*
171  * External fragmentation is an issue but one that mostly kernel
172  * should be concerned about, not the user space.
173  * Still though, fragmented *physical* memory may (but does not
174  * have to) lead to system getting less responsive - as the kernel
175  * might get trapped waiting for allocation of high-order
176  * physically-contiguous pages. The fragmentation issue gets more
 * significant in case of huge pages - though this is left aside
178  * due to not being relevant, in this particular case.
179  *
180  * Triggering the compaction from the user-space is a rather
 * nasty hack. But if this is to be done, then...
182  * 1. There is not much point in triggering compaction if
183  *    the system is already at a heavy memory pressure
184  *    and it is struggling to satisfy 0-order allocations
185  * 2. Specifying the overall fragmentation level is quite tricky
186  *    and without some decent background of what is/has been going
 *    on as far as memory allocations are concerned (both from
188  *    user-space and from the kernel) is not really reliable, to say
189  *    at least. All in all, what's the acceptable (whatever it means)
190  *    level for external fragmentation? Having most of the available
191  *    memory within the low-order page blocks should raise an alert,
192  *    but that's only in theory. Things get more complicated when taking
193  *    into consideration the migration types of available pages.
194  *    This might go wrong in so many ways .....
195  *    Shall this be continued ?
196  */
/*
 * Trigger system-wide compaction by writing to /proc/sys/vm/compact_memory.
 * On success the per-zone fragmentation warnings are reset; any failure is
 * recorded in compact->status (WITHDRAW for permanent errors, FAILURE for
 * a failed write).
 */
static void compaction_start(struct compact_control *compact)
{
	struct memory_info *mem_info;
	int current_status = COMPACT_IDLE;
	_cleanup_close_ int fd = -1;	/* auto-closed on scope exit (util.h) */
	int n = 1;

	/* Snapshot the status under the lock, then act on the local copy. */
	pthread_mutex_lock(&compact->lock);

	if (compact->status & COMPACT_SKIP)
		current_status |= COMPACT_WITHDRAW;

	pthread_mutex_unlock(&compact->lock);

	if (current_status & COMPACT_WITHDRAW)
		return;

	mem_info = compact->mem_info;


	fd = open(PROC_COMPACT_ENTRY,  O_WRONLY);
	if (fd < 0) {
		/* These errors will not go away by retrying - give up for good. */
		if (errno == EACCES || errno == EFAULT || errno == ENOENT)
			current_status |= COMPACT_WITHDRAW;
		_E("Compaction: failed to open procfs entry [%d]\n", errno);
		goto leave;
	}
	/*
	 * It doesn't really matter what gets written,
	 * as long as smth gets....
	 */
	if (write(fd, &n, sizeof(n)) <= 0)
		current_status |= COMPACT_FAILURE;
	/*
	 * Reset the external fragmentation warnings.
	 * Locking is not required here as all updates will get suspended
	 * until the compaction status won't indicate all is done here
	 */
	if (current_status & COMPACT_FAILURE)
		goto leave;

	/* n is reused as a loop index from here on. */
	for (n = 0; n < mem_info->zone_count; ++n)
		mem_info->zones[n].frag_warn &= ~ZONE_FRAG_WARN_RAISE;
leave:

	/* Publish the outcome back into the shared status word. */
	pthread_mutex_lock(&compact->lock);
	compact->status |= current_status;
	pthread_mutex_unlock(&compact->lock);
}
246
247 static void compact_validate_zone(struct zone_info *zone,
248                                   unsigned int frag_level,
249                                   struct memory_info *mem_info)
250 {
251         int order, req_order;
252         unsigned int current_frag_map = 0;
253         /*
254          * Skip compaction if the system is below the low watermark.
255          * It's gonna be done either way
256          */
257         if (zone->free_pages < zone->wm_low) {
258                 _I("Skipping validation due to falling below the low watermark\n");
259                 _I("Zone %s: number of free pages: %lu low watermark: %lu\n",
260                                 get_zone_name(zone->id),
261                                 zone->free_pages, zone->wm_low);
262                 return;
263         }
264
265         for (req_order = 1; req_order <= MAX_PAGE_ORDER; ++req_order) {
266                 unsigned long available_pages = 0;
267
268                 for (order = req_order; order <= MAX_PAGE_ORDER; ++order)
269                         available_pages += zone->pages_per_order[order] << order;
270
271                 if (zone->free_pages > 0 && (1000 - (available_pages * 1000 / zone->free_pages)) >= frag_level)
272                         current_frag_map |= 1 << req_order;
273         }
274
275         if (current_frag_map) {
276
277                 if ((!zone->frag_map && current_frag_map) ||
278                     ((zone->frag_map ^ current_frag_map) &&
279                     !((zone->frag_map ^ current_frag_map) & zone->frag_map)))
280
281                         zone->frag_warn |= ZONE_FRAG_WARN_RAISE;
282         }
283
284         zone->frag_map = current_frag_map;
285 }
286
287 static void compaction_verify_zone(struct compact_control *compact,
288                                    struct zone_info *zone)
289 {
290         struct memory_info *mem_info = compact->mem_info;
291
292         /*
293          * Here comes the shady part:
294          * without some decent memory tracing here it is
295          * truly difficult to determine whether the compaction
296          * is required or not.
297          */
298         compact_validate_zone(zone, compact->frag_level, mem_info);
299 }
300
301 static void compaction_verify(struct compact_control *compact)
302 {
303         /* Get the overall idea of current external fragmentation */
304         struct memory_info *mem_info = compact->mem_info;
305         unsigned int compact_targets = 0;
306         int n;
307
308         /*
309          * Verify each zone although the compaction can be
310          * triggered per node (or globally) only.
311          */
312         for (n = 0; n < mem_info->zone_count; ++n) {
313                 struct zone_info *zone = &mem_info->zones[n];
314
315                 /*
316                  * Some devices make a zone but don't allocate any pages for it.
317                  * We can ignore these empty zones.
318                  */
319                 if (zone->managed <= 0)
320                         continue;
321
322                 compaction_verify_zone(compact, zone);
323                 if (zone->frag_warn & ZONE_FRAG_WARN_RAISE) {
324                         /*
325                          * As the compaction can be triggered either globally
326                          * or on per-node it's enough to have at least one
327                          * zone for which the external fragmentation got
328                          * dangerously high. Still to have a minimum control
329                          * over the whole process - validate all zones.
330                          */
331                         ++compact_targets;
332                 }
333         }
334
335         if (compact_targets)
336                 compaction_start(compact);
337 }
338
339
/*
 * Store value @v into the struct zone_info member at byte offset @_off
 * (obtained via offsetof()); typeof(v) keeps the store width matched
 * to the value's type.
 */
#define compact_zoneinfo_set(zone, _off, v) \
	(*(typeof(v)*)(((char*)(zone)) + _off) = v)
342
343
344 static int compact_parse_zone(const char *s, regmatch_t *match,
345                                 unsigned int parse_tag, void *data)
346 {
347         struct parser_data *parser_data = (struct parser_data *)data;
348         unsigned int zone_id;
349         struct zone_info *zone;
350
351         if (parse_tag != PARSE_TAG_ZONE)
352                 return -EINVAL;
353
354         zone_id = get_zone_id(s + match[1].rm_so,
355                                  match[1].rm_eo - match[1].rm_so);
356         zone = parser_data->mem_info->zones;
357
358         if (!zone_id)
359                 return -EINVAL;
360
361         while (zone->id && zone->id != zone_id)
362                 ++zone;
363
364         if (!zone->id) {
365                 ++parser_data->mem_info->zone_count;
366                 zone->id = zone_id;
367         }
368
369         parser_data->zone = zone;
370         return 0;
371 }
372
373 static int compact_parse_zoneinfo(const char *s, regmatch_t *match,
374                                   unsigned int parse_tag,
375                                   void *data)
376 {
377         struct parser_data *parser_data = (struct parser_data *)data;
378         char *e;
379         unsigned long v;
380
381         v = strtoul(s + match[1].rm_so, &e, 0);
382         if (!(s != e))
383                 return -EINVAL;
384
385         switch (parse_tag) {
386         case PARSE_TAG_WM_MIN:
387                 compact_zoneinfo_set(parser_data->zone,
388                                 offsetof(struct zone_info, wm_min), v);
389                 break;
390         case PARSE_TAG_WM_LOW:
391                 compact_zoneinfo_set(parser_data->zone,
392                                 offsetof(struct zone_info, wm_low), v);
393
394                 break;
395         case PARSE_TAG_WM_HIGH:
396                 compact_zoneinfo_set(parser_data->zone,
397                                 offsetof(struct zone_info, wm_high), v);
398                 break;
399         case PARSE_TAG_MANAGED:
400                 compact_zoneinfo_set(parser_data->zone,
401                                 offsetof(struct zone_info, managed), v);
402                 break;
403         }
404         return 0;
405 }
406
407 static int compact_parse_pages(const char *s, regmatch_t *match,
408                                 unsigned int parse_tag, void *data)
409 {
410         struct parser_data *parser_data = (struct parser_data *)data;
411         char *e;
412         unsigned long v, page_count = 0;
413         int order;
414
415         if (parse_tag != PARSE_TAG_PAGE_COUNT)
416                 return -EINVAL;
417
418         for (order = 0; order < MAX_PAGE_ORDER; ++order) {
419
420                 v = strtoul(s, &e, 0);
421                 if (!(s != e))
422                         return -EINVAL;
423                 parser_data->zone->pages_per_order[order] = v;
424                 page_count += v << order;
425                 s = e;
426         }
427
428         if (parser_data->zone->free_pages != page_count) {
429                 /*
430                  * The drop of number of available pages is being handled
431                  * on a per-order basis, thought this might be a good point
432                  * to validate the zone's watermarks
433                  */
434                 parser_data->zone->free_pages = page_count;
435         }
436         return 0;
437 }
438
/*
 * Refresh the per-zone free-page-order counters from /proc/buddyinfo,
 * starting the parse cursor at the first zone slot.
 */
static int compact_get_buddyinfo(struct compact_control *compact)
{
	const struct parse_arg args[] = {
		PARSE_TAG("zone[[:blank:]]+(Normal|DMA|DMA32|HighMem)",
			compact_parse_zone, ZONE),
		PARSE_TAG("([[:blank:]]+([0-9]+))+",
			compact_parse_pages, PAGE_COUNT),
		PARSE_TAG_EMPTY(),
	};

	struct parser_data parser_data = {
		.mem_info = compact->mem_info,
		.zone = &compact->mem_info->zones[0],
	};

	return proc_parse_buddyinfo(args, &parser_data);
}
456
/*
 * Populate per-zone watermarks and managed-page counts from
 * /proc/zoneinfo, starting the parse cursor at the first zone slot.
 */
static int compact_get_zoneinfo(struct compact_control *compact)
{
	const struct parse_arg args[] = {
		PARSE_TAG("zone[[:blank:]]+(Normal|DMA|DMA32|HighMem)",
			  compact_parse_zone, ZONE),
		PARSE_TAG("min[[:blank:]]+([0-9]+)\n",
			  compact_parse_zoneinfo, WM_MIN),
		PARSE_TAG("low[[:blank:]]+([0-9]+)\n",
			  compact_parse_zoneinfo, WM_LOW),
		PARSE_TAG("high[[:blank:]]+([0-9]+)\n",
			  compact_parse_zoneinfo, WM_HIGH),
		PARSE_TAG("managed[[:blank:]]+([0-9]+)\n",
			  compact_parse_zoneinfo, MANAGED),
		PARSE_TAG_EMPTY(),
	};

	struct parser_data parser_data = {
		.mem_info = compact->mem_info,
		.zone = &compact->mem_info->zones[0],
	};
	return proc_parse_zoneinfo(args, &parser_data);
}
479
/*
 * Compaction thread main loop: on every genuine wake-up re-sample
 * /proc/buddyinfo and verify fragmentation, then block until the
 * memory-pressure notifier signals again. Exits once COMPACT_SKIP
 * (withdraw or cancel) appears in the status word.
 */
static void compact_track_frag_level(struct compact_control *compact)
{
	int woken = 1;

	do {
		/* Eliminate updates on spurious wake-ups */
		if (woken) {
			compact_get_buddyinfo(compact);
			compaction_verify(compact);
		}

		/*
		 * NOTE(review): wait-without-predicate - a signal sent before
		 * this thread reaches pthread_cond_wait() is lost until the
		 * next notification; confirm this latency is acceptable.
		 */
		pthread_mutex_lock(&compact_data.notify_lock);
		pthread_cond_wait(&compact_data.notify,
				&compact_data.notify_lock);
		pthread_mutex_unlock(&compact_data.notify_lock);

		pthread_mutex_lock(&compact->lock);
		woken = compact->status & COMPACT_NOTIFIED ? 1 : 0;
		compact->status &= ~COMPACT_NOTIFIED;
		pthread_mutex_unlock(&compact->lock);

		/*
		 * NOTE(review): status is read here outside compact->lock -
		 * technically a racy read; verify whether that matters.
		 */
	} while (!(compact->status & COMPACT_SKIP));

}
504
/*
 * Memory-level notifier callback: wake the compaction thread when the
 * memory level changes, provided the module is loaded and not quitting.
 *
 * @data points to the new memory level (int); levels outside
 * [MEM_LEVEL_HIGH, MEM_LEVEL_MAX) are rejected with RESOURCED_ERROR_FAIL.
 */
static int compact_mem_state_changed(void *data)
{
	struct compact_control *compact;
	struct memory_info *mem_info;
	int result = RESOURCED_ERROR_NONE;

	/* drained_lock also guards the compact_data.compact pointer. */
	pthread_mutex_lock(&compact_data.drained_lock);
	compact = compact_data.compact;
	mem_info = compact ? compact->mem_info : NULL;
	if (mem_info) {
		int new_state = *((int *)data);

		if (new_state < MEM_LEVEL_HIGH || new_state >= MEM_LEVEL_MAX) {
			result = RESOURCED_ERROR_FAIL;
			goto leave;
		}

		/* Only notify while the thread has not been told to quit. */
		pthread_mutex_lock(&compact_data.compact->lock);
		if (!(compact->status & COMPACT_SKIP)) {
			compact->status |= COMPACT_NOTIFIED;
			pthread_cond_signal(&compact_data.notify);
		}
		pthread_mutex_unlock(&compact_data.compact->lock);
	}
leave:
	pthread_mutex_unlock(&compact_data.drained_lock);
	return result;
}
533
534 static void compact_cleanup(struct compact_control *compact)
535 {
536         struct memory_info *mem_info = compact->mem_info;
537
538         if (!(compact->status & COMPACT_SKIP))
539                 _E("Invalid compact thread state [%d]\n", compact->status);
540
541         unregister_notifier(RESOURCED_NOTIFIER_MEM_LEVEL_CHANGED,
542                                 compact_mem_state_changed);
543
544         (void) pthread_mutex_destroy(&compact->lock);
545
546         free(mem_info);
547         free(compact);
548 }
549
/*
 * Compaction thread entry point: take an initial zoneinfo snapshot,
 * track fragmentation until withdrawn/cancelled, then clean up and
 * announce the exit via the 'drained' condition variable.
 */
static void *compact_tracer(void *arg)
{
	struct compact_data *cdata = (struct compact_data *)arg;
	struct compact_control *compact = cdata->compact;

	if (compact_get_zoneinfo(compact) == RESOURCED_ERROR_NONE)
		compact_track_frag_level(compact);

	/* Dropped - so clean-up */
	pthread_mutex_lock(&compact->lock);
	compact->status |= COMPACT_WITHDRAW;
	pthread_mutex_unlock(&compact->lock);

	/* drained_lock serializes this teardown against compact_exit(). */
	pthread_mutex_lock(&cdata->drained_lock);
	compact_cleanup(compact);
	cdata->compact = NULL;
	pthread_mutex_unlock(&cdata->drained_lock);

	pthread_cond_signal(&cdata->drained);

	pthread_exit(NULL);
}
572
573 static int compact_parse_config_file(struct compact_control *compact)
574 {
575         struct compact_conf *compact_conf = get_compact_conf();
576         if (!compact_conf) {
577                 _E("Compact configuration should not be NULL");
578                 return RESOURCED_ERROR_FAIL;
579         }
580         if (!compact_conf->enable) {
581                 (void) pthread_mutex_lock(&compact->lock);
582                 compact->status |= COMPACT_SKIP;
583                 (void) pthread_mutex_unlock(&compact->lock);
584         }
585
586         if (compact_conf->frag_level > 0)
587                 compact->frag_level = compact_conf->frag_level;
588
589         free_compact_conf();
590
591         _I("[DEBUG] compact status: %d", compact->status);
592         _I("[DEBUG] compact frag_level: %d", compact->frag_level);
593
594         return RESOURCED_ERROR_NONE;
595 }
596
597 /*static int compact_config_parse(struct parse_result *result, void *user_data)
598 {
599         struct compact_control *compact = (struct compact_control *)user_data;
600         unsigned long v;
601         char *e = NULL;
602
603         if (!result->section ||
604                 strncmp(result->section, COMPACT_CONFIG_SECTION, strlen(COMPACT_CONFIG_SECTION)+1))
605                 return RESOURCED_ERROR_NONE;
606
607         if (!result->name || !result->value)
608                 return RESOURCED_ERROR_NONE;
609
610         if (!strcmp(COMPACT_CONFIG_ENABLE, result->name)) {
611
612                 v =  strtol(result->value, &e, 10);
613
614                 if (!(result->value != e) || *e != '\0')
615                         return RESOURCED_ERROR_FAIL;
616
617                 if (!v) {
618                         (void) pthread_mutex_lock(&compact->lock);
619                         compact->status |= COMPACT_SKIP;
620                         (void) pthread_mutex_unlock(&compact->lock);
621                 }
622
623         } else if (!strcmp(COMPACT_CONFIG_FRAG, result->name)) {
624
625                 v = strtol(result->value, &e, 0);
626
627                 if (!(result->value != e) || *e != '\0')
628                         return RESOURCED_ERROR_FAIL;
629                 compact->frag_level = v;
630         }
631
632         return RESOURCED_ERROR_NONE;
633 }*/
634
635 static int compact_init(void *data)
636 {
637         struct memory_info      *mem_info;
638         struct compact_control  *compact;
639         int result = RESOURCED_ERROR_OUT_OF_MEMORY;
640
641         _I("[DEBUG] compact init");
642
643         pthread_mutex_lock(&compact_data.drained_lock);
644         if (compact_data.compact) {
645                 _E("[DEBUG] Unbalanced calls to compact module load/unload\n");
646                 result = RESOURCED_ERROR_NONE;
647                 goto leave;
648         }
649
650         compact = calloc(1, sizeof(*compact));
651         if (!compact)
652                 goto leave;
653
654         mem_info = calloc(1, sizeof(*mem_info));
655         if (!mem_info)
656                 goto cleanup;
657
658         compact->mem_info = mem_info;
659         compact->frag_level  = COMPACT_DEF_FRAG_LEVEL;
660
661         result = pthread_mutex_init(&compact->lock, NULL);
662         if (result) {
663                 _E("[DEBUG] Failed to init compact lock: %m");
664                 goto cleanup_all;
665         }
666
667         /* Load configuration */
668         compact_parse_config_file(compact);
669
670         if (compact->status & COMPACT_SKIP) {
671                 _I("[DEBUG] Compaction module disabled.");
672                 result = RESOURCED_ERROR_FAIL;
673                 goto cleanup_all;
674         }
675
676         compact_data.compact = compact;
677
678         result = pthread_create(&compact->compact_thread, NULL,
679                         compact_tracer, (void*)&compact_data);
680         if (result) {
681                 compact_data.compact = NULL;
682                 goto cleanup_all;
683         }
684
685         pthread_detach(compact->compact_thread);
686         pthread_mutex_unlock(&compact_data.drained_lock);
687
688         register_notifier(RESOURCED_NOTIFIER_MEM_LEVEL_CHANGED,
689                                  compact_mem_state_changed);
690         return RESOURCED_ERROR_NONE;
691
692 cleanup_all:
693         free(mem_info);
694 cleanup:
695         free(compact);
696 leave:
697         pthread_mutex_unlock(&compact_data.drained_lock);
698         return result;
699 }
700
/*
 * Module unload hook: ask the compaction thread to cancel, wake it,
 * and wait on the 'drained' condition until it has cleaned up.
 */
static int compact_exit(void *data)
{
	struct compact_control *compact;

	pthread_mutex_lock(&compact_data.drained_lock);
	compact = compact_data.compact;
	compact_data.compact = NULL;

	/* Thread never started or already torn down - nothing to drain. */
	if (!compact)
		goto leave;

	pthread_mutex_lock(&compact->lock);
	compact->status |= COMPACT_CANCEL;
	pthread_mutex_unlock(&compact->lock);

	/*
	 * NOTE(review): pthread_cond_wait without a predicate loop - a
	 * spurious wake-up would end this wait before the thread has
	 * actually drained; confirm that is tolerable here.
	 */
	pthread_cond_signal(&compact_data.notify);
	pthread_cond_wait(&compact_data.drained, &compact_data.drained_lock);
leave:
	pthread_mutex_unlock(&compact_data.drained_lock);
	return 0;
}
722
723 static int compact_runtime_support(void *data)
724 {
725         _cleanup_close_ int fd = -1;
726
727         fd = open(PROC_COMPACT_ENTRY, O_WRONLY);
728         if (fd < 0) {
729                 _E("Unable to open compaction procfs entry\n");
730                 return RESOURCED_ERROR_NO_DATA;
731         }
732         return RESOURCED_ERROR_NONE;
733 }
734
/* Module descriptor: registered as a late-priority resourced module. */
static struct module_ops compact_module_ops = {
	.priority               = MODULE_PRIORITY_LATE,
	.name                   = "compact",
	.init                   = compact_init,
	.exit                   = compact_exit,
	.check_runtime_support  = compact_runtime_support,
};

MODULE_REGISTER(&compact_module_ops)
744