1e4678d894687d08df735caba0c89aaf79a3de09
[platform/core/system/resourced.git] / src / resource-limiter / memory / vmpressure-lowmem-handler.c
1 /*
2  * resourced
3  *
4  * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved.
5  *
6  * Licensed under the Apache License, Version 2.0 (the "License");
7  * you may not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  * http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an "AS IS" BASIS,
14  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  */
18
19 /*
20  * @file vmpressure-lowmem-handler.c
21  *
22  * @desc lowmem handler using memcgroup
23  *
24  * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
25  *
26  */
27
28 #include <stdio.h>
29 #include <fcntl.h>
30 #include <assert.h>
31 #include <limits.h>
32 #include <vconf.h>
33 #include <unistd.h>
34 #include <time.h>
35 #include <limits.h>
36 #include <dirent.h>
37 #include <sys/time.h>
38 #include <sys/types.h>
39 #include <sys/stat.h>
40 #include <sys/shm.h>
41 #include <sys/sysinfo.h>
42 #include <sys/time.h>
43 #include <sys/resource.h>
44 #include <ctype.h>
45 #include <bundle.h>
46 #include <eventsystem.h>
47 #include <malloc.h>
48
49 #include "trace.h"
50 #include "cgroup.h"
51 #include "lowmem-handler.h"
52 #include "proc-common.h"
53 #include "procfs.h"
54 #include "freezer.h"
55 #include "resourced.h"
56 #include "macro.h"
57 #include "notifier.h"
58 #include "config-parser.h"
59 #include "module.h"
60 #include "swap-common.h"
61 #include "cgroup.h"
62 #include "memory-cgroup.h"
63 #include "heart-common.h"
64 #include "proc-main.h"
65 #include "dbus-handler.h"
66 #include "util.h"
67 #include "fd-handler.h"
68 #include "resourced-helper-worker.h"
69 #include "safe-kill.h"
70 #include "dedup-common.h"
71
72 #define LOWMEM_THRES_INIT               0
73
74 #define MEMPS_EXEC_PATH                 "usr/bin/memps"
75 #define MEM_CONF_FILE                   RD_CONFIG_FILE(limiter)
76 #define MEM_SECTION             "Memory"
77 #define MEM_VIP_SECTION                 "VIP_PROCESS"
78 #define MEM_VIP_PREDEFINE               "PREDEFINE"
79 #define MEM_POPUP_SECTION               "POPUP"
80 #define MEM_POPUP_STRING                "oom_popup"
81 #define MEM_BG_RECLAIM_SECTION  "BackgroundReclaim"
82 #define MEM_BG_RECLAIM_STRING   "AfterScreenDim"
83 #define MEM_LOGGING_SECTION             "Logging"
84
85 #define BUF_MAX                         1024
86 #define MAX_VICTIMS_BETWEEN_CHECK       3
87 #define MAX_PROACTIVE_LOW_VICTIMS       2
88 #define MAX_PROACTIVE_HIGH_VICTIMS      4
89 #define FOREGROUND_VICTIMS              1
90 #define OOM_TIMER_INTERVAL              2
91 #define OOM_KILLER_PRIORITY             -20
92 #define THRESHOLD_MARGIN                10 /* MB */
93
94 #define MEM_SIZE_64                     64  /* MB */
95 #define MEM_SIZE_256                    256 /* MB */
96 #define MEM_SIZE_448                    448 /* MB */
97 #define MEM_SIZE_512                    512 /* MB */
98 #define MEM_SIZE_768                    768 /* MB */
99 #define MEM_SIZE_1024                   1024 /* MB */
100 #define MEM_SIZE_2048                   2048 /* MB */
101
102 /* thresholds for 64M RAM*/
103 #define PROACTIVE_64_THRES                      10 /* MB */
104 #define PROACTIVE_64_LEAVE                      30 /* MB */
105 #define CGROUP_ROOT_64_THRES_DEDUP              16 /* MB */
106 #define CGROUP_ROOT_64_THRES_SWAP               15 /* MB */
107 #define CGROUP_ROOT_64_THRES_LOW                8  /* MB */
108 #define CGROUP_ROOT_64_THRES_MEDIUM             5  /* MB */
109 #define CGROUP_ROOT_64_THRES_LEAVE              8  /* MB */
110 #define CGROUP_ROOT_64_NUM_VICTIMS              1
111
112 /* thresholds for 256M RAM */
113 #define PROACTIVE_256_THRES                     50 /* MB */
114 #define PROACTIVE_256_LEAVE                     80 /* MB */
115 #define CGROUP_ROOT_256_THRES_DEDUP     60 /* MB */
116 #define CGROUP_ROOT_256_THRES_SWAP              40 /* MB */
117 #define CGROUP_ROOT_256_THRES_LOW               20 /* MB */
118 #define CGROUP_ROOT_256_THRES_MEDIUM            10 /* MB */
119 #define CGROUP_ROOT_256_THRES_LEAVE             20 /* MB */
120 #define CGROUP_ROOT_256_NUM_VICTIMS             2
121
122 /* threshold for 448M RAM */
123 #define PROACTIVE_448_THRES                     80 /* MB */
124 #define PROACTIVE_448_LEAVE                     100 /* MB */
125 #define CGROUP_ROOT_448_THRES_DEDUP     120 /* MB */
126 #define CGROUP_ROOT_448_THRES_SWAP              100 /* MB */
127 #define CGROUP_ROOT_448_THRES_LOW               60  /* MB */
128 #define CGROUP_ROOT_448_THRES_MEDIUM            50  /* MB */
129 #define CGROUP_ROOT_448_THRES_LEAVE             70  /* MB */
130 #define CGROUP_ROOT_448_NUM_VICTIMS             5
131
132 /* threshold for 512M RAM */
133 #define PROACTIVE_512_THRES                     100 /* MB */
134 #define PROACTIVE_512_LEAVE                     80 /* MB */
135 #define CGROUP_ROOT_512_THRES_DEDUP     140 /* MB */
136 #define CGROUP_ROOT_512_THRES_SWAP              100 /* MB */
137 #define CGROUP_ROOT_512_THRES_LOW               70  /* MB */
138 #define CGROUP_ROOT_512_THRES_MEDIUM            60  /* MB */
139 #define CGROUP_ROOT_512_THRES_LEAVE             80  /* MB */
140 #define CGROUP_ROOT_512_NUM_VICTIMS             5
141
142 /* threshold for 768 RAM */
143 #define PROACTIVE_768_THRES                     100 /* MB */
144 #define PROACTIVE_768_LEAVE                     130 /* MB */
145 #define CGROUP_ROOT_768_THRES_DEDUP     180 /* MB */
146 #define CGROUP_ROOT_768_THRES_SWAP              150 /* MB */
147 #define CGROUP_ROOT_768_THRES_LOW               90  /* MB */
148 #define CGROUP_ROOT_768_THRES_MEDIUM            80  /* MB */
149 #define CGROUP_ROOT_768_THRES_LEAVE             100  /* MB */
150 #define CGROUP_ROOT_768_NUM_VICTIMS             5
151
152 /* threshold for more than 1024M RAM */
153 #define PROACTIVE_1024_THRES                    230 /* MB */
154 #define PROACTIVE_1024_LEAVE                    150 /* MB */
155 #define CGROUP_ROOT_1024_THRES_DEDUP            400 /* MB */
156 #define CGROUP_ROOT_1024_THRES_SWAP             300 /* MB */
157 #define CGROUP_ROOT_1024_THRES_LOW              120 /* MB */
158 #define CGROUP_ROOT_1024_THRES_MEDIUM           100 /* MB */
159 #define CGROUP_ROOT_1024_THRES_LEAVE            150 /* MB */
160 #define CGROUP_ROOT_1024_NUM_VICTIMS            5
161
162 /* threshold for more than 2048M RAM */
163 #define PROACTIVE_2048_THRES                    200 /* MB */
164 #define PROACTIVE_2048_LEAVE                    500 /* MB */
165 #define CGROUP_ROOT_2048_THRES_DEDUP            400 /* MB */
166 #define CGROUP_ROOT_2048_THRES_SWAP             300 /* MB */
167 #define CGROUP_ROOT_2048_THRES_LOW              200 /* MB */
168 #define CGROUP_ROOT_2048_THRES_MEDIUM           160 /* MB */
169 #define CGROUP_ROOT_2048_THRES_LEAVE            300 /* MB */
170 #define CGROUP_ROOT_2048_NUM_VICTIMS            10
171
172 /* threshold for more than 3072M RAM */
173 #define PROACTIVE_3072_THRES                    300 /* MB */
174 #define PROACTIVE_3072_LEAVE                    700 /* MB */
175 #define CGROUP_ROOT_3072_THRES_DEDUP            600 /* MB */
176 #define CGROUP_ROOT_3072_THRES_SWAP             500 /* MB */
177 #define CGROUP_ROOT_3072_THRES_LOW              400 /* MB */
178 #define CGROUP_ROOT_3072_THRES_MEDIUM           250 /* MB */
179 #define CGROUP_ROOT_3072_THRES_LEAVE            500 /* MB */
180 #define CGROUP_ROOT_3072_NUM_VICTIMS            10
181
182 static unsigned proactive_threshold;
183 static unsigned proactive_leave;
184 static unsigned lmk_start_threshold;
185
186 static char *event_level = MEMCG_DEFAULT_EVENT_LEVEL;
187
188 /**
189  * Resourced Low Memory Killer
190  * NOTE: planned to be moved to a separate file.
191  */
192 /*-------------------------------------------------*/
193 #define OOM_TIMER_INTERVAL_SEC  2
194 #define LMW_LOOP_WAIT_TIMEOUT_MSEC      OOM_TIMER_INTERVAL_SEC*(G_USEC_PER_SEC)
195 #define LMW_RETRY_WAIT_TIMEOUT_MSEC     (G_USEC_PER_SEC)
196
/* A single reclaim request handed to the LMK worker thread. */
struct lowmem_control {
	/*
	 * For each queued request the following properties
	 * are required with two exceptions:
	 *  - status is being set by LMK
	 *  - callback is optional
	 */
	/* Processing flags (OOM_* bits, e.g. OOM_FORCE / OOM_SINGLE_SHOT) */
	unsigned int flags;
	/* Indicator for OOM score of targeted processes */
	enum cgroup_type type;

	/* Desired size to be restored - level to be reached (MB)*/
	unsigned int size;
	/* Max number of processes to be considered */
	unsigned int count;
	/* Memory reclaim status (LOWMEM_RECLAIM_*, written by the LMK) */
	int status;
	/*
	 * Optional - if set, will be triggered by LMK once the request
	 * is handled.
	 */
	void (*callback) (struct lowmem_control *);
};
221
/* Context of the LMK worker thread that consumes lowmem_control requests. */
struct lowmem_worker {
	/* thread draining the request queue */
	pthread_t	worker_thread;
	/* queue of struct lowmem_control * requests */
	GAsyncQueue	*queue;
	/* nonzero while the worker accepts requests (see LOWMEM_WORKER_ACTIVATE) */
	int		active;
	/* nonzero while a request is being processed (see LOWMEM_WORKER_RUN) */
	int		running;
};
228
229 static struct lowmem_worker lmw;
230
231 //static int memlog_enabled;
232 //static int memlog_nr_max = DEFAULT_MEMLOG_NR_MAX;
233 /* remove logfiles to reduce to this threshold.
234  * it is about five-sixths of the memlog_nr_max. */
235 //static int memlog_remove_batch_thres = (DEFAULT_MEMLOG_NR_MAX * 5) / 6;
236 //static char *memlog_path = DEFAULT_MEMLOG_PATH;
237 //static char *memlog_prefix[MEMLOG_MAX];
238
239 #define LOWMEM_WORKER_IS_ACTIVE(_lmw)   g_atomic_int_get(&(_lmw)->active)
240 #define LOWMEM_WORKER_ACTIVATE(_lmw)    g_atomic_int_set(&(_lmw)->active, 1)
241 #define LOWMEM_WORKER_DEACTIVATE(_lmw)  g_atomic_int_set(&(_lmw)->active, 0)
242
243 #define LOWMEM_WORKER_IS_RUNNING(_lmw)  g_atomic_int_get(&(_lmw)->running)
244 #define LOWMEM_WORKER_RUN(_lmw) g_atomic_int_set(&(_lmw)->running, 1)
245 #define LOWMEM_WORKER_IDLE(_lmw)        g_atomic_int_set(&(_lmw)->running, 0)
246
247 #define LOWMEM_NEW_REQUEST() g_slice_new0(struct lowmem_control)
248
249 #define LOWMEM_DESTROY_REQUEST(_ctl)            \
250         g_slice_free(typeof(*(_ctl)), _ctl);    \
251
252 #define LOWMEM_SET_REQUEST(c, __flags, __type, __size, __count, __cb)   \
253 {                                                                       \
254         (c)->flags      = __flags; (c)->type    = __type;               \
255         (c)->size       = __size;  (c)->count   = __count;              \
256         (c)->callback   = __cb;                                         \
257 }
258
259 #define BUFF_MAX        255
260 #define APP_ATTR_PATH "/proc/%d/attr/current"
261
262 static int get_privilege(pid_t pid, char *name, size_t len)
263 {
264         char path[PATH_MAX];
265         char attr[BUFF_MAX];
266         size_t attr_len;
267         FILE *fp;
268
269         snprintf(path, sizeof(path), APP_ATTR_PATH, pid);
270
271         fp = fopen(path, "r");
272         if (!fp)
273                 return -errno;
274
275         attr_len = fread(attr, 1, sizeof(attr) - 1, fp);
276         fclose(fp);
277         if (attr_len <= 0)
278                 return -ENOENT;
279
280         attr[attr_len] = '\0';
281
282         snprintf(name, len, "%s", attr);
283         return 0;
284 }
285
286 static int is_app(pid_t pid)
287 {
288         char attr[BUFF_MAX];
289         size_t len;
290         int ret;
291
292         ret = get_privilege(pid, attr, sizeof(attr));
293         if (ret < 0) {
294                 _E("Failed to get privilege of PID(%d).", pid);
295                 return -1;
296         }
297
298         len = strlen(attr) + 1;
299
300         if (!strncmp("System", attr, len))
301                 return 0;
302
303         if (!strncmp("User", attr, len))
304                 return 0;
305
306         if (!strncmp("System::Privileged", attr, len))
307                 return 0;
308
309         return 1;
310 }
311
312
313 static void lowmem_queue_request(struct lowmem_worker *lmw,
314                                 struct lowmem_control *ctl)
315 {
316         if (LOWMEM_WORKER_IS_ACTIVE(lmw))
317                 g_async_queue_push(lmw->queue, ctl);
318 }
319
320 /* internal */
321 static void lowmem_drain_queue(struct lowmem_worker *lmw)
322 {
323         struct lowmem_control *ctl;
324
325         g_async_queue_lock(lmw->queue);
326         while ((ctl = g_async_queue_try_pop_unlocked(lmw->queue))) {
327                 if (ctl->callback)
328                         ctl->callback(ctl);
329                 LOWMEM_DESTROY_REQUEST(ctl);
330         }
331         g_async_queue_unlock(lmw->queue);
332 }
333
334 static void lowmem_request_destroy(gpointer data)
335 {
336         struct lowmem_control *ctl = (struct lowmem_control*) data;
337
338         if (ctl->callback)
339                 ctl->callback(ctl);
340         LOWMEM_DESTROY_REQUEST(ctl);
341 }
342
343 /*-------------------------------------------------*/
344
345 /* low memory action function for cgroup */
346 static void memory_cgroup_proactive_lmk_act(enum cgroup_type type, struct memcg_info *mi);
347 /* low memory action function */
348 static void high_mem_act(void);
349 static void swap_activate_act(void);
350 static void swap_compact_act(void);
351 static void lmk_act(void);
352
353
354 static size_t cur_mem_state = MEM_LEVEL_HIGH;
355 static int num_max_victims = MAX_MEMORY_CGROUP_VICTIMS;
356 static int num_vict_between_check = MAX_VICTIMS_BETWEEN_CHECK;
357
358 static unsigned long totalram;
359 static unsigned long ktotalram;
360
361 static struct module_ops memory_modules_ops;
362 static const struct module_ops *lowmem_ops;
363 static bool oom_popup_enable;
364 static bool oom_popup;
365 static bool memcg_swap_status;
366 static bool bg_reclaim;
367 static int fragmentation_size;
368
369 static const char *convert_cgroup_type_to_str(int type)
370 {
371         static const char *type_table[] =
372         {"/", "VIP", "High", "Medium", "Lowest"};
373         if (type >= CGROUP_ROOT && type <= CGROUP_LOW)
374                 return type_table[type];
375         else
376                 return "Error";
377 }
378
379 static const char *convert_status_to_str(int status)
380 {
381         static const char *status_table[] =
382         {"none", "done", "drop", "cont", "retry", "next_type"};
383         if(status >= LOWMEM_RECLAIM_NONE && status <= LOWMEM_RECLAIM_NEXT_TYPE)
384                 return status_table[status];
385         return "error status";
386 }
387
388 static const char *convert_memstate_to_str(int mem_state)
389 {
390         static const char *state_table[] = {"mem high", "mem medium",
391                 "mem low", "mem critical", "mem oom",};
392         if (mem_state >= 0 && mem_state < MEM_LEVEL_MAX)
393                 return state_table[mem_state];
394         return "";
395 }
396
/* Ask the system popup service over D-Bus to show the low-memory OOM
 * popup.  Returns the result of the synchronous D-Bus call. */
static int lowmem_launch_oompopup(void)
{
	/* Build the a{ss} argument {"_SYSPOPUP_CONTENT_": "lowmemory_oom"}. */
	GVariantBuilder *const gv_builder = g_variant_builder_new(G_VARIANT_TYPE("a{ss}"));
	g_variant_builder_add(gv_builder, "{ss}", "_SYSPOPUP_CONTENT_", "lowmemory_oom");

	/* The builder is unreffed once its contents are captured in params. */
	GVariant *const params = g_variant_new("(a{ss})", gv_builder);
	g_variant_builder_unref(gv_builder);

	int ret = d_bus_call_method_sync_gvariant(SYSTEM_POPUP_BUS_NAME,
		SYSTEM_POPUP_PATH_SYSTEM, SYSTEM_POPUP_IFACE_SYSTEM,
		"PopupLaunch", params);

	g_variant_unref(params);

	return ret;
}
413
414 static inline void get_total_memory(void)
415 {
416         struct sysinfo si;
417         if (totalram)
418                 return;
419
420         if (!sysinfo(&si)) {
421                 totalram = si.totalram;
422                 ktotalram = BYTE_TO_KBYTE(totalram);
423         }
424 }
425
426 static int lowmem_mem_usage_uss(pid_t pid, unsigned int *usage)
427 {
428         unsigned int uss, zram = 0;
429         int ret;
430
431         *usage = 0;
432
433         /*
434          * In lowmem we need to know memory size of processes to
435          * for terminating apps. To get most real value of usage
436          * we should use USS + ZRAM usage for selected process.
437          *
438          * Those values will contain the most approximated amount
439          * of memory that will be freed after process termination.
440          */
441         ret = proc_get_uss(pid, &uss);
442         if (ret != RESOURCED_ERROR_NONE)
443                 return ret;
444
445         if (swap_get_state() == SWAP_ON) {
446                 ret = proc_get_zram_usage(pid, &zram);
447                 /* If we don't get zram usage, it's not a problem */
448                 if (ret != RESOURCED_ERROR_NONE)
449                         zram = 0;
450         }
451         *usage = uss + zram;
452         return RESOURCED_ERROR_NONE;
453 }
454
455 unsigned int lowmem_get_task_mem_usage_rss(const struct task_info *tsk)
456 {
457         unsigned int size = 0, total_size = 0;
458         int index, ret;
459         pid_t pid;
460
461         /*
462          * If pids are allocated only when there are multiple processes with
463          * the same pgid e.g., browser and web process. Mostly, single process
464          * is used.
465          */
466         if (tsk->pids == NULL) {
467                 ret = proc_get_ram_usage(tsk->pid, &size);
468
469                 /* If there is no proc entry for given pid the process
470                  * should be abandoned during further processing
471                  */
472                 if (ret < 0)
473                         _D("failed to get rss memory usage of %d", tsk->pid);
474
475                 return size;
476         }
477
478         for (index = 0; index < tsk->pids->len; index++) {
479                 pid = g_array_index(tsk->pids, pid_t, index);
480                 ret = proc_get_ram_usage(pid, &size);
481                 if (ret != RESOURCED_ERROR_NONE)
482                         continue;
483                 total_size += size;
484         }
485
486         return total_size;
487 }
488
489 /*static int memps_file_select(const struct dirent *entry)
490 {
491         return strstr(entry->d_name, "memps") ? 1 : 0;
492 }
493
494 static char *strrstr(const char *str, const char *token)
495 {
496         int len = strlen(token);
497         const char *p = str + strlen(str);
498
499         while (str <= --p)
500                 if (p[0] == token[0] && strncmp(p, token, len) == 0)
501                         return (char *)p;
502
503         return NULL;
504 }
505
506 static int timesort(const struct dirent **a, const struct dirent **b)
507 {
508         long long time1 = 0;
509         long long time2 = 0;
510         char *ptr;
511
512         ptr = strrstr((*a)->d_name, "_");
513         if (ptr && *++ptr)
514                 time1 = atoll(ptr);
515
516         ptr = strrstr((*b)->d_name, "_");
517         if (ptr && *++ptr)
518                 time2 = atoll(ptr);
519
520         return (time1 - time2);
521 }
522
523 static int clear_logs(void *data)
524 {
525         struct dirent **namelist;
526         int n, i, ret;
527         char fpath[BUF_MAX];
528         char *fname;
529         char *dir = (char*)data;
530         int len;
531
532         if (!memlog_enabled)
533                 return RESOURCED_ERROR_NONE;
534
535         if (!dir)
536                 return RESOURCED_ERROR_NONE;
537
538         len = strlen(dir);
539         if (len <= 0 || len >= sizeof fpath - 1) {
540                 _E("Invalid parameter - Directory path is too short or too long");
541                 return RESOURCED_ERROR_INVALID_PARAMETER;
542         }
543
544         n = scandir(dir, &namelist, memps_file_select, timesort);
545
546         _D("num of log files %d", n);
547         if (n <= memlog_nr_max) {
548                 while (n--)
549                         free(namelist[n]);
550                 free(namelist);
551                 return RESOURCED_ERROR_NONE;
552         }
553
554         strncpy(fpath, dir, sizeof fpath - 1);
555         fpath[sizeof fpath - 1] = '\0';
556         fname = fpath + len;
557         *fname++ = '/';
558
559         len = sizeof fpath - len - 1;
560         for (i = 0; i < n; i++) {
561                 if (i < n - memlog_remove_batch_thres) {
562                         if (strlen(namelist[i]->d_name) > len - 1)
563                                 continue;
564                         strncpy(fname, namelist[i]->d_name, len - 1);
565                         fpath[sizeof fpath - 1] = '\0';
566                         _D("remove log file %s", fpath);
567                         ret = remove(fpath);
568                         if (ret < 0)
569                                 _E("%s file cannot removed", fpath);
570                 }
571
572                 free(namelist[i]);
573         }
574         free(namelist);
575         return RESOURCED_ERROR_NONE;
576 }
577
578 void make_memps_log(enum mem_log memlog, pid_t pid, char *victim_name)
579 {
580         time_t now;
581         struct tm cur_tm;
582         char new_log[BUF_MAX];
583         static pid_t old_pid;
584         int oom_score_adj = 0, ret;
585         char *prefix;
586
587         if (!memlog_enabled)
588                 return;
589
590         if (memlog < MEMLOG_MEMPS || memlog >= MEMLOG_MAX)
591                 return;
592
593         prefix = memlog_prefix[memlog];
594
595         if (old_pid == pid)
596                 return;
597
598         old_pid = pid;
599
600         now = time(NULL);
601
602         if (localtime_r(&now, &cur_tm) == NULL) {
603                 _E("Fail to get localtime");
604                 return;
605         }
606
607         snprintf(new_log, sizeof(new_log),
608                 "%s/%s_%s_%d_%.4d%.2d%.2d%.2d%.2d%.2d", memlog_path, prefix, victim_name,
609                 pid, (1900 + cur_tm.tm_year), 1 + cur_tm.tm_mon,
610                 cur_tm.tm_mday, cur_tm.tm_hour, cur_tm.tm_min,
611                 cur_tm.tm_sec);
612
613         ret = proc_get_oom_score_adj(pid, &oom_score_adj);
614         if (ret || oom_score_adj > OOMADJ_BACKGRD_LOCKED) {
615
616                 _cleanup_fclose_ FILE *f = NULL;
617
618                 f = fopen(new_log, "w");
619                 if (!f) {
620                         _E("fail to create memps log %s", new_log);
621                         return;
622                 }
623                 proc_print_meninfo(f);
624
625         } else {
626
627                 const char *argv[4] = {"/usr/bin/memps", "-f", NULL, NULL};
628
629                 argv[2] = new_log;
630                 exec_cmd(ARRAY_SIZE(argv), argv);
631         }
632
633         clear_logs(memlog_path);
634 }*/
635
/*
 * Kill one victim task and report the memory expected to be reclaimed.
 *
 * @tsk:         victim descriptor (pid, oom scores, cached size, pai)
 * @flags:       OOM_* processing flags (only logged here)
 * @memps_log:   memps logging toggle (logging calls are currently commented out)
 * @victim_size: out - set to tsk->size, the victim's cached usage
 *
 * resourced itself and the OOM tooling (memps, crash-worker,
 * system-syspopup) are never killed.  Tasks tracked by proc_app_info get
 * SIGTERM when their LRU oom score is at most OOMADJ_BACKGRD_LOCKED, or
 * when between LOCKED and UNLOCKED with PROC_SIGTERM set - unless they
 * already survived one OOM kill, in which case SIGKILL is used.  All
 * other tasks get SIGKILL.  May raise the OOM popup for foreground-level
 * victims.
 *
 * Returns RESOURCED_ERROR_NONE when a kill was issued,
 * RESOURCED_ERROR_FAIL otherwise.
 */
static int lowmem_kill_victim(const struct task_info *tsk,
		int flags, int memps_log, unsigned int *victim_size)
{
	pid_t pid;
	int ret;
	char appname[PATH_MAX];
	int sigterm = 0;
	struct proc_app_info *pai;

	pid = tsk->pid;

	/* never kill invalid pids or resourced itself */
	if (pid <= 0 || pid == getpid())
		return RESOURCED_ERROR_FAIL;

	ret = proc_get_cmdline(pid, appname, sizeof appname);
	if (ret == RESOURCED_ERROR_FAIL)
		return RESOURCED_ERROR_FAIL;

	/* tools needed to diagnose/handle the OOM situation are exempt */
	if (!strcmp("memps", appname) ||
	    !strcmp("crash-worker", appname) ||
	    !strcmp("system-syspopup", appname)) {
		_E("%s(%d) was selected, skip it", appname, pid);
		return RESOURCED_ERROR_FAIL;
	}

/*	if (!memps_log)
		make_memps_log(MEMLOG_MEMPS, pid, appname);*/

	pai = tsk->pai;
	if (pai) {
		resourced_proc_status_change(PROC_CGROUP_SET_TERMINATE_REQUEST,
			pid, NULL, NULL, PROC_TYPE_NONE);

		/* choose SIGTERM for apps that may shut down gracefully */
		if (tsk->oom_score_lru <= OOMADJ_BACKGRD_LOCKED) {
			sigterm = 1;
		} else if (tsk->oom_score_lru > OOMADJ_BACKGRD_LOCKED && tsk->oom_score_lru < OOMADJ_BACKGRD_UNLOCKED) {
			int app_flag = pai->flags;
			sigterm = app_flag & PROC_SIGTERM;
		}

		/* a process that survived a previous OOM kill gets SIGKILL now */
		if (pai->memory.oom_killed)
			sigterm = 0;

		pai->memory.oom_killed = true;
	}

	if (sigterm)
		safe_kill(pid, SIGTERM);
	else
		safe_kill(pid, SIGKILL);

	_D("[LMK] we killed, force(%d), %d (%s) score = %d, size: rss = %u, sigterm = %d\n",
	   flags & OOM_FORCE, pid, appname, tsk->oom_score_adj,
	   tsk->size, sigterm);
	*victim_size = tsk->size;

	/* the popup is only shown for foreground-level victims */
	if (tsk->oom_score_lru > OOMADJ_FOREGRD_UNLOCKED)
		return RESOURCED_ERROR_NONE;

	if (oom_popup_enable && !oom_popup) {
		lowmem_launch_oompopup();
		oom_popup = true;
	}
/*	if (memps_log)
		make_memps_log(MEMLOG_MEMPS, pid, appname);*/

	return RESOURCED_ERROR_NONE;
}
704
705 /* return LOWMEM_RECLAIM_CONT when killing should be continued */
706 static int lowmem_check_kill_continued(struct task_info *tsk, int flags)
707 {
708         unsigned int available;
709
710         /*
711          * Processes with the priority higher than perceptible are killed
712          * only when the available memory is less than dynamic oom threshold.
713          */
714         if (tsk->oom_score_lru > OOMADJ_BACKGRD_PERCEPTIBLE)
715                 return LOWMEM_RECLAIM_CONT;
716
717         if (flags & (OOM_FORCE|OOM_SINGLE_SHOT)) {
718                 _I("[LMK] %d is dropped during force kill, flag=%d",
719                         tsk->pid, flags);
720                 return LOWMEM_RECLAIM_DROP;
721         }
722         available = proc_get_mem_available();
723         if (available > lmk_start_threshold) {
724                 _I("[LMK] available=%d MB, larger than %u MB, do not kill foreground",
725                         available, lmk_start_threshold);
726                 return LOWMEM_RECLAIM_RETRY;
727         }
728         return LOWMEM_RECLAIM_CONT;
729 }
730
731 static int compare_victims(const struct task_info *ta, const struct task_info *tb)
732 {
733         unsigned int pa, pb;
734
735         assert(ta != NULL);
736         assert(tb != NULL);
737         /*
738          * followed by kernel badness point calculation using heuristic.
739          * oom_score_adj is normalized by its unit, which varies -1000 ~ 1000.
740          */
741         pa = ta->oom_score_lru * (ktotalram / 2000) + ta->size;
742         pb = tb->oom_score_lru * (ktotalram / 2000) + tb->size;
743
744         return pb - pa;
745 }
746
747 static void lowmem_free_task_info_array(GArray *array)
748 {
749         int i;
750
751         for (i = 0; i < array->len; i++) {
752                 struct task_info *tsk;
753
754                 tsk = &g_array_index(array, struct task_info, i);
755                 if (tsk->pids)
756                         g_array_free(tsk->pids, true);
757         }
758
759         g_array_free(array, true);
760 }
761
762 static inline int is_dynamic_process_killer(int flags)
763 {
764         return (flags & OOM_FORCE) && !(flags & OOM_NOMEMORY_CHECK);
765 }
766
767 static unsigned int is_memory_recovered(unsigned int *avail, unsigned int thres)
768 {
769         unsigned int available = proc_get_mem_available();
770         unsigned int should_be_freed = 0;
771
772         if (available < thres)
773                 should_be_freed = thres - available;
774         /*
775          * free THRESHOLD_MARGIN more than real should be freed,
776          * because launching app is consuming up the memory.
777          */
778         if (should_be_freed > 0)
779                 should_be_freed += THRESHOLD_MARGIN;
780
781         *avail = available;
782
783         return should_be_freed;
784 }
785
/*
 * Scan /proc and append a task_info entry for every killable app task.
 *
 * Skipped entries: non-numeric names, invalid pids, processes without a
 * process group, processes whose privilege label marks them as system
 * (see is_app()), already-terminated pids, VIP-cgroup processes, and
 * pids in the (OOMADJ_SU, OOMADJ_APP_MAX] range that are already tracked
 * via proc_app_info.
 *
 * Returns RESOURCED_ERROR_NONE on success (even if nothing was added),
 * RESOURCED_ERROR_FAIL when /proc cannot be opened.
 */
static int lowmem_get_pids_proc(GArray *pids)
{
	DIR *dp;
	struct dirent *dentry;

	dp = opendir("/proc");
	if (!dp) {
		_E("fail to open /proc");
		return RESOURCED_ERROR_FAIL;
	}
	while ((dentry = readdir(dp)) != NULL) {
		struct task_info tsk;
		pid_t pid = 0, pgid = 0;
		int oom = 0;

		/* only numeric directory names are processes */
		if (!isdigit(dentry->d_name[0]))
			continue;

		pid = (pid_t)atoi(dentry->d_name);
		if (pid < 1)
			/* skip invalid pids or kernel processes */
			continue;

		pgid = getpgid(pid);
		if (pgid < 1)
			continue;

		if(is_app(pid) != 1)
			continue;

		if (proc_get_oom_score_adj(pid, &oom) < 0) {
			_D("pid(%d) was already terminated", pid);
			continue;
		}

		/* VIP pids should be excluded from the LMK list */
		if (cgroup_get_type(oom) == CGROUP_VIP)
			continue;

		/*
		 * Check whether this array includes applications or not.
		 * If it doesn't require to get applications
		 * and pid has been already included in pai,
		 * skip to append.
		 */
		if (oom > OOMADJ_SU && oom <= OOMADJ_APP_MAX)
			continue;

		/*
		 * Currently, for tasks in the memory cgroup,
		 * do not consider multiple tasks with one pgid.
		 */
		tsk.pid = pid;
		tsk.pgid = pgid;
		tsk.oom_score_adj = oom;
		tsk.oom_score_lru = oom;
		tsk.pids = NULL;
		tsk.size = lowmem_get_task_mem_usage_rss(&tsk);
		tsk.pai = NULL;

		g_array_append_val(pids, tsk);
	}

	closedir(dp);
	return RESOURCED_ERROR_NONE;
}
852
853 /**
854  * @brief Terminate up to max_victims processes after finding them from pai.
855         It depends on proc_app_info lists
856         and it also reference systemservice cgroup
857         because some processes in this group don't have proc_app_info.
858  *
859  * @max_victims:            max number of processes to be terminated
860  * @start_oom:      find victims from start oom adj score value
861  * @end_oom: find victims to end oom adj score value
862  * @should_be_freed: amount of memory to be reclaimed (in MB)
863  * @total_size[out]: total size of possibly reclaimed memory (required)
864  * @completed:      final outcome (optional)
865  * @threshold:          desired value of memory available
866  */
867 static int lowmem_kill_victims(int max_victims,
868         int start_oom, int end_oom, unsigned should_be_freed, int flags,
869         unsigned int *total_size, int *completed, int threshold)
870 {
871         int total_count = 0;
872         GSList *proc_app_list = NULL;
873         int i, ret, victim = 0;
874         unsigned int victim_size = 0;
875         unsigned int total_victim_size = 0;
876         int status = LOWMEM_RECLAIM_NONE;
877         GArray *candidates = NULL;
878         GSList *iter, *iterchild;
879         struct proc_app_info *pai = NULL;
880         int oom_score_adj;
881         int should_be_freed_kb = MBYTE_TO_KBYTE(should_be_freed);
882
883         candidates = g_array_new(false, false, sizeof(struct task_info));
884
885         proc_app_list = proc_app_list_open();
886         gslist_for_each_item(iter, proc_app_list) {
887                 struct task_info ti;
888
889                 total_count++;
890                 pai = (struct proc_app_info *)iter->data;
891                 if (!pai->main_pid)
892                         continue;
893
894                 oom_score_adj = pai->memory.oom_score_adj;
895                 if (oom_score_adj > end_oom || oom_score_adj < start_oom)
896                         continue;
897
898                 if ((flags & OOM_REVISE) && pai->memory.oom_killed)
899                         continue;
900
901                 ti.pid = pai->main_pid;
902                 ti.pgid = getpgid(ti.pid);
903                 ti.oom_score_adj = oom_score_adj;
904                 ti.pai = pai;
905
906                 /*
907                  * Before oom_score_adj of favourite (oom_score = 270) applications is
908                  * independent of lru_state, now we consider lru_state, while
909                  * killing favourite process.
910                  */
911
912                 if (oom_score_adj == OOMADJ_FAVORITE && pai->lru_state >= PROC_BACKGROUND)
913                         ti.oom_score_lru = OOMADJ_FAVORITE + OOMADJ_FAVORITE_APP_INCREASE * pai->lru_state;
914                 else
915                         ti.oom_score_lru = oom_score_adj;
916
917                 if (pai->childs) {
918                         ti.pids = g_array_new(false, false, sizeof(pid_t));
919                         g_array_append_val(ti.pids, ti.pid);
920                         gslist_for_each_item(iterchild, pai->childs) {
921                                 pid_t child = GPOINTER_TO_PID(iterchild->data);
922                                 g_array_append_val(ti.pids, child);
923                         }
924                 } else
925                         ti.pids = NULL;
926
927                 g_array_append_val(candidates, ti);
928         }
929
930         proc_app_list_close();
931
932         if (!candidates->len) {
933                 status = LOWMEM_RECLAIM_NEXT_TYPE;
934                 goto leave;
935         }
936         else {
937                 _D("[LMK] candidate ratio=%d/%d", candidates->len, total_count);
938         }
939
940         for (i = 0; i < candidates->len; i++) {
941                 struct task_info *tsk;
942
943                 tsk = &g_array_index(candidates, struct task_info, i);
944                 tsk->size = lowmem_get_task_mem_usage_rss(tsk);
945         }
946
947         /*
948          * In case of start_oom == OOMADJ_SU,
949          * we're going to try to kill some of processes in /proc
950          * to handle low memory situation.
951          * It can find malicious system process even though it has low oom score.
952          */
953         if (start_oom == OOMADJ_SU)
954                 lowmem_get_pids_proc(candidates);
955
956         g_array_sort(candidates, (GCompareFunc)compare_victims);
957
958         for (i = 0; i < candidates->len; i++) {
959                 struct task_info *tsk;
960
961                 if (i >= max_victims) {
962                         status = LOWMEM_RECLAIM_NEXT_TYPE;
963                         break;
964                 }
965
966                 /*
967                  * Available memory is checking only every
968                  * num_vict_between_check process for reducing burden.
969                  */
970                 if (!(i % num_vict_between_check)) {
971                         if (proc_get_mem_available() > threshold) {
972                                 status = LOWMEM_RECLAIM_DONE;
973                                 break;
974                         }
975                 }
976
977                 if (!(flags & OOM_NOMEMORY_CHECK) &&
978                     total_victim_size >= should_be_freed_kb) {
979                         _D("[LMK] victim=%d, max_victims=%d, total_size=%uKB",
980                                 victim, max_victims, total_victim_size);
981                         status = LOWMEM_RECLAIM_DONE;
982                         break;
983                 }
984
985                 tsk = &g_array_index(candidates, struct task_info, i);
986
987                 status = lowmem_check_kill_continued(tsk, flags);
988                 if (status != LOWMEM_RECLAIM_CONT)
989                         break;
990
991                 _I("[LMK] select victims from proc_app_list pid(%d) with oom_score_adj(%d)\n", tsk->pid, tsk->oom_score_adj);
992
993                 ret = lowmem_kill_victim(tsk, flags, i, &victim_size);
994                 if (ret != RESOURCED_ERROR_NONE)
995                         continue;
996                 victim++;
997                 total_victim_size += victim_size;
998         }
999
1000 leave:
1001         lowmem_free_task_info_array(candidates);
1002         *total_size = total_victim_size;
1003         if(*completed != LOWMEM_RECLAIM_CONT)
1004                 *completed = status;
1005         else
1006                 *completed = LOWMEM_RECLAIM_NEXT_TYPE;
1007         return victim;
1008 }
1009
1010 static int calculate_range_of_oom(enum cgroup_type type, int *min, int *max)
1011 {
1012         if (type == CGROUP_VIP || type >= CGROUP_END || type <= CGROUP_TOP) {
1013                 _E("cgroup type (%d) is out of scope", type);
1014                 return RESOURCED_ERROR_FAIL;
1015         }
1016
1017         *max = cgroup_get_highest_oom_score_adj(type);
1018         *min = cgroup_get_lowest_oom_score_adj(type);
1019
1020         return RESOURCED_ERROR_NONE;
1021 }
1022
/*
 * Serve one reclaim request: compute how much memory must be freed to reach
 * ctl->size, then repeatedly run the killer over cgroups of increasing
 * priority (LOW -> MEDIUM -> HIGH -> ROOT, depending on ctl->flags) until
 * the target is met or no further escalation is allowed.
 * The final LOWMEM_RECLAIM_* outcome is stored in ctl->status.
 */
static void lowmem_handle_request(struct lowmem_control *ctl)
{
	int start_oom, end_oom;
	int count = 0, victim_cnt = 0;
	int max_victim_cnt = ctl->count;
	int status = LOWMEM_RECLAIM_NONE;
	unsigned int available = 0;
	unsigned int total_size = 0;
	unsigned int current_size = 0;
	unsigned int reclaim_size, shortfall = 0;
	enum cgroup_type cgroup_type = ctl->type;

	/* Amount (MB) still missing to reach the requested available size. */
	available = proc_get_mem_available();
	reclaim_size = ctl->size  > available
		     ? ctl->size - available : 0;

	if (!reclaim_size) {
		status = LOWMEM_RECLAIM_DONE;
		goto done;
	}

retry:
	/* Prepare LMK to start doing it's job. Check preconditions. */
	if (calculate_range_of_oom(cgroup_type, &start_oom, &end_oom))
		goto done;

	lmk_start_threshold = get_root_memcg_info()->threshold[MEM_LEVEL_OOM];
	shortfall = is_memory_recovered(&available, ctl->size);

	/* Memory may have recovered while we were escalating: stop early. */
	if (!shortfall || !reclaim_size) {
		status = LOWMEM_RECLAIM_DONE;
		goto done;
	}

	/* precaution */
	current_size = 0;
	victim_cnt = lowmem_kill_victims(max_victim_cnt, start_oom, end_oom,
			    reclaim_size, ctl->flags, &current_size, &status, ctl->size);

	if (victim_cnt) {
		/* lowmem_kill_victims() reports reclaimed size in KB. */
		current_size = KBYTE_TO_MBYTE(current_size);
		reclaim_size -= reclaim_size > current_size
			? current_size : reclaim_size;
		total_size += current_size;
		count += victim_cnt;
		_I("[LMK] current: kill %d victims, reclaim_size=%uMB from %d to %d status=%s",
				victim_cnt, current_size,
				start_oom, end_oom, convert_status_to_str(status));
	}

	if ((status == LOWMEM_RECLAIM_DONE) ||
	    (status == LOWMEM_RECLAIM_DROP) ||
	    (status == LOWMEM_RECLAIM_RETRY))
		goto done;

	/*
	 * If it doesn't finish reclaiming memory in first operation,
		- if flags has OOM_IN_DEPTH,
		   try to find victims again in the active cgroup.
		   otherwise, just return because there is no more victims in the desired cgroup.
		- if flags has OOM_REVISE,
		   it means that resourced can't find victims from proc_app_list.
		   So, it should search victims or malicious process from /proc.
		   But searching /proc leads to abnormal behaviour.
		   (Make sluggish or kill same victims continuously)
		   Thus, otherwise, just return in first operation and wait some period.
	 */
	if (cgroup_type == CGROUP_LOW) {
		cgroup_type = CGROUP_MEDIUM;
		goto retry;
	} else if ((cgroup_type == CGROUP_MEDIUM) && (ctl->flags & OOM_IN_DEPTH)) {
		cgroup_type = CGROUP_HIGH;
		/* Forced reclaim limits how many foreground apps may die. */
		if(ctl->flags & OOM_FORCE)
			max_victim_cnt = FOREGROUND_VICTIMS;
		goto retry;
	} else if ((cgroup_type == CGROUP_HIGH) && (ctl->flags & OOM_IN_DEPTH)) {
		status = LOWMEM_RECLAIM_RETRY;
		ctl->type = CGROUP_ROOT;
	}
	else if (cgroup_type == CGROUP_ROOT) {
		status = LOWMEM_RECLAIM_RETRY;
	}
done:
	_I("[LMK] Done: killed %d processes reclaimed=%uMB remaining=%uMB shortfall=%uMB status=%s",
		count, total_size, reclaim_size, shortfall, convert_status_to_str(status));

	/* After we finish reclaiming it's worth to remove oldest memps logs */
/*	if (count && memlog_enabled)
		request_helper_worker(CLEAR_LOGS, memlog_path, clear_logs, NULL);*/
	ctl->status = status;
}
1114
/*
 * Reclaim worker thread: blocks on the async queue for lowmem_control
 * requests, handles each via lowmem_handle_request(), and retries (with
 * OOM_REVISE) while the request reports LOWMEM_RECLAIM_RETRY, unless it
 * was a single-shot request. Exits when the worker is deactivated.
 */
static void *lowmem_reclaim_worker(void *arg)
{
	struct lowmem_worker *lmw = (struct lowmem_worker *)arg;

	/* Raise our priority: reclaim must outrun memory consumers. */
	setpriority(PRIO_PROCESS, 0, OOM_KILLER_PRIORITY);

	g_async_queue_ref(lmw->queue);

	while (1) {
		int try_count = 0;
		struct lowmem_control *ctl;

		LOWMEM_WORKER_IDLE(lmw);
		/* Wait on any wake-up call */
		ctl = g_async_queue_pop(lmw->queue);

		/*
		 * NOTE(review): if OOM_DROP is set, the request is destroyed
		 * here; the code below still evaluates `ctl`. This presumably
		 * relies on LOWMEM_DESTROY_REQUEST() nulling/invalidating the
		 * pointer so the following `!ctl` check breaks the loop —
		 * confirm against the macro definition.
		 */
		if (ctl->flags & OOM_DROP)
			LOWMEM_DESTROY_REQUEST(ctl);

		if (!LOWMEM_WORKER_IS_ACTIVE(lmw) || !ctl)
			break;

		LOWMEM_WORKER_RUN(lmw);
process_again:
		_D("[LMK] %d tries", ++try_count);
		lowmem_handle_request(ctl);
		/**
		 * Case the process failed to reclaim requested amount of memory
		 * or still under have memory pressure - try the timeout wait.
		 * There is a chance this will get woken-up in a better reality.
		 */
		if (ctl->status == LOWMEM_RECLAIM_RETRY &&
		    !(ctl->flags & OOM_SINGLE_SHOT)) {
			unsigned int available = proc_get_mem_available();

			/* Memory recovered on its own: complete the request. */
			if (available >= ctl->size) {
				_I("[LMK] Memory restored: requested=%uMB available=%uMB\n",
					ctl->size, available);
				ctl->status = LOWMEM_RECLAIM_DONE;
				if (ctl->callback)
					ctl->callback(ctl);
				LOWMEM_DESTROY_REQUEST(ctl);
				LOWMEM_WORKER_IDLE(lmw);
				continue;
			}

			/* Back off briefly, then retry in revise mode. */
			if (LOWMEM_WORKER_IS_ACTIVE(lmw)) {
				g_usleep(LMW_RETRY_WAIT_TIMEOUT_MSEC);
				ctl->flags |= OOM_REVISE;
				goto process_again;
			}
		}

		/*
		 * The ctl callback would check available size again.
		 * And it is last point in reclaiming worker.
		 * Resourced sent SIGKILL signal to victim processes
		 * so it should wait for a some seconds until each processes returns memory.
		 */
		g_usleep(LMW_LOOP_WAIT_TIMEOUT_MSEC);
		if (ctl->callback)
			ctl->callback(ctl);

		/* The lmk becomes the owner of all queued requests .. */
		LOWMEM_DESTROY_REQUEST(ctl);
		LOWMEM_WORKER_IDLE(lmw);
	}
	g_async_queue_unref(lmw->queue);
	pthread_exit(NULL);
}
1185
1186 static void change_lowmem_state(unsigned int mem_state)
1187 {
1188         cur_mem_state = mem_state;
1189         lmk_start_threshold = get_root_memcg_info()->threshold[MEM_LEVEL_OOM];
1190
1191         resourced_notify(RESOURCED_NOTIFIER_MEM_LEVEL_CHANGED,
1192                 (void *)&cur_mem_state);
1193 }
1194
1195 /* only app can call this function
1196  * that is, service cannot call the function
1197  */
1198 static void lowmem_swap_memory(char *path)
1199 {
1200         unsigned int available;
1201
1202         if (cur_mem_state == MEM_LEVEL_HIGH)
1203                 return;
1204
1205         if (swap_get_state() != SWAP_ON)
1206                 return;
1207
1208         available = proc_get_mem_available();
1209         if (cur_mem_state != MEM_LEVEL_LOW &&
1210             available <= get_root_memcg_info()->threshold[MEM_LEVEL_LOW])
1211                 swap_activate_act();
1212
1213         resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
1214         memcg_swap_status = true;
1215 }
1216
1217 void lowmem_trigger_swap(pid_t pid, char *path, bool move)
1218 {
1219         int error;
1220         int oom_score_adj;
1221         int lowest_oom_score_adj;
1222
1223         if (!path) {
1224                 _E("[SWAP] Unknown memory cgroup path to swap");
1225                 return;
1226         }
1227
1228         /* In this case, corresponding process will be moved to memory CGROUP_LOW.
1229          */
1230         if (move) {
1231                 error = proc_get_oom_score_adj(pid, &oom_score_adj);
1232                 if (error) {
1233                         _E("[SWAP] Cannot get oom_score_adj of pid (%d)", pid);
1234                         return;
1235                 }
1236
1237                 lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(CGROUP_LOW);
1238
1239                 if (oom_score_adj < lowest_oom_score_adj) {
1240                         oom_score_adj = lowest_oom_score_adj;
1241                         /* End of this funciton, 'lowmem_swap_memory()' funciton will be called */
1242                         proc_set_oom_score_adj(pid, oom_score_adj, find_app_info(pid));
1243                         return;
1244                 }
1245         }
1246
1247         /* Correponding process is already managed per app or service.
1248          * In addition, if some process is already located in the CGROUP_LOW, then just do swap
1249          */
1250         resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
1251 }
1252
1253 static void memory_level_send_system_event(int lv)
1254 {
1255         bundle *b;
1256         const char *str;
1257
1258         switch (lv) {
1259                 case MEM_LEVEL_HIGH:
1260                 case MEM_LEVEL_MEDIUM:
1261                 case MEM_LEVEL_LOW:
1262                         str = EVT_VAL_MEMORY_NORMAL;
1263                         break;
1264                 case MEM_LEVEL_CRITICAL:
1265                         str = EVT_VAL_MEMORY_SOFT_WARNING;
1266                         break;
1267                 case MEM_LEVEL_OOM:
1268                         str = EVT_VAL_MEMORY_HARD_WARNING;
1269                         break;
1270                 default:
1271                         _E("Invalid state");
1272                         return;
1273         }
1274
1275         b = bundle_create();
1276         if (!b) {
1277                 _E("Failed to create bundle");
1278                 return;
1279         }
1280
1281         bundle_add_str(b, EVT_KEY_LOW_MEMORY, str);
1282         eventsystem_send_system_event(SYS_EVENT_LOW_MEMORY, b);
1283         bundle_free(b);
1284 }
1285
1286 static void high_mem_act(void)
1287 {
1288         int ret, status;
1289
1290         ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
1291         if (ret)
1292                 _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
1293         if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
1294                 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
1295                               VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
1296                 memory_level_send_system_event(MEM_LEVEL_HIGH);
1297         }
1298
1299         change_lowmem_state(MEM_LEVEL_HIGH);
1300
1301         if (swap_get_state() == SWAP_ON && memcg_swap_status) {
1302                 resourced_notify(RESOURCED_NOTIFIER_SWAP_UNSET_LIMIT, get_memcg_info(CGROUP_LOW));
1303                 memcg_swap_status = false;
1304         }
1305         if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
1306                 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
1307                         (void *)CGROUP_FREEZER_ENABLED);
1308 }
1309
1310 static void swap_activate_act(void)
1311 {
1312         int ret, status;
1313
1314         ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
1315         if (ret)
1316                 _E("vconf get failed %s", VCONFKEY_SYSMAN_LOW_MEMORY);
1317
1318         if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
1319                 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
1320                                 VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
1321                 memory_level_send_system_event(MEM_LEVEL_LOW);
1322         }
1323         change_lowmem_state(MEM_LEVEL_LOW);
1324         if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
1325                 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
1326                         (void *)CGROUP_FREEZER_ENABLED);
1327
1328         if (swap_get_state() != SWAP_ON)
1329                 resourced_notify(RESOURCED_NOTIFIER_SWAP_ACTIVATE, NULL);
1330 }
1331
1332 static void dedup_act(enum ksm_scan_mode mode)
1333 {
1334         int ret, status;
1335         int data;
1336
1337         if (dedup_get_state() != DEDUP_ONE_SHOT)
1338                 return;
1339
1340         if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
1341                 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
1342                                 (void *)CGROUP_FREEZER_ENABLED);
1343
1344         if (mode == KSM_SCAN_PARTIAL) {
1345                 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
1346                 if (ret)
1347                         _E("vconf get failed %s", VCONFKEY_SYSMAN_LOW_MEMORY);
1348
1349                 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
1350                         vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
1351                                         VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
1352                         memory_level_send_system_event(MEM_LEVEL_MEDIUM);
1353                 }
1354                 change_lowmem_state(MEM_LEVEL_MEDIUM);
1355
1356                 data = KSM_SCAN_PARTIAL;
1357                 resourced_notify(RESOURCED_NOTIFIER_DEDUP_SCAN, &data);
1358         } else if (mode == KSM_SCAN_FULL) {
1359                 data = KSM_SCAN_FULL;
1360                 resourced_notify(RESOURCED_NOTIFIER_DEDUP_SCAN, &data);
1361         }
1362 }
1363
/*
 * Actions for entering MEM_LEVEL_CRITICAL: record the level change,
 * request swap compaction, and broadcast the soft-warning system event.
 */
static void swap_compact_act(void)
{
	change_lowmem_state(MEM_LEVEL_CRITICAL);
	resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_CRITICAL);
	memory_level_send_system_event(MEM_LEVEL_CRITICAL);
}
1370
1371 static void medium_cb(struct lowmem_control *ctl)
1372 {
1373         if (ctl->status == LOWMEM_RECLAIM_DONE)
1374                 oom_popup = false;
1375         lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
1376 }
1377
/*
 * Actions for entering MEM_LEVEL_OOM: publish the hard-warning state,
 * pause the freezer, queue a reclaim request if available memory is below
 * the leave threshold, and trim resourced's own heap.
 */
static void lmk_act(void)
{
	unsigned int available;
	int ret;
	int status = VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL;

	/*
	 * Don't trigger reclaim worker
	 * if it is already running
	 */
	if (LOWMEM_WORKER_IS_RUNNING(&lmw))
		return;

	ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
	if (ret)
		_D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);

	memory_level_send_system_event(MEM_LEVEL_OOM);
	/* Pause freezing while we are killing, and publish hard warning. */
	if (status != VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING) {
		if (proc_get_freezer_status() == CGROUP_FREEZER_ENABLED)
			resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
				(void *)CGROUP_FREEZER_PAUSED);
		vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
			      VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING);
	}
	available = proc_get_mem_available();

	change_lowmem_state(MEM_LEVEL_OOM);

	/*
	 * Queue a reclaim request targeting the leave threshold; medium_cb
	 * restores MEM_LEVEL_HIGH when it completes.
	 */
	if (available < get_root_memcg_info()->threshold_leave) {
		struct lowmem_control *ctl;

		ctl = LOWMEM_NEW_REQUEST();
		if (ctl) {
			LOWMEM_SET_REQUEST(ctl, OOM_IN_DEPTH,
				CGROUP_LOW, get_root_memcg_info()->threshold_leave,
				num_max_victims, medium_cb);
			lowmem_queue_request(&lmw, ctl);
		}
	}

	resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_OOM);

	/*
	 * Flush resourced memory such as other processes.
	 * Resourced can use both many fast bins and sqlite3 cache memory.
	 */
	malloc_trim(0);

	return;
}
1429
1430 static void lowmem_trigger_memory_state_action(int mem_state)
1431 {
1432         /*
1433          * Check if the state we want to set is different from current
1434          * But it should except this condition if mem_state is already medium.
1435          * Otherwise, recalim worker couldn't run any more.
1436          */
1437         if (mem_state != MEM_LEVEL_OOM && cur_mem_state == mem_state)
1438                 return;
1439
1440         switch (mem_state) {
1441         case MEM_LEVEL_HIGH:
1442                 high_mem_act();
1443                 break;
1444         case MEM_LEVEL_MEDIUM:
1445                 dedup_act(KSM_SCAN_PARTIAL);
1446                 break;
1447         case MEM_LEVEL_LOW:
1448                 swap_activate_act();
1449                 break;
1450         case MEM_LEVEL_CRITICAL:
1451                 dedup_act(KSM_SCAN_FULL);
1452                 swap_compact_act();
1453                 break;
1454         case MEM_LEVEL_OOM:
1455                 lmk_act();
1456                 break;
1457         default:
1458                 assert(0);
1459         }
1460 }
1461
1462 static void lowmem_dump_cgroup_procs(struct memcg_info *mi)
1463 {
1464         int i;
1465         unsigned int size;
1466         pid_t pid;
1467         GArray *pids_array = NULL;
1468
1469         cgroup_get_pids(mi->name, &pids_array);
1470
1471         for (i = 0; i < pids_array->len; i++) {
1472                 pid = g_array_index(pids_array, pid_t, i);
1473                 lowmem_mem_usage_uss(pid, &size);
1474                 _I("pid = %d, size = %u KB", pid, size);
1475         }
1476         g_array_free(pids_array, true);
1477 }
1478
1479 static void memory_cgroup_proactive_lmk_act(enum cgroup_type type, struct memcg_info *mi)
1480 {
1481         struct lowmem_control *ctl;
1482
1483         /* To Do: only start to kill fg victim when no pending fg victim */
1484         lowmem_dump_cgroup_procs(mi);
1485
1486         ctl = LOWMEM_NEW_REQUEST();
1487         if (ctl) {
1488                 LOWMEM_SET_REQUEST(ctl, OOM_SINGLE_SHOT | OOM_IN_DEPTH, type,
1489                         mi->oomleave, num_max_victims, NULL);
1490                 lowmem_queue_request(&lmw, ctl);
1491         }
1492 }
1493
1494 static unsigned int check_mem_state(unsigned int available)
1495 {
1496         int mem_state;
1497         for (mem_state = MEM_LEVEL_MAX - 1; mem_state > MEM_LEVEL_HIGH; mem_state--) {
1498                 if (mem_state != MEM_LEVEL_OOM && available <= get_root_memcg_info()->threshold[mem_state])
1499                         break;
1500                 else if (mem_state == MEM_LEVEL_OOM && available <= lmk_start_threshold)
1501                         break;
1502         }
1503
1504         return mem_state;
1505 }
1506
1507 /*static int load_bg_reclaim_config(struct parse_result *result, void *user_data)
1508 {
1509         if (!result)
1510                 return RESOURCED_ERROR_INVALID_PARAMETER;
1511
1512         if (strncmp(result->section, MEM_BG_RECLAIM_SECTION, strlen(MEM_BG_RECLAIM_SECTION)+1))
1513                 return RESOURCED_ERROR_NONE;
1514
1515         if (!strncmp(result->name, MEM_BG_RECLAIM_STRING, strlen(MEM_BG_RECLAIM_STRING)+1)) {
1516                 if (!strncmp(result->value, "yes", strlen("yes")+1))
1517                         bg_reclaim = true;
1518                 else if (!strncmp(result->value, "no", strlen("no")+1))
1519                         bg_reclaim = false;
1520         }
1521
1522
1523         return RESOURCED_ERROR_NONE;
1524 }
1525
1526 static int load_popup_config(struct parse_result *result, void *user_data)
1527 {
1528         if (!result)
1529                 return RESOURCED_ERROR_INVALID_PARAMETER;
1530
1531         if (strncmp(result->section, MEM_POPUP_SECTION, strlen(MEM_POPUP_SECTION)+1))
1532                 return RESOURCED_ERROR_NONE;
1533
1534         if (!strncmp(result->name, MEM_POPUP_STRING, strlen(MEM_POPUP_STRING)+1)) {
1535                 if (!strncmp(result->value, "yes", strlen("yes")+1))
1536                         oom_popup_enable = true;
1537                 else if (!strncmp(result->value, "no", strlen("no")+1))
1538                         oom_popup_enable = false;
1539         }
1540
1541
1542         return RESOURCED_ERROR_NONE;
1543 }
1544
1545 static int load_mem_log_config(struct parse_result *result, void *user_data)
1546 {
1547         if (!result)
1548                 return RESOURCED_ERROR_INVALID_PARAMETER;
1549
1550         if (strncmp(result->section, MEM_LOGGING_SECTION, strlen(MEM_LOGGING_SECTION)+1))
1551                 return RESOURCED_ERROR_NONE;
1552
1553         if (!strncmp(result->name, "Enable", strlen("Enable")+1)) {
1554                 memlog_enabled = atoi(result->value);
1555         } else if (!strncmp(result->name, "LogPath", strlen("LogPath")+1)) {
1556                 memlog_path = strdup(result->value);
1557         } else if (!strncmp(result->name, "MaxNumLogfile", strlen("MaxNumLogfile")+1)) {
1558                 memlog_nr_max = atoi(result->value);
1559                 memlog_remove_batch_thres = (memlog_nr_max * 5) / 6;
1560         } else if (!strncmp(result->name, "PrefixMemps", strlen("PrefixMemps")+1)) {
1561                 memlog_prefix[MEMLOG_MEMPS] = strdup(result->value);
1562         } else if (!strncmp(result->name, "PrefixMempsMemLimit", strlen("PrefixMempsMemLimit")+1)) {
1563                 memlog_prefix[MEMLOG_MEMPS_MEMLIMIT] = strdup(result->value);
1564         }
1565
1566         return RESOURCED_ERROR_NONE;
1567 }
1568
1569 static int set_memory_config(struct parse_result *result, void *user_data)
1570 {
1571         if (!result)
1572                 return RESOURCED_ERROR_NONE;
1573
1574         if (strncmp(result->section, MEM_SECTION, strlen(MEM_SECTION)+1))
1575                 return RESOURCED_ERROR_NONE;
1576
1577         if (!strncmp(result->name, "ThresholdDedup", strlen("ThresholdDedup")+1)) {
1578                 int value = atoi(result->value);
1579                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, value);
1580         } else if (!strncmp(result->name, "ThresholdSwap", strlen("ThresholdSwap")+1)) {
1581                 int value = atoi(result->value);
1582                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, value);
1583         } else if (!strncmp(result->name, "ThresholdLow", strlen("ThresholdLow")+1)) {
1584                 int value = atoi(result->value);
1585                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, value);
1586         } else if (!strncmp(result->name, "ThresholdMedium", strlen("ThresholdMedium")+1)) {
1587                 int value = atoi(result->value);
1588                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, value);
1589         } else if (!strncmp(result->name, "ThresholdLeave", strlen("ThresholdLeave")+1)) {
1590                 int value = atoi(result->value);
1591                 memcg_set_leave_threshold(CGROUP_ROOT, value);
1592         } else if (!strncmp(result->name, "ThresholdRatioDedup", strlen("ThresholdRatioDedup")+1)) {
1593                 double ratio = atoi(result->value);
1594                 int value = (double)totalram * ratio / 100.0;
1595                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, BYTE_TO_MBYTE(value));
1596         } else if (!strncmp(result->name, "ThresholdRatioSwap", strlen("ThresholdRatioSwap")+1)) {
1597                 double ratio = atoi(result->value);
1598                 int value = (double)totalram * ratio / 100.0;
1599                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, BYTE_TO_MBYTE(value));
1600         } else if (!strncmp(result->name, "ThresholdRatioLow", strlen("ThresholdRatioLow")+1)) {
1601                 double ratio = atoi(result->value);
1602                 int value = (double)totalram * ratio / 100.0;
1603                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, BYTE_TO_MBYTE(value));
1604         } else if (!strncmp(result->name, "ThresholdRatioMedium", strlen("ThresholdRatioMedium")+1)) {
1605                 double ratio = atoi(result->value);
1606                 int value = (double)totalram * ratio / 100.0;
1607                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, BYTE_TO_MBYTE(value));
1608         } else if (!strncmp(result->name, "ThresholdRatioLeave", strlen("ThresholdRatioLeave")+1)) {
1609                 double ratio = atoi(result->value);
1610                 int value = (double)totalram * ratio / 100.0;
1611                 memcg_set_leave_threshold(CGROUP_ROOT, BYTE_TO_MBYTE(value));
1612         } else if (!strncmp(result->name, "ForegroundRatio", strlen("ForegroundRatio")+1)) {
1613                 float ratio = atof(result->value);
1614                 memcg_info_set_limit(get_memcg_info(CGROUP_HIGH), ratio, totalram);
1615         } else if (!strncmp(result->name, "BackgroundRatio", strlen("BackgroundRatio")+1)) {
1616                 float ratio = atof(result->value);
1617                 memcg_info_set_limit(get_memcg_info(CGROUP_MEDIUM), ratio, totalram);
1618         } else if (!strncmp(result->name, "LowRatio", strlen("LowRatio")+1)) {
1619                 float ratio = atof(result->value);
1620                 memcg_info_set_limit(get_memcg_info(CGROUP_LOW), ratio, totalram);
1621         } else if (!strncmp(result->name, "NumMaxVictims", strlen("NumMaxVictims")+1)) {
1622                 int value = atoi(result->value);
1623                 num_max_victims = value;
1624                 num_vict_between_check = value > MAX_MEMORY_CGROUP_VICTIMS/2
1625                                                 ? 3 : value > MAX_MEMORY_CGROUP_VICTIMS/4
1626                                                                 ? 2 : 1;
1627         } else if (!strncmp(result->name, "ProactiveThreshold", strlen("ProactiveThreshold")+1)) {
1628                 int value = atoi(result->value);
1629                 proactive_threshold = value;
1630         } else if (!strncmp(result->name, "ProactiveLeave", strlen("ProactiveLeave")+1)) {
1631                 int value = atoi(result->value);
1632                 proactive_leave = value;
1633         } else if (!strncmp(result->name, "EventLevel", strlen("EventLevel")+1)) {
1634                 if (strncmp(event_level, result->value, strlen(event_level)))
1635                         event_level = strdup(result->value);
1636                 if (!event_level)
1637                         return RESOURCED_ERROR_OUT_OF_MEMORY;
1638         } else if (!strncmp(result->name, "SWAPPINESS", strlen("SWAPPINESS")+1)) {
1639                 int value = atoi(result->value);
1640                 memcg_set_default_swappiness(value);
1641                 memcg_info_set_swappiness(get_memcg_info(CGROUP_ROOT), value);
1642         } else if (!strncmp(result->name, "FOREGROUND_SWAPPINESS", strlen("FOREGROUND_SWAPPINESS")+1)) {
1643                 int value = atoi(result->value);
1644                 memcg_info_set_swappiness(get_memcg_info(CGROUP_HIGH), value);
1645         } else if (!strncmp(result->name, "BACKGROUND_SWAPPINESS", strlen("BACKGROUND_SWAPPINESS")+1)) {
1646                 int value = atoi(result->value);
1647                 memcg_info_set_swappiness(get_memcg_info(CGROUP_MEDIUM), value);
1648         } else if (!strncmp(result->name, "LOW_SWAPPINESS", strlen("LOW_SWAPPINESS")+1)) {
1649                 int value = atoi(result->value);
1650                 memcg_info_set_swappiness(get_memcg_info(CGROUP_LOW), value);
1651         } else if (!strncmp(result->name, "NumFragSize", strlen("NumFragSize")+1)) {
1652                 fragmentation_size = atoi(result->value);
1653         }
1654
1655         return RESOURCED_ERROR_NONE;
1656 }*/
1657
1658 /* setup memcg parameters depending on total ram size. */
1659 static void setup_memcg_params(void)
1660 {
1661         unsigned long long total_ramsize;
1662
1663         get_total_memory();
1664         total_ramsize = BYTE_TO_MBYTE(totalram);
1665
1666         _D("Total: %llu MB", total_ramsize);
1667         if (total_ramsize <= MEM_SIZE_64) {
1668                 /* set thresholds for ram size 64M */
1669                 proactive_threshold = PROACTIVE_64_THRES;
1670                 proactive_leave = PROACTIVE_64_LEAVE;
1671                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_64_THRES_DEDUP);
1672                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_64_THRES_SWAP);
1673                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_64_THRES_LOW);
1674                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_64_THRES_MEDIUM);
1675                 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_64_THRES_LEAVE);
1676                 num_max_victims = CGROUP_ROOT_64_NUM_VICTIMS;
1677         } else if (total_ramsize <= MEM_SIZE_256) {
1678                 /* set thresholds for ram size 256M */
1679                 proactive_threshold = PROACTIVE_256_THRES;
1680                 proactive_leave = PROACTIVE_256_LEAVE;
1681                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_256_THRES_DEDUP);
1682                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_256_THRES_SWAP);
1683                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_256_THRES_LOW);
1684                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_256_THRES_MEDIUM);
1685                 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_256_THRES_LEAVE);
1686                 num_max_victims = CGROUP_ROOT_256_NUM_VICTIMS;
1687         } else if (total_ramsize <= MEM_SIZE_448) {
1688                 /* set thresholds for ram size 448M */
1689                 proactive_threshold = PROACTIVE_448_THRES;
1690                 proactive_leave = PROACTIVE_448_LEAVE;
1691                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_448_THRES_DEDUP);
1692                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_448_THRES_SWAP);
1693                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_448_THRES_LOW);
1694                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_448_THRES_MEDIUM);
1695                 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_448_THRES_LEAVE);
1696                 num_max_victims = CGROUP_ROOT_448_NUM_VICTIMS;
1697         } else if (total_ramsize <= MEM_SIZE_512) {
1698                 /* set thresholds for ram size 512M */
1699                 proactive_threshold = PROACTIVE_512_THRES;
1700                 proactive_leave = PROACTIVE_512_LEAVE;
1701                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_512_THRES_DEDUP);
1702                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_512_THRES_SWAP);
1703                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_512_THRES_LOW);
1704                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_512_THRES_MEDIUM);
1705                 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_512_THRES_LEAVE);
1706                 num_max_victims = CGROUP_ROOT_512_NUM_VICTIMS;
1707         }  else if (total_ramsize <= MEM_SIZE_768) {
1708                 /* set thresholds for ram size 512M */
1709                 proactive_threshold = PROACTIVE_768_THRES;
1710                 proactive_leave = PROACTIVE_768_LEAVE;
1711                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_768_THRES_DEDUP);
1712                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_768_THRES_SWAP);
1713                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_768_THRES_LOW);
1714                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_768_THRES_MEDIUM);
1715                 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_768_THRES_LEAVE);
1716                 num_max_victims = CGROUP_ROOT_768_NUM_VICTIMS;
1717         } else if (total_ramsize <= MEM_SIZE_1024) {
1718                 /* set thresholds for ram size more than 1G */
1719                 proactive_threshold = PROACTIVE_1024_THRES;
1720                 proactive_leave = PROACTIVE_1024_LEAVE;
1721                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_1024_THRES_DEDUP);
1722                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_1024_THRES_SWAP);
1723                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_1024_THRES_LOW);
1724                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_1024_THRES_MEDIUM);
1725                 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_1024_THRES_LEAVE);
1726                 num_max_victims = CGROUP_ROOT_1024_NUM_VICTIMS;
1727         } else if (total_ramsize <= MEM_SIZE_2048) {
1728                 proactive_threshold = PROACTIVE_2048_THRES;
1729                 proactive_leave = PROACTIVE_2048_LEAVE;
1730                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_2048_THRES_DEDUP);
1731                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_2048_THRES_SWAP);
1732                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_2048_THRES_LOW);
1733                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_2048_THRES_MEDIUM);
1734                 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_2048_THRES_LEAVE);
1735                 num_max_victims = CGROUP_ROOT_2048_NUM_VICTIMS;
1736         } else {
1737                 proactive_threshold = PROACTIVE_3072_THRES;
1738                 proactive_leave = PROACTIVE_3072_LEAVE;
1739                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_3072_THRES_DEDUP);
1740                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_3072_THRES_SWAP);
1741                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_3072_THRES_LOW);
1742                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_3072_THRES_MEDIUM);
1743                 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_3072_THRES_LEAVE);
1744                 num_max_victims = CGROUP_ROOT_3072_NUM_VICTIMS;
1745         }
1746 }
1747
/*
 * Move a process (and its app bookkeeping, when available) into the memory
 * cgroup that corresponds to @next_oom_score_adj.
 *
 * @pid                 target process id
 * @next_oom_score_adj  new oom score adj; mapped to a cgroup type via
 *                      cgroup_get_type()
 * @pai                 app info for the process, or NULL for processes
 *                      without app bookkeeping (then only the cgroup file
 *                      is written)
 */
static void lowmem_move_memcgroup(int pid, int next_oom_score_adj, struct proc_app_info *pai)
{
	int cur_oom_score_adj;
	int cur_memcg_idx;
	struct memcg_info *mi;
	int next_memcg_idx = cgroup_get_type(next_oom_score_adj);

	/* only the VIP..LOW range maps to a managed memory cgroup */
	if(next_memcg_idx < CGROUP_VIP || next_memcg_idx > CGROUP_LOW) {
		_E("cgroup type (%d) should not be called", next_memcg_idx);
		return;
	}
	mi = get_memcg_info(next_memcg_idx);

	if (!mi) {
		return;
	}

	/* no app info: just place the pid into the target cgroup */
	if (!pai) {
		cgroup_write_pid_fullpath(mi->name, pid);
		return;
	}

	/* parent pid */
	if (pai->main_pid == pid) {
		cur_oom_score_adj = pai->memory.oom_score_adj;
		cur_memcg_idx = cgroup_get_type(cur_oom_score_adj);

		/* -1 means that this pid is not yet registered at the memory cgroup
		 * plz, reference proc_create_app_info function
		 */
		if (cur_oom_score_adj != OOMADJ_APP_MAX + 10) {
			/* VIP processes should not be asked to move. */
			if (cur_memcg_idx <= CGROUP_VIP) {
				_E("[DEBUG] current cgroup (%s) cannot be VIP or Root", convert_cgroup_type_to_str(cur_memcg_idx));
				return;
			}
		}

		_I("app (%s) memory cgroup move from %s to %s", pai->appid, convert_cgroup_type_to_str(cur_memcg_idx), convert_cgroup_type_to_str(next_memcg_idx));

		if (cur_oom_score_adj == next_oom_score_adj) {
			_D("next oom_score_adj (%d) is same with current one", next_oom_score_adj);
			return;
		}

		/* record the new state before touching limits/cgroup files */
		proc_set_process_memory_state(pai, next_memcg_idx, mi, next_oom_score_adj);

		/* NOTE(review): a zero/false return here skips the cgroup write
		 * below — presumably the limit cgroup already holds the pid;
		 * confirm against lowmem_limit_move_cgroup(). */
		if (!lowmem_limit_move_cgroup(pai))
			return;

		if(cur_memcg_idx == next_memcg_idx)
			return;

		cgroup_write_pid_fullpath(mi->name, pid);
		/* demotion to LOW also kicks swap for that cgroup */
		if (next_memcg_idx == CGROUP_LOW)
			lowmem_swap_memory(get_memcg_info(CGROUP_LOW)->name);
	}
	/* child pid */
	else {
		/* apps under a per-app memory limit keep their dedicated cgroup */
		if (pai->memory.use_mem_limit)
			return;

		cgroup_write_pid_fullpath(mi->name, pid);
	}
}
1813
1814 static int lowmem_activate_worker(void)
1815 {
1816         int ret = RESOURCED_ERROR_NONE;
1817
1818         if (LOWMEM_WORKER_IS_ACTIVE(&lmw)) {
1819                 return ret;
1820         }
1821
1822         lmw.queue = g_async_queue_new_full(lowmem_request_destroy);
1823         if (!lmw.queue) {
1824                 _E("Failed to create request queue\n");
1825                 return RESOURCED_ERROR_FAIL;
1826         }
1827         LOWMEM_WORKER_ACTIVATE(&lmw);
1828         ret = pthread_create(&lmw.worker_thread, NULL,
1829                 (void *)lowmem_reclaim_worker, (void *)&lmw);
1830         if (ret) {
1831                 LOWMEM_WORKER_DEACTIVATE(&lmw);
1832                 _E("Failed to create LMK thread: %d\n", ret);
1833         } else {
1834                 pthread_detach(lmw.worker_thread);
1835                 ret = RESOURCED_ERROR_NONE;
1836         }
1837         return ret;
1838 }
1839
/*
 * Stop the LMK worker: mark it inactive, drain every pending request, then
 * push a final OOM_DROP request (presumably so the detached worker wakes,
 * observes the inactive flag and exits — the worker loop is defined
 * elsewhere; confirm) and release this side's queue reference.
 */
static void lowmem_deactivate_worker(void)
{
	struct lowmem_control *ctl;

	if (!LOWMEM_WORKER_IS_ACTIVE(&lmw))
		return;

	LOWMEM_WORKER_DEACTIVATE(&lmw);
	lowmem_drain_queue(&lmw);

	ctl = LOWMEM_NEW_REQUEST();
	if (!ctl) {
		/* without the wake-up request the worker cannot be told to stop */
		_E("Critical - g_slice alloc failed - Lowmem cannot be deactivated");
		return;
	}
	ctl->flags = OOM_DROP;
	g_async_queue_push(lmw.queue, ctl);
	g_async_queue_unref(lmw.queue);
}
1859
/* Drain the pending 8-byte eventfd counter from @fd; the value itself is
 * discarded.  Returns the read() result (bytes read, or -1 on error). */
static int lowmem_press_eventfd_read(int fd)
{
	uint64_t discarded;
	ssize_t nr;

	nr = read(fd, &discarded, sizeof(discarded));
	return (int)nr;
}
1866
/* Pressure handler for the root memcg: re-evaluate the memory state only
 * when the available-memory figure actually changed since the last call. */
static void lowmem_press_root_cgroup_handler(void)
{
	static unsigned int last_seen;
	unsigned int avail_now;

	avail_now = proc_get_mem_available();
	if (avail_now == last_seen)
		return;

	last_seen = avail_now;
	lowmem_trigger_memory_state_action(check_mem_state(avail_now));
}
1882
1883 static void lowmem_press_cgroup_handler(enum cgroup_type type, struct memcg_info *mi)
1884 {
1885         unsigned int usage, threshold;
1886         int ret;
1887
1888         ret = memcg_get_anon_usage(mi->name, &usage);
1889         if (ret) {
1890                 _D("getting anonymous memory usage fails");
1891                 return;
1892         }
1893
1894         threshold = mi->threshold[MEM_LEVEL_OOM];
1895         if (usage >= threshold)
1896                 memory_cgroup_proactive_lmk_act(type, mi);
1897         else
1898                 _I("anon page %u MB < medium threshold %u MB", BYTE_TO_MBYTE(usage),
1899                                 BYTE_TO_MBYTE(threshold));
1900 }
1901
1902 static bool lowmem_press_eventfd_handler(int fd, void *data)
1903 {
1904         struct memcg_info *mi;
1905         enum cgroup_type type = CGROUP_ROOT;
1906
1907         // FIXME: probably shouldn't get ignored
1908         if (lowmem_press_eventfd_read(fd) < 0)
1909                 _E("Failed to read lowmem press event, %m\n");
1910
1911         for (type = CGROUP_ROOT; type < CGROUP_END; type++) {
1912                 if (!get_cgroup_tree(type) || !get_memcg_info(type))
1913                         continue;
1914                 mi = get_memcg_info(type);
1915                 if (fd == mi->evfd) {
1916                         /* call low memory handler for this memcg */
1917                         if (type == CGROUP_ROOT)
1918                                 lowmem_press_root_cgroup_handler();
1919                         else {
1920                                 lowmem_press_cgroup_handler(type, mi);
1921                         }
1922                         return true;
1923                 }
1924         }
1925
1926         return true;
1927 }
1928
/*
 * Register a memory-pressure eventfd for the given memcg and hook it into
 * the main loop via add_fd_read_handler().
 *
 * Returns 0 on success or when the cgroup has no OOM threshold configured
 * (LOWMEM_THRES_INIT), and -errno when eventfd registration fails.
 *
 * NOTE(review): 'handler' is a single static slot shared by every call, so
 * each registration overwrites the previous handle; also the return value
 * of add_fd_read_handler() is ignored — confirm both are intentional.
 */
static int lowmem_press_register_eventfd(struct memcg_info *mi)
{
	int evfd;
	const char *name = mi->name;
	static fd_handler_h handler;

	/* skip cgroups whose OOM threshold was never configured */
	if (mi->threshold[MEM_LEVEL_OOM] == LOWMEM_THRES_INIT)
		return 0;

	evfd = memcg_set_eventfd(name, MEMCG_EVENTFD_MEMORY_PRESSURE,
			event_level);

	if (evfd < 0) {
		/* preserve errno before logging can clobber it */
		int saved_errno = errno;
		_E("fail to register event press fd %s cgroup", name);
		return -saved_errno;
	}

	mi->evfd = evfd;

	_I("register event fd success for %s cgroup", name);
	add_fd_read_handler(evfd, lowmem_press_eventfd_handler, NULL, NULL, &handler);
	return 0;
}
1953
1954 static int lowmem_press_setup_eventfd(void)
1955 {
1956         unsigned int i;
1957
1958         for (i = CGROUP_ROOT; i < CGROUP_END; i++) {
1959                 if (!get_use_hierarchy(i))
1960                         continue;
1961
1962                 lowmem_press_register_eventfd(get_memcg_info(i));
1963         }
1964         return RESOURCED_ERROR_NONE;
1965 }
1966
/* Completion callback for forced reclaim requests: re-evaluate the memory
 * state from current availability (force=0 means the passed level is only
 * a fallback and the real state is recomputed). */
static void lowmem_force_reclaim_cb(struct lowmem_control *ctl)
{
	lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
}
1971
1972 int lowmem_trigger_reclaim(int flags, int victims, enum cgroup_type type, int threshold)
1973 {
1974         struct lowmem_control *ctl = LOWMEM_NEW_REQUEST();
1975
1976         if (!ctl)
1977                 return -ENOMEM;
1978
1979         flags |= OOM_FORCE | OOM_IN_DEPTH | OOM_SINGLE_SHOT;
1980         victims = victims > 0 ? victims : MAX_MEMORY_CGROUP_VICTIMS;
1981         type = type > 0 ? type : CGROUP_LOW;
1982         threshold = threshold > 0 ? threshold : get_root_memcg_info()->threshold_leave;
1983
1984         lowmem_change_memory_state(MEM_LEVEL_CRITICAL, 1);
1985         LOWMEM_SET_REQUEST(ctl, flags,
1986                 type, threshold, victims,
1987                 lowmem_force_reclaim_cb);
1988         lowmem_queue_request(&lmw, ctl);
1989
1990         return 0;
1991 }
1992
1993 void lowmem_trigger_swap_reclaim(enum cgroup_type type, int swap_size)
1994 {
1995         int size, victims;
1996
1997         victims = num_max_victims  > MAX_PROACTIVE_HIGH_VICTIMS
1998                                  ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
1999
2000         size = get_root_memcg_info()->threshold_leave + BYTE_TO_MBYTE(swap_size);
2001         _I("reclaim from swap module, type : %d, size : %d, victims: %d", type, size, victims);
2002         lowmem_trigger_reclaim(0, victims, type, size);
2003 }
2004
2005 bool lowmem_fragmentated(void)
2006 {
2007         struct buddyinfo bi;
2008         int ret;
2009
2010         ret = proc_get_buddyinfo("Normal", &bi);
2011         if (ret < 0)
2012                 return false;
2013
2014         /*
2015          * The fragmentation_size is the minimum count of order-2 pages in "Normal" zone.
2016          * If total buddy pages is smaller than fragmentation_size,
2017          * resourced will detect kernel memory is fragmented.
2018          * Default value is zero in low memory device.
2019          */
2020         if (bi.page[PAGE_32K] + (bi.page[PAGE_64K] << 1) + (bi.page[PAGE_128K] << 2) +
2021                 (bi.page[PAGE_256K] << 3) < fragmentation_size) {
2022                 _I("fragmentation detected, need to execute proactive oom killer");
2023                 return true;
2024         }
2025         return false;
2026 }
2027
/*
 * Proactively free memory ahead of an app launch so the launch itself does
 * not push the system into the OOM level.
 *
 * @flags  process flags of the launching app (PROC_LARGEMEMORY matters)
 * @appid  application id, used to look up historical memory usage when
 *         HEART_SUPPORT is compiled in
 *
 * Does nothing when memory is either already below the OOM threshold
 * (reactive LMK will handle it) or comfortably above proactive_leave.
 */
static void lowmem_proactive_oom_killer(int flags, char *appid)
{
	unsigned int before;
	int victims;

	before = proc_get_mem_available();

	/* If memory state is medium or normal, just return and kill in oom killer */
	if (before < get_root_memcg_info()->threshold[MEM_LEVEL_OOM] || before > proactive_leave)
		return;

	victims = num_max_victims  > MAX_PROACTIVE_HIGH_VICTIMS
				 ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;

#ifdef HEART_SUPPORT
	/*
	 * This branch is used only when HEART module is compiled in and
	 * it's MEMORY module must be enabled. Otherwise this is skipped.
	 */
	struct heart_memory_data *md = heart_memory_get_memdata(appid, DATA_LATEST);
	if (md) {
		unsigned int rss, after, size;

		rss = KBYTE_TO_MBYTE(md->avg_rss);

		free(md);

		/* predict availability after launch from the app's average RSS */
		after = before - rss;
		/*
		 * after launching app, ensure that available memory is
		 * above threshold_leave
		 */
		if (after >= get_root_memcg_info()->threshold[MEM_LEVEL_OOM])
			return;

		if (proactive_threshold - rss >= get_root_memcg_info()->threshold[MEM_LEVEL_OOM])
			size = proactive_threshold;
		else
			size = rss + get_root_memcg_info()->threshold[MEM_LEVEL_OOM] + THRESHOLD_MARGIN;

		_D("history based proactive LMK : avg rss %u, available %u required = %u MB",
			rss, before, size);
		lowmem_trigger_reclaim(0, victims, CGROUP_LOW, size);

		return;
	}
#endif

	/*
	 * When there is no history data for the launching app,
	 * it is necessary to check current fragmentation state or application manifest file.
	 * So, resourced feels proactive LMK is required, run oom killer based on dynamic
	 * threshold.
	 */
	if (lowmem_fragmentated())
		goto reclaim;

	/*
	 * run proactive oom killer only when available is larger than
	 * dynamic process threshold
	 */
	if (!proactive_threshold || before >= proactive_threshold)
		return;

	/* only large-memory apps justify a threshold-based proactive kill */
	if (!(flags & PROC_LARGEMEMORY))
		return;

reclaim:
	/*
	 * free THRESHOLD_MARGIN more than real should be freed,
	 * because launching app is consuming up the memory.
	 */
	_D("Run threshold based proactive LMK: memory level to reach: %u\n",
		proactive_leave + THRESHOLD_MARGIN);
	lowmem_trigger_reclaim(0, victims, CGROUP_LOW, proactive_leave + THRESHOLD_MARGIN);
}
2104
/* Accessor for the proactive LMK trigger threshold (set in
 * setup_memcg_params() / load_configs()). */
unsigned int lowmem_get_proactive_thres(void)
{
	return proactive_threshold;
}
2109
2110 static int lowmem_prelaunch_handler(void *data)
2111 {
2112         struct proc_status *ps = (struct proc_status *)data;
2113         struct proc_app_info *pai = ps->pai;
2114
2115         if (!pai || CHECK_BIT(pai->flags, PROC_SERVICEAPP))
2116                 return RESOURCED_ERROR_NONE;
2117
2118         lowmem_proactive_oom_killer(ps->pai->flags, ps->pai->appid);
2119         return RESOURCED_ERROR_NONE;
2120 }
2121
2122 int lowmem_control_handler(void *data)
2123 {
2124         struct lowmem_control_data *lowmem_data;
2125
2126         lowmem_data = (struct lowmem_control_data *)data;
2127         switch (lowmem_data->control_type) {
2128         case LOWMEM_MOVE_CGROUP:
2129                 lowmem_move_memcgroup((pid_t)lowmem_data->pid,
2130                                         lowmem_data->oom_score_adj, lowmem_data->pai);
2131                 break;
2132         default:
2133                 break;
2134         }
2135         return RESOURCED_ERROR_NONE;
2136 }
2137
2138 static int lowmem_bg_reclaim_handler(void *data)
2139 {
2140         if (swap_get_state() != SWAP_ON)
2141                 return RESOURCED_ERROR_NONE;
2142
2143         if (!bg_reclaim)
2144                 return RESOURCED_ERROR_NONE;
2145
2146         /*
2147          * Proactively reclaiming memory used by long-lived background processes
2148          * (such as widget instances) may be efficient on devices with limited
2149          * memory constraints. The pages used by such processes could be reclaimed
2150          * (if swap is enabled) earlier than they used to while minimizing the
2151          * impact on the user experience.
2152          */
2153         resourced_notify(RESOURCED_NOTIFIER_SWAP_START, get_memcg_info(CGROUP_MEDIUM)->name);
2154
2155         return RESOURCED_ERROR_NONE;
2156 }
2157
2158 static int calculate_threshold_size(double ratio)
2159 {
2160         int size = (double)totalram * ratio / 100.0;
2161         return size;
2162 }
2163
/*
 * Apply the parsed memcg configuration (limits, level thresholds, OOM popup
 * and per-app-type limits/actions) and release the config structure.
 *
 * NOTE(review): the 'path' parameter is never used here — the values come
 * from get_memcg_conf(); presumably the file at 'path' was parsed earlier.
 * Confirm against the config-parser module.
 */
static void load_configs(const char *path)
{
	struct memcg_conf *memcg_conf = get_memcg_conf();

	/* set MemoryGroupLimit section */
	for (int cgroup = CGROUP_VIP; cgroup < CGROUP_END; cgroup++) {
		if (memcg_conf->cgroup_limit[cgroup] > 0.0)
			memcg_info_set_limit(get_memcg_info(cgroup),
					memcg_conf->cgroup_limit[cgroup]/100.0, totalram);
	}

	/* set MemoryLevelThreshold section */
	for (int lvl = MEM_LEVEL_MEDIUM; lvl < MEM_LEVEL_MAX; lvl++) {
		/* percent-style thresholds are converted against total RAM;
		 * absolute thresholds are applied as-is */
		if (memcg_conf->threshold[lvl].percent &&
			memcg_conf->threshold[lvl].threshold > 0)
			memcg_set_threshold(CGROUP_ROOT, lvl,
					calculate_threshold_size(memcg_conf->threshold[lvl].threshold));
		else if (memcg_conf->threshold[lvl].threshold > 0)
			memcg_set_threshold(CGROUP_ROOT, lvl,
					memcg_conf->threshold[lvl].threshold);
	}
	oom_popup_enable = memcg_conf->oom_popup;

	/* set MemoryAppTypeLimit and MemoryAppStatusLimit section */
	lowmem_memory_init(memcg_conf->service.memory, memcg_conf->widget.memory,
			memcg_conf->guiapp.memory, memcg_conf->background.memory);
	lowmem_action_init(memcg_conf->service.action, memcg_conf->widget.action,
			memcg_conf->guiapp.action, memcg_conf->background.action);

	free_memcg_conf();
}
2195
2196 static void print_mem_configs(void)
2197 {
2198         /* print info of Memory section */
2199         for (int cgroup = CGROUP_VIP; cgroup < CGROUP_END; cgroup++) {
2200                 _I("[DEBUG] set memory for cgroup '%s' to %u bytes",
2201                                 convert_cgroup_type_to_str(cgroup), get_memcg_info(cgroup)->limit);     
2202         }
2203
2204         for (int mem_lvl = 0; mem_lvl < MEM_LEVEL_MAX; mem_lvl++)
2205                 _I("[DEBUG] set threshold for memory level '%s' to %u MB",
2206                                 convert_memstate_to_str(mem_lvl), get_root_memcg_info()->threshold[mem_lvl]);
2207
2208         _I("[DEBUG] set number of max victims as %d", num_max_victims);
2209         _I("[DEBUG] set threshold leave to %u MB", get_root_memcg_info()->threshold_leave);
2210         _I("[DEBUG] set proactive threshold to %u MB", proactive_threshold);
2211         _I("[DEBUG] set proactive low memory killer leave to %u MB", proactive_leave);
2212
2213         /* print info of POPUP section */
2214         _I("[DEBUG] oom popup is %s", oom_popup_enable == true ? "enabled" : "disabled");
2215
2216         /* print info of BackgroundReclaim section */
2217         _I("[DEBUG] Background reclaim is %s", bg_reclaim == true ? "enabled" : "disabled");
2218
2219         /* print info of Logging section */
2220 /*      _I("memory logging is %s", memlog_enabled == 1 ? "enabled" : "disabled");
2221         _I("memory logging path is %s", memlog_path);
2222         _I("the max number of memory logging is %d", memlog_nr_max);
2223         _I("the batch threshold of memory log is %d", memlog_remove_batch_thres);
2224         _I("prefix of memps is %s", memlog_prefix[MEMLOG_MEMPS]);
2225         _I("prefix of memlimit memps is %s", memlog_prefix[MEMLOG_MEMPS_MEMLIMIT]);*/
2226 }
2227
2228 #include "file-helper.h"
2229
/* To Do: should we need lowmem_fd_start, lowmem_fd_stop ?? */
/*
 * Module init: build the memory cgroup hierarchy, apply RAM-size defaults
 * and config-file overrides, start the LMK worker thread, register the
 * pressure eventfds, and hook the notifier callbacks.
 *
 * Returns RESOURCED_ERROR_NONE on success; on failure returns the error of
 * the first failing step (later steps are skipped).
 */
static int lowmem_init(void)
{
	int ret = RESOURCED_ERROR_NONE;

	_D("[DEBUG] resourced memory init start");

	/* init memcg */
	ret = cgroup_make_full_subdir(MEMCG_PATH);
	ret_value_msg_if(ret < 0, ret, "memory cgroup init failed\n");
	memcg_params_init();

	/* RAM-size based defaults, possibly overridden by the config file */
	setup_memcg_params();

	/* default configuration */
	load_configs(MEM_CONF_FILE);

	/* this function should be called after parsing configurations */
	memcg_write_limiter_params();
	print_mem_configs();

	/* make a worker thread called low memory killer */
	ret = lowmem_activate_worker();
	if (ret) {
		_E("[DEBUG] oom thread create failed\n");
		return ret;
	}

	/* register threshold and event fd */
	ret = lowmem_press_setup_eventfd();
	if (ret) {
		_E("[DEBUG] eventfd setup failed");
		return ret;
	}

	lowmem_dbus_init();
	lowmem_limit_init();
	lowmem_system_init();

	register_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
	register_notifier(RESOURCED_NOTIFIER_MEM_CONTROL, lowmem_control_handler);
	register_notifier(RESOURCED_NOTIFIER_LCD_OFF, lowmem_bg_reclaim_handler);

	return ret;
}
2275
/*
 * Module teardown: release the event level string (only if it was
 * dynamically replaced), stop the LMK worker, and unhook all notifiers.
 *
 * NOTE(review): event_level is freed only when it differs from
 * MEMCG_DEFAULT_EVENT_LEVEL — presumably the default is a string literal
 * that must not be freed; confirm where event_level is assigned.
 */
static int lowmem_exit(void)
{
	if (strncmp(event_level, MEMCG_DEFAULT_EVENT_LEVEL, sizeof(MEMCG_DEFAULT_EVENT_LEVEL)))
		free(event_level);

	lowmem_deactivate_worker();
	lowmem_limit_exit();
	lowmem_system_exit();

	unregister_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
	unregister_notifier(RESOURCED_NOTIFIER_MEM_CONTROL, lowmem_control_handler);
	unregister_notifier(RESOURCED_NOTIFIER_LCD_OFF, lowmem_bg_reclaim_handler);

	return RESOURCED_ERROR_NONE;
}
2291
/* module_ops init shim: publish the ops table and run the real init. */
static int resourced_memory_init(void *data)
{
	lowmem_ops = &memory_modules_ops;
	return lowmem_init();
}
2297
/* module_ops exit shim: delegate to the real teardown. */
static int resourced_memory_finalize(void *data)
{
	return lowmem_exit();
}
2302
/* Apply a memory state transition.  With @force the given @state is used
 * directly; otherwise the state is derived from current available memory. */
void lowmem_change_memory_state(int state, int force)
{
	int target_state;

	if (force)
		target_state = state;
	else
		target_state = check_mem_state(proc_get_mem_available());

	lowmem_trigger_memory_state_action(target_state);
}
2316
/* Accessor for the cached ktotalram value (set elsewhere; presumably total
 * RAM in KB given the 'k' prefix — confirm at the definition site). */
unsigned long lowmem_get_ktotalram(void)
{
	return ktotalram;
}
2321
/* Accessor for total system RAM in bytes (converted with BYTE_TO_MBYTE
 * throughout this file). */
unsigned long lowmem_get_totalram(void)
{
	return totalram;
}
2326
/*
 * Re-derive an app's memory cgroup assignment from the kernel's view: read
 * the main pid's current memory cgroup path and match it against the known
 * cgroup tree, walking from the most specific type down to the root.
 *
 * NOTE(review): if no hashname matches, the loop ends with 'index' one
 * below CGROUP_ROOT and 'mi' holding the last inspected (non-matching)
 * info, or NULL — confirm callers tolerate that combination.
 */
void lowmem_restore_memcg(struct proc_app_info *pai)
{
	char *cgpath;
	int index, ret;
	struct cgroup *cgroup = NULL;
	struct memcg_info *mi = NULL;
	pid_t pid = pai->main_pid;

	ret = cgroup_pid_get_path("memory", pid, &cgpath);
	if (ret < 0)
		return;

	for (index = CGROUP_END-1; index >= CGROUP_ROOT; index--) {
		cgroup = get_cgroup_tree(index);
		if (!cgroup)
			continue;

		mi = cgroup->memcg_info;
		if (!mi)
			continue;

		/* skip tree entries without a cgroup directory name */
		if (!strcmp(cgroup->hashname, ""))
			continue;
		if (strstr(cgpath, cgroup->hashname))
			break;
	}
	pai->memory.memcg_idx = index;
	pai->memory.memcg_info = mi;
	/* a path containing the app id indicates a per-app limit cgroup */
	if(strstr(cgpath, pai->appid))
		pai->memory.use_mem_limit = true;

	free(cgpath);
}
2360
/* resourced module descriptor for the low-memory handler; registered with
 * the module framework below. */
static struct module_ops memory_modules_ops = {
	.priority	= MODULE_PRIORITY_HIGH,
	.name		= "lowmem",
	.init		= resourced_memory_init,
	.exit		= resourced_memory_finalize,
};

MODULE_REGISTER(&memory_modules_ops)