/*
 * resourced
 *
 * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * @file vmpressure-lowmem-handler.c
 *
 * @desc lowmem handler using memcgroup
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
 *
 */

#include <stdio.h>
#include <fcntl.h>
#include <assert.h>
#include <limits.h>
#include <vconf.h>
#include <unistd.h>
#include <time.h>
#include <dirent.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/shm.h>
#include <sys/sysinfo.h>
#include <sys/resource.h>
#include <ctype.h>
#include <bundle.h>
#include <eventsystem.h>
#include <malloc.h>

#include "trace.h"
#include "cgroup.h"
#include "lowmem-handler.h"
#include "proc-common.h"
#include "procfs.h"
#include "freezer.h"
#include "resourced.h"
#include "macro.h"
#include "notifier.h"
#include "config-parser.h"
#include "module.h"
#include "swap-common.h"
#include "memory-cgroup.h"
#include "heart-common.h"
#include "proc-main.h"
#include "dbus-handler.h"
#include "util.h"
#include "fd-handler.h"
#include "resourced-helper-worker.h"
#include "safe-kill.h"
#include "dedup-common.h"

#define LOWMEM_THRES_INIT               0

#define MEMPS_EXEC_PATH                 "/usr/bin/memps"
#define MEM_CONF_FILE                   RD_CONFIG_FILE(limiter)
#define MEM_SECTION                     "Memory"
#define MEM_VIP_SECTION                 "VIP_PROCESS"
#define MEM_VIP_PREDEFINE               "PREDEFINE"
#define MEM_POPUP_SECTION               "POPUP"
#define MEM_POPUP_STRING                "oom_popup"
#define MEM_BG_RECLAIM_SECTION          "BackgroundReclaim"
#define MEM_BG_RECLAIM_STRING           "AfterScreenDim"
#define MEM_LOGGING_SECTION             "Logging"

#define BUF_MAX                         1024
#define MAX_VICTIMS_BETWEEN_CHECK       3
#define MAX_PROACTIVE_LOW_VICTIMS       2
#define MAX_PROACTIVE_HIGH_VICTIMS      4
#define FOREGROUND_VICTIMS              1
#define OOM_TIMER_INTERVAL              2
#define OOM_KILLER_PRIORITY             -20
#define THRESHOLD_MARGIN                10 /* MB */

#define MEM_SIZE_64                     64   /* MB */
#define MEM_SIZE_256                    256  /* MB */
#define MEM_SIZE_448                    448  /* MB */
#define MEM_SIZE_512                    512  /* MB */
#define MEM_SIZE_768                    768  /* MB */
#define MEM_SIZE_1024                   1024 /* MB */
#define MEM_SIZE_2048                   2048 /* MB */

/* thresholds for 64M RAM */
#define PROACTIVE_64_THRES                      10 /* MB */
#define PROACTIVE_64_LEAVE                      30 /* MB */
#define CGROUP_ROOT_64_THRES_DEDUP              16 /* MB */
#define CGROUP_ROOT_64_THRES_SWAP               15 /* MB */
#define CGROUP_ROOT_64_THRES_LOW                8  /* MB */
#define CGROUP_ROOT_64_THRES_MEDIUM             5  /* MB */
#define CGROUP_ROOT_64_THRES_LEAVE              8  /* MB */
#define CGROUP_ROOT_64_NUM_VICTIMS              1

/* thresholds for 256M RAM */
#define PROACTIVE_256_THRES                     50 /* MB */
#define PROACTIVE_256_LEAVE                     80 /* MB */
#define CGROUP_ROOT_256_THRES_DEDUP             60 /* MB */
#define CGROUP_ROOT_256_THRES_SWAP              40 /* MB */
#define CGROUP_ROOT_256_THRES_LOW               20 /* MB */
#define CGROUP_ROOT_256_THRES_MEDIUM            10 /* MB */
#define CGROUP_ROOT_256_THRES_LEAVE             20 /* MB */
#define CGROUP_ROOT_256_NUM_VICTIMS             2

/* thresholds for 448M RAM */
#define PROACTIVE_448_THRES                     80  /* MB */
#define PROACTIVE_448_LEAVE                     100 /* MB */
#define CGROUP_ROOT_448_THRES_DEDUP             120 /* MB */
#define CGROUP_ROOT_448_THRES_SWAP              100 /* MB */
#define CGROUP_ROOT_448_THRES_LOW               60  /* MB */
#define CGROUP_ROOT_448_THRES_MEDIUM            50  /* MB */
#define CGROUP_ROOT_448_THRES_LEAVE             70  /* MB */
#define CGROUP_ROOT_448_NUM_VICTIMS             5

/* thresholds for 512M RAM */
#define PROACTIVE_512_THRES                     100 /* MB */
#define PROACTIVE_512_LEAVE                     80  /* MB */
#define CGROUP_ROOT_512_THRES_DEDUP             140 /* MB */
#define CGROUP_ROOT_512_THRES_SWAP              100 /* MB */
#define CGROUP_ROOT_512_THRES_LOW               70  /* MB */
#define CGROUP_ROOT_512_THRES_MEDIUM            60  /* MB */
#define CGROUP_ROOT_512_THRES_LEAVE             80  /* MB */
#define CGROUP_ROOT_512_NUM_VICTIMS             5

/* thresholds for 768M RAM */
#define PROACTIVE_768_THRES                     100 /* MB */
#define PROACTIVE_768_LEAVE                     130 /* MB */
#define CGROUP_ROOT_768_THRES_DEDUP             180 /* MB */
#define CGROUP_ROOT_768_THRES_SWAP              150 /* MB */
#define CGROUP_ROOT_768_THRES_LOW               90  /* MB */
#define CGROUP_ROOT_768_THRES_MEDIUM            80  /* MB */
#define CGROUP_ROOT_768_THRES_LEAVE             100 /* MB */
#define CGROUP_ROOT_768_NUM_VICTIMS             5

/* thresholds for 1024M RAM */
#define PROACTIVE_1024_THRES                    230 /* MB */
#define PROACTIVE_1024_LEAVE                    150 /* MB */
#define CGROUP_ROOT_1024_THRES_DEDUP            400 /* MB */
#define CGROUP_ROOT_1024_THRES_SWAP             300 /* MB */
#define CGROUP_ROOT_1024_THRES_LOW              120 /* MB */
#define CGROUP_ROOT_1024_THRES_MEDIUM           100 /* MB */
#define CGROUP_ROOT_1024_THRES_LEAVE            150 /* MB */
#define CGROUP_ROOT_1024_NUM_VICTIMS            5

/* thresholds for 2048M RAM */
#define PROACTIVE_2048_THRES                    200 /* MB */
#define PROACTIVE_2048_LEAVE                    500 /* MB */
#define CGROUP_ROOT_2048_THRES_DEDUP            400 /* MB */
#define CGROUP_ROOT_2048_THRES_SWAP             300 /* MB */
#define CGROUP_ROOT_2048_THRES_LOW              200 /* MB */
#define CGROUP_ROOT_2048_THRES_MEDIUM           160 /* MB */
#define CGROUP_ROOT_2048_THRES_LEAVE            300 /* MB */
#define CGROUP_ROOT_2048_NUM_VICTIMS            10

/* thresholds for 3072M RAM and above */
#define PROACTIVE_3072_THRES                    300 /* MB */
#define PROACTIVE_3072_LEAVE                    700 /* MB */
#define CGROUP_ROOT_3072_THRES_DEDUP            600 /* MB */
#define CGROUP_ROOT_3072_THRES_SWAP             500 /* MB */
#define CGROUP_ROOT_3072_THRES_LOW              400 /* MB */
#define CGROUP_ROOT_3072_THRES_MEDIUM           250 /* MB */
#define CGROUP_ROOT_3072_THRES_LEAVE            500 /* MB */
#define CGROUP_ROOT_3072_NUM_VICTIMS            10

static unsigned proactive_threshold;
static unsigned proactive_leave;
static unsigned lmk_start_threshold;

static char *event_level = MEMCG_DEFAULT_EVENT_LEVEL;

/**
 * Resourced Low Memory Killer
 * NOTE: planned to be moved to a separate file.
 */
/*-------------------------------------------------*/
#define OOM_TIMER_INTERVAL_SEC          2
/* Wait times for the reclaim worker, in microseconds (used with g_usleep()) */
#define LMW_LOOP_WAIT_TIMEOUT_USEC      (OOM_TIMER_INTERVAL_SEC * G_USEC_PER_SEC)
#define LMW_RETRY_WAIT_TIMEOUT_USEC     (G_USEC_PER_SEC)

struct lowmem_control {
        /*
         * For each queued request the following properties
         * are required with two exceptions:
         *  - status is being set by LMK
         *  - callback is optional
         */
        /* Processing flags */
        unsigned int flags;
        /* Indicator for OOM score of targeted processes */
        enum cgroup_type type;

        /* Desired size to be restored - level to be reached (MB) */
        unsigned int size;
        /* Max number of processes to be considered */
        unsigned int count;
        /* Memory reclaim status */
        int status;
        /*
         * Optional - if set, will be triggered by LMK once the request
         * is handled.
         */
        void (*callback) (struct lowmem_control *);
};

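/*
 * Background reclaim worker: a dedicated thread that consumes
 * lowmem_control requests from an asynchronous queue. "active" and
 * "running" are accessed atomically from both the main loop and the
 * worker thread.
 */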
struct lowmem_worker {
        pthread_t       worker_thread;
        GAsyncQueue     *queue;
        int             active;
        int             running;
};

static struct lowmem_worker lmw;

static int memlog_enabled;
static int memlog_nr_max = DEFAULT_MEMLOG_NR_MAX;
/* Remove logfiles down to this threshold;
 * it is about five-sixths of memlog_nr_max. */
static int memlog_remove_batch_thres = (DEFAULT_MEMLOG_NR_MAX * 5) / 6;
static char *memlog_path = DEFAULT_MEMLOG_PATH;
static char *memlog_prefix[MEMLOG_MAX];

#define LOWMEM_WORKER_IS_ACTIVE(_lmw)   g_atomic_int_get(&(_lmw)->active)
#define LOWMEM_WORKER_ACTIVATE(_lmw)    g_atomic_int_set(&(_lmw)->active, 1)
#define LOWMEM_WORKER_DEACTIVATE(_lmw)  g_atomic_int_set(&(_lmw)->active, 0)

#define LOWMEM_WORKER_IS_RUNNING(_lmw)  g_atomic_int_get(&(_lmw)->running)
#define LOWMEM_WORKER_RUN(_lmw)         g_atomic_int_set(&(_lmw)->running, 1)
#define LOWMEM_WORKER_IDLE(_lmw)        g_atomic_int_set(&(_lmw)->running, 0)

#define LOWMEM_NEW_REQUEST()            g_slice_new0(struct lowmem_control)

/* Expands to an expression so call sites supply the trailing semicolon */
#define LOWMEM_DESTROY_REQUEST(_ctl)            \
        g_slice_free(typeof(*(_ctl)), _ctl)

#define LOWMEM_SET_REQUEST(c, __flags, __type, __size, __count, __cb)  \
{                                                                      \
        (c)->flags      = __flags; (c)->type    = __type;              \
        (c)->size       = __size;  (c)->count   = __count;             \
        (c)->callback   = __cb;                                        \
}

#define BUFF_MAX        255
#define APP_ATTR_PATH   "/proc/%d/attr/current"

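/* Read the security attribute (e.g. SMACK label) of @pid from
 * /proc/<pid>/attr/current into @name (at most @len bytes). */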
static int get_privilege(pid_t pid, char *name, size_t len)
{
        char path[PATH_MAX];
        char attr[BUFF_MAX];
        size_t attr_len;
        FILE *fp;

        snprintf(path, sizeof(path), APP_ATTR_PATH, pid);

        fp = fopen(path, "r");
        if (!fp)
                return -errno;

        attr_len = fread(attr, 1, sizeof(attr) - 1, fp);
        fclose(fp);
        /* attr_len is unsigned, so only the empty-read case can occur */
        if (attr_len == 0)
                return -ENOENT;

        attr[attr_len] = '\0';

        snprintf(name, len, "%s", attr);
        return 0;
}

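/* Heuristic application check based on the process security label:
 * returns 1 for application processes, 0 for platform/system domains
 * ("System", "User", "System::Privileged"), and -1 on error. */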
static int is_app(pid_t pid)
{
        char attr[BUFF_MAX];
        size_t len;
        int ret;

        ret = get_privilege(pid, attr, sizeof(attr));
        if (ret < 0) {
                _E("Failed to get privilege of PID(%d).", pid);
                return -1;
        }

        len = strlen(attr) + 1;

        if (!strncmp("System", attr, len))
                return 0;

        if (!strncmp("User", attr, len))
                return 0;

        if (!strncmp("System::Privileged", attr, len))
                return 0;

        return 1;
}


static void lowmem_queue_request(struct lowmem_worker *lmw,
                                struct lowmem_control *ctl)
{
        if (LOWMEM_WORKER_IS_ACTIVE(lmw))
                g_async_queue_push(lmw->queue, ctl);
}

/* internal */
static void lowmem_drain_queue(struct lowmem_worker *lmw)
{
        struct lowmem_control *ctl;

        g_async_queue_lock(lmw->queue);
        while ((ctl = g_async_queue_try_pop_unlocked(lmw->queue))) {
                if (ctl->callback)
                        ctl->callback(ctl);
                LOWMEM_DESTROY_REQUEST(ctl);
        }
        g_async_queue_unlock(lmw->queue);
}

static void lowmem_request_destroy(gpointer data)
{
        struct lowmem_control *ctl = (struct lowmem_control*) data;

        if (ctl->callback)
                ctl->callback(ctl);
        LOWMEM_DESTROY_REQUEST(ctl);
}

/*-------------------------------------------------*/

/* low memory action function for cgroup */
static void memory_cgroup_proactive_lmk_act(enum cgroup_type type, struct memcg_info *mi);
/* low memory action functions */
static void high_mem_act(void);
static void swap_activate_act(void);
static void swap_compact_act(void);
static void lmk_act(void);


static size_t cur_mem_state = MEM_LEVEL_HIGH;
static int num_max_victims = MAX_MEMORY_CGROUP_VICTIMS;
static int num_vict_between_check = MAX_VICTIMS_BETWEEN_CHECK;

static unsigned long totalram;
static unsigned long ktotalram;

static struct module_ops memory_modules_ops;
static const struct module_ops *lowmem_ops;
static bool oom_popup_enable;
static bool oom_popup;
static bool memcg_swap_status;
static bool bg_reclaim;
static int fragmentation_size;

static const char *convert_cgroup_type_to_str(int type)
{
        static const char *type_table[] =
        {"/", "VIP", "High", "Medium", "Lowest"};
        if (type >= CGROUP_ROOT && type <= CGROUP_LOW)
                return type_table[type];
        else
                return "Error";
}

static const char *convert_status_to_str(int status)
{
        static const char *status_table[] =
        {"none", "done", "drop", "cont", "retry", "next_type"};
        if (status >= LOWMEM_RECLAIM_NONE && status <= LOWMEM_RECLAIM_NEXT_TYPE)
                return status_table[status];
        return "error status";
}

static const char *convert_memstate_to_str(int mem_state)
{
        static const char *state_table[] = {"mem normal", "mem dedup", "mem swap", "mem low",
                        "mem medium"};
        if (mem_state >= 0 && mem_state < MEM_LEVEL_MAX)
                return state_table[mem_state];
        return "";
}

static int lowmem_launch_oompopup(void)
{
        GVariantBuilder *const gv_builder = g_variant_builder_new(G_VARIANT_TYPE("a{ss}"));
        g_variant_builder_add(gv_builder, "{ss}", "_SYSPOPUP_CONTENT_", "lowmemory_oom");

        GVariant *const params = g_variant_new("(a{ss})", gv_builder);
        g_variant_builder_unref(gv_builder);

        int ret = d_bus_call_method_sync_gvariant(SYSTEM_POPUP_BUS_NAME,
                SYSTEM_POPUP_PATH_SYSTEM, SYSTEM_POPUP_IFACE_SYSTEM,
                "PopupLaunch", params);

        g_variant_unref(params);

        return ret;
}

static inline void get_total_memory(void)
{
        struct sysinfo si;
        if (totalram)
                return;

        if (!sysinfo(&si)) {
                totalram = si.totalram;
                ktotalram = BYTE_TO_KBYTE(totalram);
        }
}

static int lowmem_mem_usage_uss(pid_t pid, unsigned int *usage)
{
        unsigned int uss, zram = 0;
        int ret;

        *usage = 0;

        /*
         * In lowmem we need to know the memory size of processes
         * before terminating apps. To get the most accurate usage
         * we use USS + ZRAM usage for the selected process.
         *
         * Those values contain the best approximation of the amount
         * of memory that will be freed after process termination.
         */
        ret = proc_get_uss(pid, &uss);
        if (ret != RESOURCED_ERROR_NONE)
                return ret;

        if (swap_get_state() == SWAP_ON) {
                ret = proc_get_zram_usage(pid, &zram);
                /* If we don't get zram usage, it's not a problem */
                if (ret != RESOURCED_ERROR_NONE)
                        zram = 0;
        }
        *usage = uss + zram;
        return RESOURCED_ERROR_NONE;
}

unsigned int lowmem_get_task_mem_usage_rss(const struct task_info *tsk)
{
        unsigned int size = 0, total_size = 0;
        int index, ret;
        pid_t pid;

        /*
         * The pids array is allocated only when there are multiple
         * processes with the same pgid, e.g., a browser and its web
         * processes. Mostly a single process is used.
         */
        if (tsk->pids == NULL) {
                ret = proc_get_ram_usage(tsk->pid, &size);

                /* If there is no proc entry for the given pid, the process
                 * should be abandoned during further processing.
                 */
                if (ret < 0)
                        _D("failed to get rss memory usage of %d", tsk->pid);

                return size;
        }

        for (index = 0; index < tsk->pids->len; index++) {
                pid = g_array_index(tsk->pids, pid_t, index);
                ret = proc_get_ram_usage(pid, &size);
                if (ret != RESOURCED_ERROR_NONE)
                        continue;
                total_size += size;
        }

        return total_size;
}

static int memps_file_select(const struct dirent *entry)
{
        return strstr(entry->d_name, "memps") ? 1 : 0;
}

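/* Reverse strstr(): return a pointer to the last occurrence of @token
 * in @str, or NULL. Used to find the timestamp suffix in memps log names. */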
static char *strrstr(const char *str, const char *token)
{
        int len = strlen(token);
        const char *p = str + strlen(str);

        while (str <= --p)
                if (p[0] == token[0] && strncmp(p, token, len) == 0)
                        return (char *)p;

        return NULL;
}

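/* scandir() comparator: order memps log files by the timestamp that
 * follows the last '_' in the file name, oldest first. */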
static int timesort(const struct dirent **a, const struct dirent **b)
{
        long long time1 = 0;
        long long time2 = 0;
        char *ptr;

        ptr = strrstr((*a)->d_name, "_");
        if (ptr && *++ptr)
                time1 = atoll(ptr);

        ptr = strrstr((*b)->d_name, "_");
        if (ptr && *++ptr)
                time2 = atoll(ptr);

        /* avoid truncating the long long difference to int */
        return (time1 > time2) - (time1 < time2);
}

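/* Keep at most memlog_nr_max memps logs in @data (the log directory):
 * when that limit is exceeded, delete the oldest files until roughly
 * memlog_remove_batch_thres of them remain. */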
static int clear_logs(void *data)
{
        struct dirent **namelist;
        int n, i, ret;
        char fpath[BUF_MAX];
        char *fname;
        char *dir = (char*)data;
        int len;

        if (!memlog_enabled)
                return RESOURCED_ERROR_NONE;

        if (!dir)
                return RESOURCED_ERROR_NONE;

        len = strlen(dir);
        if (len <= 0 || len >= sizeof fpath - 1) {
                _E("Invalid parameter - Directory path is too short or too long");
                return RESOURCED_ERROR_INVALID_PARAMETER;
        }

        n = scandir(dir, &namelist, memps_file_select, timesort);
        /* scandir() returns -1 on failure; the old code would have walked
         * the array with a negative count */
        if (n < 0) {
                _E("failed to scan directory %s", dir);
                return RESOURCED_ERROR_FAIL;
        }

        _D("num of log files %d", n);
        if (n <= memlog_nr_max) {
                while (n--)
                        free(namelist[n]);
                free(namelist);
                return RESOURCED_ERROR_NONE;
        }

        strncpy(fpath, dir, sizeof fpath - 1);
        fpath[sizeof fpath - 1] = '\0';
        fname = fpath + len;
        *fname++ = '/';

        len = sizeof fpath - len - 1;
        for (i = 0; i < n; i++) {
                if (i < n - memlog_remove_batch_thres) {
                        if (strlen(namelist[i]->d_name) > len - 1)
                                continue;
                        strncpy(fname, namelist[i]->d_name, len - 1);
                        fpath[sizeof fpath - 1] = '\0';
                        _D("remove log file %s", fpath);
                        ret = remove(fpath);
                        if (ret < 0)
                                _E("%s file cannot be removed", fpath);
                }

                free(namelist[i]);
        }
        free(namelist);
        return RESOURCED_ERROR_NONE;
}

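/* Write a memory snapshot for victim @pid (@victim_name) into a
 * timestamped file under memlog_path. If the victim's oom_score_adj
 * cannot be read or is above OOMADJ_BACKGRD_LOCKED, dump meminfo
 * directly; otherwise run the external memps tool. */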
void make_memps_log(enum mem_log memlog, pid_t pid, char *victim_name)
{
        time_t now;
        struct tm cur_tm;
        char new_log[BUF_MAX];
        static pid_t old_pid;
        int oom_score_adj = 0, ret;
        char *prefix;

        if (!memlog_enabled)
                return;

        if (memlog < MEMLOG_MEMPS || memlog >= MEMLOG_MAX)
                return;

        prefix = memlog_prefix[memlog];

        if (old_pid == pid)
                return;

        old_pid = pid;

        now = time(NULL);

        if (localtime_r(&now, &cur_tm) == NULL) {
                _E("Fail to get localtime");
                return;
        }

        snprintf(new_log, sizeof(new_log),
                "%s/%s_%s_%d_%.4d%.2d%.2d%.2d%.2d%.2d", memlog_path, prefix, victim_name,
                pid, (1900 + cur_tm.tm_year), 1 + cur_tm.tm_mon,
                cur_tm.tm_mday, cur_tm.tm_hour, cur_tm.tm_min,
                cur_tm.tm_sec);

        ret = proc_get_oom_score_adj(pid, &oom_score_adj);
        if (ret || oom_score_adj > OOMADJ_BACKGRD_LOCKED) {

                _cleanup_fclose_ FILE *f = NULL;

                f = fopen(new_log, "w");
                if (!f) {
                        _E("fail to create memps log %s", new_log);
                        return;
                }
                proc_print_meninfo(f);

        } else {

                const char *argv[4] = {MEMPS_EXEC_PATH, "-f", NULL, NULL};

                argv[2] = new_log;
                exec_cmd(ARRAY_SIZE(argv), argv);
        }

        /* best effort to limit the number of logfiles up to memlog_nr_max */
        clear_logs(memlog_path);
}

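/* Kill one victim task; essential helpers (memps, crash-worker,
 * system-syspopup) are never selected. SIGTERM is sent to apps at or
 * below OOMADJ_BACKGRD_LOCKED, or to those requesting it via
 * PROC_SIGTERM; everything else gets SIGKILL. The reclaimed size
 * estimate is returned through @victim_size (KB). */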
static int lowmem_kill_victim(const struct task_info *tsk,
                int flags, int memps_log, unsigned int *victim_size)
{
        pid_t pid;
        int ret;
        char appname[PATH_MAX];
        int sigterm = 0;
        struct proc_app_info *pai;

        pid = tsk->pid;

        if (pid <= 0 || pid == getpid())
                return RESOURCED_ERROR_FAIL;

        ret = proc_get_cmdline(pid, appname, sizeof appname);
        if (ret == RESOURCED_ERROR_FAIL)
                return RESOURCED_ERROR_FAIL;

        if (!strcmp("memps", appname) ||
            !strcmp("crash-worker", appname) ||
            !strcmp("system-syspopup", appname)) {
                _E("%s(%d) was selected, skip it", appname, pid);
                return RESOURCED_ERROR_FAIL;
        }

        if (!memps_log)
                make_memps_log(MEMLOG_MEMPS, pid, appname);

        pai = tsk->pai;
        if (pai) {
                resourced_proc_status_change(PROC_CGROUP_SET_TERMINATE_REQUEST,
                        pid, NULL, NULL, PROC_TYPE_NONE);

                if (tsk->oom_score_lru <= OOMADJ_BACKGRD_LOCKED) {
                        sigterm = 1;
                } else if (tsk->oom_score_lru > OOMADJ_BACKGRD_LOCKED && tsk->oom_score_lru < OOMADJ_BACKGRD_UNLOCKED) {
                        int app_flag = pai->flags;
                        sigterm = app_flag & PROC_SIGTERM;
                }

                if (pai->memory.oom_killed)
                        sigterm = 0;

                pai->memory.oom_killed = true;
        }

        if (sigterm)
                safe_kill(pid, SIGTERM);
        else
                safe_kill(pid, SIGKILL);

        _D("[LMK] victim killed, force(%d), %d (%s) score = %d, size: rss = %u, sigterm = %d\n",
           flags & OOM_FORCE, pid, appname, tsk->oom_score_adj,
           tsk->size, sigterm);
        *victim_size = tsk->size;

        if (tsk->oom_score_lru > OOMADJ_FOREGRD_UNLOCKED)
                return RESOURCED_ERROR_NONE;

        if (oom_popup_enable && !oom_popup) {
                lowmem_launch_oompopup();
                oom_popup = true;
        }
        if (memps_log)
                make_memps_log(MEMLOG_MEMPS, pid, appname);

        return RESOURCED_ERROR_NONE;
}

/* return LOWMEM_RECLAIM_CONT when killing should be continued */
static int lowmem_check_kill_continued(struct task_info *tsk, int flags)
{
        unsigned int available;

        /*
         * Processes with a priority higher than perceptible are killed
         * only when the available memory is less than the dynamic oom
         * threshold.
         */
        if (tsk->oom_score_lru > OOMADJ_BACKGRD_PERCEPTIBLE)
                return LOWMEM_RECLAIM_CONT;

        if (flags & (OOM_FORCE|OOM_SINGLE_SHOT)) {
                _I("[LMK] %d is dropped during force kill, flag=%d",
                        tsk->pid, flags);
                return LOWMEM_RECLAIM_DROP;
        }
        available = proc_get_mem_available();
        if (available > lmk_start_threshold) {
                _I("[LMK] available=%d MB, larger than %u MB, do not kill foreground",
                        available, lmk_start_threshold);
                return LOWMEM_RECLAIM_RETRY;
        }
        return LOWMEM_RECLAIM_CONT;
}

static int compare_victims(const struct task_info *ta, const struct task_info *tb)
{
        unsigned int pa, pb;

        assert(ta != NULL);
        assert(tb != NULL);
        /*
         * This follows the kernel badness point calculation heuristic:
         * oom_score_adj (which ranges from -1000 to 1000) is weighted by
         * ktotalram / 2000 so that it is comparable to the task size.
         */
        pa = ta->oom_score_lru * (ktotalram / 2000) + ta->size;
        pb = tb->oom_score_lru * (ktotalram / 2000) + tb->size;

        /* descending order by badness; avoids unsigned wrap-around */
        return (pb > pa) - (pb < pa);
}

static void lowmem_free_task_info_array(GArray *array)
{
        int i;

        for (i = 0; i < array->len; i++) {
                struct task_info *tsk;

                tsk = &g_array_index(array, struct task_info, i);
                if (tsk->pids)
                        g_array_free(tsk->pids, true);
        }

        g_array_free(array, true);
}

static inline int is_dynamic_process_killer(int flags)
{
        return (flags & OOM_FORCE) && !(flags & OOM_NOMEMORY_CHECK);
}

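/* Return how many MB should be freed to bring available memory back
 * above @thres; THRESHOLD_MARGIN is added on top because a launching
 * app keeps consuming memory in the meantime. Current availability is
 * reported through @avail. */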
static unsigned int is_memory_recovered(unsigned int *avail, unsigned int thres)
{
        unsigned int available = proc_get_mem_available();
        unsigned int should_be_freed = 0;

        if (available < thres)
                should_be_freed = thres - available;
        /*
         * Free THRESHOLD_MARGIN more than what is strictly needed,
         * because a launching app keeps consuming memory.
         */
        if (should_be_freed > 0)
                should_be_freed += THRESHOLD_MARGIN;

        *avail = available;

        return should_be_freed;
}

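/* Scan /proc for application processes that are not tracked in the
 * proc_app_info list (and are not VIP) and append them to @pids as
 * additional LMK candidates. */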
static int lowmem_get_pids_proc(GArray *pids)
{
        DIR *dp;
        struct dirent *dentry;

        dp = opendir("/proc");
        if (!dp) {
                _E("fail to open /proc");
                return RESOURCED_ERROR_FAIL;
        }
        while ((dentry = readdir(dp)) != NULL) {
                struct task_info tsk;
                pid_t pid = 0, pgid = 0;
                int oom = 0;

                if (!isdigit(dentry->d_name[0]))
                        continue;

                pid = (pid_t)atoi(dentry->d_name);
                if (pid < 1)
                        /* skip invalid pids or kernel processes */
                        continue;

                pgid = getpgid(pid);
                if (pgid < 1)
                        continue;

                if (is_app(pid) != 1)
                        continue;

                if (proc_get_oom_score_adj(pid, &oom) < 0) {
                        _D("pid(%d) was already terminated", pid);
                        continue;
                }

                /* VIP pids should be excluded from the LMK list */
                if (cgroup_get_type(oom) == CGROUP_VIP)
                        continue;

                /*
                 * Skip pids in the application oom range: they are already
                 * tracked through the proc_app_info list, so appending them
                 * here would duplicate candidates.
                 */
                if (oom > OOMADJ_SU && oom <= OOMADJ_APP_MAX)
                        continue;

                /*
                 * Currently, for tasks in the memory cgroup,
                 * do not consider multiple tasks with one pgid.
                 */
                tsk.pid = pid;
                tsk.pgid = pgid;
                tsk.oom_score_adj = oom;
                tsk.oom_score_lru = oom;
                tsk.pids = NULL;
                tsk.size = lowmem_get_task_mem_usage_rss(&tsk);
                tsk.pai = NULL;

                g_array_append_val(pids, tsk);
        }

        closedir(dp);
        return RESOURCED_ERROR_NONE;
}

/**
 * @brief Terminate up to max_victims processes after finding them from pai.
 *      It depends on the proc_app_info list and also references the
 *      systemservice cgroup, because some processes in that group have
 *      no proc_app_info.
 *
 * @max_victims:     max number of processes to be terminated
 * @start_oom:       find victims from this oom_score_adj value
 * @end_oom:         find victims up to this oom_score_adj value
 * @should_be_freed: amount of memory to be reclaimed (in MB)
 * @total_size[out]: total size of possibly reclaimed memory (required)
 * @completed:       final outcome (optional)
 * @threshold:       desired value of available memory
 */
static int lowmem_kill_victims(int max_victims,
        int start_oom, int end_oom, unsigned should_be_freed, int flags,
        unsigned int *total_size, int *completed, int threshold)
{
        int total_count = 0;
        GSList *proc_app_list = NULL;
        int i, ret, victim = 0;
        unsigned int victim_size = 0;
        unsigned int total_victim_size = 0;
        int status = LOWMEM_RECLAIM_NONE;
        GArray *candidates = NULL;
        GSList *iter, *iterchild;
        struct proc_app_info *pai = NULL;
        int oom_score_adj;
        int should_be_freed_kb = MBYTE_TO_KBYTE(should_be_freed);

        candidates = g_array_new(false, false, sizeof(struct task_info));

        proc_app_list = proc_app_list_open();
        gslist_for_each_item(iter, proc_app_list) {
                struct task_info ti;

                total_count++;
                pai = (struct proc_app_info *)iter->data;
                if (!pai->main_pid)
                        continue;

                oom_score_adj = pai->memory.oom_score_adj;
                if (oom_score_adj > end_oom || oom_score_adj < start_oom)
                        continue;

                if ((flags & OOM_REVISE) && pai->memory.oom_killed)
                        continue;

                ti.pid = pai->main_pid;
                ti.pgid = getpgid(ti.pid);
                ti.oom_score_adj = oom_score_adj;
                ti.pai = pai;

                /*
                 * Previously the oom_score_adj of favourite (oom_score_adj = 270)
                 * applications was independent of lru_state; now lru_state is
                 * taken into account when killing favourite processes.
                 */
                if (oom_score_adj == OOMADJ_FAVORITE && pai->lru_state >= PROC_BACKGROUND)
                        ti.oom_score_lru = OOMADJ_FAVORITE + OOMADJ_FAVORITE_APP_INCREASE * pai->lru_state;
                else
                        ti.oom_score_lru = oom_score_adj;

                if (pai->childs) {
                        ti.pids = g_array_new(false, false, sizeof(pid_t));
                        g_array_append_val(ti.pids, ti.pid);
                        gslist_for_each_item(iterchild, pai->childs) {
                                pid_t child = GPOINTER_TO_PID(iterchild->data);
                                g_array_append_val(ti.pids, child);
                        }
                } else
                        ti.pids = NULL;

                g_array_append_val(candidates, ti);
        }

        proc_app_list_close();

        if (!candidates->len) {
                status = LOWMEM_RECLAIM_NEXT_TYPE;
                goto leave;
        }
        _D("[LMK] candidate ratio=%d/%d", candidates->len, total_count);

        for (i = 0; i < candidates->len; i++) {
                struct task_info *tsk;

                tsk = &g_array_index(candidates, struct task_info, i);
                tsk->size = lowmem_get_task_mem_usage_rss(tsk);
        }

        /*
         * When start_oom == OOMADJ_SU, also try to find victims in /proc
         * to handle the low memory situation. This can catch a misbehaving
         * system process even though it has a low oom score.
         */
        if (start_oom == OOMADJ_SU)
                lowmem_get_pids_proc(candidates);

        g_array_sort(candidates, (GCompareFunc)compare_victims);

        for (i = 0; i < candidates->len; i++) {
                struct task_info *tsk;

                if (i >= max_victims) {
                        status = LOWMEM_RECLAIM_NEXT_TYPE;
                        break;
                }

                /*
                 * Available memory is checked only every
                 * num_vict_between_check processes to reduce overhead.
                 */
                if (!(i % num_vict_between_check)) {
                        if (proc_get_mem_available() > threshold) {
                                status = LOWMEM_RECLAIM_DONE;
                                break;
                        }
                }

                if (!(flags & OOM_NOMEMORY_CHECK) &&
                    total_victim_size >= should_be_freed_kb) {
                        _D("[LMK] victim=%d, max_victims=%d, total_size=%uKB",
                                victim, max_victims, total_victim_size);
                        status = LOWMEM_RECLAIM_DONE;
                        break;
                }

                tsk = &g_array_index(candidates, struct task_info, i);

                status = lowmem_check_kill_continued(tsk, flags);
                if (status != LOWMEM_RECLAIM_CONT)
                        break;

                _I("[LMK] select victim from proc_app_list: pid(%d) with oom_score_adj(%d)\n", tsk->pid, tsk->oom_score_adj);

                ret = lowmem_kill_victim(tsk, flags, i, &victim_size);
                if (ret != RESOURCED_ERROR_NONE)
                        continue;
                victim++;
                total_victim_size += victim_size;
        }

leave:
        lowmem_free_task_info_array(candidates);
        *total_size = total_victim_size;
        if (*completed != LOWMEM_RECLAIM_CONT)
                *completed = status;
        else
                *completed = LOWMEM_RECLAIM_NEXT_TYPE;
        return victim;
}

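/* Translate a cgroup type into the [min, max] oom_score_adj range it
 * covers; VIP and out-of-range types are rejected. */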
static int calculate_range_of_oom(enum cgroup_type type, int *min, int *max)
{
        if (type == CGROUP_VIP || type >= CGROUP_END || type <= CGROUP_TOP) {
                _E("cgroup type (%d) is out of scope", type);
                return RESOURCED_ERROR_FAIL;
        }

        *max = cgroup_get_highest_oom_score_adj(type);
        *min = cgroup_get_lowest_oom_score_adj(type);

        return RESOURCED_ERROR_NONE;
}

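/* Core of one reclaim request: compute how much memory is missing,
 * kill victims in the requested cgroup, and escalate to the next
 * cgroup (LOW -> MEDIUM -> HIGH -> ROOT) until the request is
 * satisfied, dropped, or marked for retry. */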
static void lowmem_handle_request(struct lowmem_control *ctl)
{
        int start_oom, end_oom;
        int count = 0, victim_cnt = 0;
        int max_victim_cnt = ctl->count;
        int status = LOWMEM_RECLAIM_NONE;
        unsigned int available = 0;
        unsigned int total_size = 0;
        unsigned int current_size = 0;
        unsigned int reclaim_size, shortfall = 0;
        enum cgroup_type cgroup_type = ctl->type;

        available = proc_get_mem_available();
        reclaim_size = ctl->size > available
                     ? ctl->size - available : 0;

        if (!reclaim_size) {
                status = LOWMEM_RECLAIM_DONE;
                goto done;
        }

retry:
        /* Prepare LMK to start doing its job. Check preconditions. */
        if (calculate_range_of_oom(cgroup_type, &start_oom, &end_oom))
                goto done;

        lmk_start_threshold = get_root_memcg_info()->threshold[MEM_LEVEL_OOM];
        shortfall = is_memory_recovered(&available, ctl->size);

        if (!shortfall || !reclaim_size) {
                status = LOWMEM_RECLAIM_DONE;
                goto done;
        }

        /* precaution */
        current_size = 0;
        victim_cnt = lowmem_kill_victims(max_victim_cnt, start_oom, end_oom,
                            reclaim_size, ctl->flags, &current_size, &status, ctl->size);

        if (victim_cnt) {
                current_size = KBYTE_TO_MBYTE(current_size);
                reclaim_size -= reclaim_size > current_size
                        ? current_size : reclaim_size;
                total_size += current_size;
                count += victim_cnt;
                _I("[LMK] current: kill %d victims, reclaim_size=%uMB from %d to %d status=%s",
                                victim_cnt, current_size,
                                start_oom, end_oom, convert_status_to_str(status));
        }

        if ((status == LOWMEM_RECLAIM_DONE) ||
            (status == LOWMEM_RECLAIM_DROP) ||
            (status == LOWMEM_RECLAIM_RETRY))
                goto done;

        /*
         * If the first pass did not reclaim enough memory:
         *  - with OOM_IN_DEPTH, try to find victims again in the next
         *    cgroup; otherwise just return, because there are no more
         *    victims in the desired cgroup.
         *  - with OOM_REVISE, resourced could not find victims in
         *    proc_app_list, so it should search /proc for victims or
         *    malicious processes. But searching /proc can lead to
         *    abnormal behaviour (sluggishness, or killing the same
         *    victims repeatedly), so otherwise just return after the
         *    first pass and wait for some period.
         */
        if (cgroup_type == CGROUP_LOW) {
                cgroup_type = CGROUP_MEDIUM;
                goto retry;
        } else if ((cgroup_type == CGROUP_MEDIUM) && (ctl->flags & OOM_IN_DEPTH)) {
                cgroup_type = CGROUP_HIGH;
                if (ctl->flags & OOM_FORCE)
                        max_victim_cnt = FOREGROUND_VICTIMS;
                goto retry;
        } else if ((cgroup_type == CGROUP_HIGH) && (ctl->flags & OOM_IN_DEPTH)) {
                status = LOWMEM_RECLAIM_RETRY;
                ctl->type = CGROUP_ROOT;
        } else if (cgroup_type == CGROUP_ROOT) {
                status = LOWMEM_RECLAIM_RETRY;
        }
done:
        _I("[LMK] Done: killed %d processes reclaimed=%uMB remaining=%uMB shortfall=%uMB status=%s",
                count, total_size, reclaim_size, shortfall, convert_status_to_str(status));

        /* After we finish reclaiming, it's worth removing the oldest memps logs */
        if (count && memlog_enabled)
                request_helper_worker(CLEAR_LOGS, memlog_path, clear_logs, NULL);
        ctl->status = status;
}

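/* Reclaim worker main loop: block on the request queue, handle each
 * request, and retry while the system stays under pressure. A request
 * flagged OOM_DROP acts as a poison pill that stops the thread. */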
static void *lowmem_reclaim_worker(void *arg)
{
        struct lowmem_worker *lmw = (struct lowmem_worker *)arg;

        setpriority(PRIO_PROCESS, 0, OOM_KILLER_PRIORITY);

        g_async_queue_ref(lmw->queue);

        while (1) {
                int try_count = 0;
                struct lowmem_control *ctl;

                LOWMEM_WORKER_IDLE(lmw);
                /* Wait on any wake-up call */
                ctl = g_async_queue_pop(lmw->queue);

                /* Destroy the poison-pill request before leaving the loop,
                 * so the dangling pointer is never used afterwards */
                if (ctl->flags & OOM_DROP) {
                        LOWMEM_DESTROY_REQUEST(ctl);
                        break;
                }

                if (!LOWMEM_WORKER_IS_ACTIVE(lmw))
                        break;

                LOWMEM_WORKER_RUN(lmw);
process_again:
                _D("[LMK] %d tries", ++try_count);
                lowmem_handle_request(ctl);
                /**
                 * In case the pass failed to reclaim the requested amount of
                 * memory, or the system is still under memory pressure, try
                 * the timeout wait. There is a chance this will get woken up
                 * in a better reality.
                 */
                if (ctl->status == LOWMEM_RECLAIM_RETRY &&
                    !(ctl->flags & OOM_SINGLE_SHOT)) {
                        unsigned int available = proc_get_mem_available();

                        if (available >= ctl->size) {
                                _I("[LMK] Memory restored: requested=%uMB available=%uMB\n",
                                        ctl->size, available);
                                ctl->status = LOWMEM_RECLAIM_DONE;
                                if (ctl->callback)
                                        ctl->callback(ctl);
                                LOWMEM_DESTROY_REQUEST(ctl);
                                LOWMEM_WORKER_IDLE(lmw);
                                continue;
                        }

                        if (LOWMEM_WORKER_IS_ACTIVE(lmw)) {
                                g_usleep(LMW_RETRY_WAIT_TIMEOUT_USEC);
                                ctl->flags |= OOM_REVISE;
                                goto process_again;
                        }
                }

                /*
                 * The ctl callback checks the available size again, and this
                 * is the last point in the reclaiming worker. Resourced sent
                 * SIGKILL to the victim processes, so it should wait some
                 * seconds until each process returns its memory.
                 */
                g_usleep(LMW_LOOP_WAIT_TIMEOUT_USEC);
                if (ctl->callback)
                        ctl->callback(ctl);

                /* The LMK worker owns all queued requests, so destroy it here */
                LOWMEM_DESTROY_REQUEST(ctl);
                LOWMEM_WORKER_IDLE(lmw);
        }
        g_async_queue_unref(lmw->queue);
        pthread_exit(NULL);
}

static void change_lowmem_state(unsigned int mem_state)
{
        cur_mem_state = mem_state;
        lmk_start_threshold = get_root_memcg_info()->threshold[MEM_LEVEL_OOM];

        resourced_notify(RESOURCED_NOTIFIER_MEM_LEVEL_CHANGED,
                (void *)&cur_mem_state);
}

/* Only apps can call this function;
 * that is, services cannot call it.
 */
static void lowmem_swap_memory(char *path)
{
        unsigned int available;

        if (cur_mem_state == MEM_LEVEL_HIGH)
                return;

        if (swap_get_state() != SWAP_ON)
                return;

        available = proc_get_mem_available();
        if (cur_mem_state != MEM_LEVEL_LOW &&
            available <= get_root_memcg_info()->threshold[MEM_LEVEL_LOW])
                swap_activate_act();

        resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
        memcg_swap_status = true;
}

void lowmem_trigger_swap(pid_t pid, char *path, bool move)
{
        int error;
        int oom_score_adj;
        int lowest_oom_score_adj;

        if (!path) {
                _E("[SWAP] Unknown memory cgroup path to swap");
                return;
        }

        /* In this case, the corresponding process will be moved to memory CGROUP_LOW.
         */
        if (move) {
                error = proc_get_oom_score_adj(pid, &oom_score_adj);
                if (error) {
                        _E("[SWAP] Cannot get oom_score_adj of pid (%d)", pid);
                        return;
                }

                lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(CGROUP_LOW);

                if (oom_score_adj < lowest_oom_score_adj) {
                        oom_score_adj = lowest_oom_score_adj;
                        /* At the end of this function, 'lowmem_swap_memory()' will be called */
                        proc_set_oom_score_adj(pid, oom_score_adj, find_app_info(pid));
                        return;
                }
        }

        /* The corresponding process is already managed per app or service.
         * In addition, if the process is already located in CGROUP_LOW, then just do swap.
         */
        resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
}

static void memory_level_send_system_event(int lv)
{
        bundle *b;
        const char *str;

        switch (lv) {
        case MEM_LEVEL_HIGH:
        case MEM_LEVEL_MEDIUM:
        case MEM_LEVEL_LOW:
                str = EVT_VAL_MEMORY_NORMAL;
                break;
        case MEM_LEVEL_CRITICAL:
                str = EVT_VAL_MEMORY_SOFT_WARNING;
                break;
        case MEM_LEVEL_OOM:
                str = EVT_VAL_MEMORY_HARD_WARNING;
                break;
        default:
                _E("Invalid state");
                return;
        }

        b = bundle_create();
        if (!b) {
                _E("Failed to create bundle");
                return;
        }

        bundle_add_str(b, EVT_KEY_LOW_MEMORY, str);
        eventsystem_send_system_event(SYS_EVENT_LOW_MEMORY, b);
        bundle_free(b);
}

static void high_mem_act(void)
{
        int ret, status;

        ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
        if (ret)
                _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
        if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
                vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
                              VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
                memory_level_send_system_event(MEM_LEVEL_HIGH);
        }

        change_lowmem_state(MEM_LEVEL_HIGH);

        if (swap_get_state() == SWAP_ON && memcg_swap_status) {
                resourced_notify(RESOURCED_NOTIFIER_SWAP_UNSET_LIMIT, get_memcg_info(CGROUP_LOW));
                memcg_swap_status = false;
        }
        if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
                resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
                        (void *)CGROUP_FREEZER_ENABLED);
}

static void swap_activate_act(void)
{
        int ret, status;

        ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
        if (ret)
                _E("vconf get failed %s", VCONFKEY_SYSMAN_LOW_MEMORY);

        if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
                vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
                                VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
                memory_level_send_system_event(MEM_LEVEL_LOW);
        }
        change_lowmem_state(MEM_LEVEL_LOW);
        if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
                resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
                        (void *)CGROUP_FREEZER_ENABLED);

        if (swap_get_state() != SWAP_ON)
                resourced_notify(RESOURCED_NOTIFIER_SWAP_ACTIVATE, NULL);
}

static void dedup_act(enum ksm_scan_mode mode)
{
        int ret, status;
        int data;

        if (dedup_get_state() != DEDUP_ONE_SHOT)
                return;

        if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
                resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
                                (void *)CGROUP_FREEZER_ENABLED);

        if (mode == KSM_SCAN_PARTIAL) {
                ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
                if (ret)
                        _E("vconf get failed %s", VCONFKEY_SYSMAN_LOW_MEMORY);

                if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
                        vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
                                        VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
                        memory_level_send_system_event(MEM_LEVEL_MEDIUM);
                }
                change_lowmem_state(MEM_LEVEL_MEDIUM);

                data = KSM_SCAN_PARTIAL;
                resourced_notify(RESOURCED_NOTIFIER_DEDUP_SCAN, &data);
        } else if (mode == KSM_SCAN_FULL) {
                data = KSM_SCAN_FULL;
                resourced_notify(RESOURCED_NOTIFIER_DEDUP_SCAN, &data);
        }
}

static void swap_compact_act(void)
{
        change_lowmem_state(MEM_LEVEL_CRITICAL);
        resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_CRITICAL);
        memory_level_send_system_event(MEM_LEVEL_CRITICAL);
}

static void medium_cb(struct lowmem_control *ctl)
{
        if (ctl->status == LOWMEM_RECLAIM_DONE)
                oom_popup = false;
        lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
}

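/* OOM-level action: mark the hard warning state, pause the freezer,
 * and queue a reclaim request that frees memory up to the root memcg's
 * threshold_leave watermark. */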
static void lmk_act(void)
{
        unsigned int available;
        int ret;
        int status = VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL;

        /*
         * Don't trigger the reclaim worker
         * if it is already running.
         */
        if (LOWMEM_WORKER_IS_RUNNING(&lmw))
                return;

        ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
        if (ret)
                _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);

        memory_level_send_system_event(MEM_LEVEL_OOM);
        if (status != VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING) {
                if (proc_get_freezer_status() == CGROUP_FREEZER_ENABLED)
                        resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
                                (void *)CGROUP_FREEZER_PAUSED);
                vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
                              VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING);
        }
        available = proc_get_mem_available();

        change_lowmem_state(MEM_LEVEL_OOM);

        if (available < get_root_memcg_info()->threshold_leave) {
                struct lowmem_control *ctl;

                ctl = LOWMEM_NEW_REQUEST();
                if (ctl) {
                        LOWMEM_SET_REQUEST(ctl, OOM_IN_DEPTH,
                                CGROUP_LOW, get_root_memcg_info()->threshold_leave,
                                num_max_victims, medium_cb);
                        lowmem_queue_request(&lmw, ctl);
                }
        }

        resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_OOM);

        /*
         * Flush memory cached by resourced itself: it can hold many
         * malloc fast bins as well as sqlite3 cache memory.
         */
        malloc_trim(0);
}

static void lowmem_trigger_memory_state_action(int mem_state)
{
        /*
         * Check if the state we want to set differs from the current one.
         * MEM_LEVEL_OOM must be exempt from this check;
         * otherwise the reclaim worker could never run again.
         */
        if (mem_state != MEM_LEVEL_OOM && cur_mem_state == mem_state)
                return;

        switch (mem_state) {
        case MEM_LEVEL_HIGH:
                high_mem_act();
                break;
        case MEM_LEVEL_MEDIUM:
                dedup_act(KSM_SCAN_PARTIAL);
                break;
        case MEM_LEVEL_LOW:
                swap_activate_act();
                break;
        case MEM_LEVEL_CRITICAL:
                dedup_act(KSM_SCAN_FULL);
                swap_compact_act();
                break;
        case MEM_LEVEL_OOM:
                lmk_act();
                break;
        default:
                assert(0);
        }
}

static void lowmem_dump_cgroup_procs(struct memcg_info *mi)
{
        int i;
        unsigned int size;
        pid_t pid;
        GArray *pids_array = NULL;

        cgroup_get_pids(mi->name, &pids_array);
        /* guard against a failed lookup leaving the array unallocated */
        if (!pids_array)
                return;

        for (i = 0; i < pids_array->len; i++) {
                pid = g_array_index(pids_array, pid_t, i);
                lowmem_mem_usage_uss(pid, &size);
                _I("pid = %d, size = %u KB", pid, size);
        }
        g_array_free(pids_array, true);
}

1480 static void memory_cgroup_proactive_lmk_act(enum cgroup_type type, struct memcg_info *mi)
1481 {
1482         struct lowmem_control *ctl;
1483
1484         /* To Do: only start to kill fg victim when no pending fg victim */
1485         lowmem_dump_cgroup_procs(mi);
1486
1487         ctl = LOWMEM_NEW_REQUEST();
1488         if (ctl) {
1489                 LOWMEM_SET_REQUEST(ctl, OOM_SINGLE_SHOT | OOM_IN_DEPTH, type,
1490                         mi->oomleave, num_max_victims, NULL);
1491                 lowmem_queue_request(&lmw, ctl);
1492         }
1493 }
1494
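/*
 * Map available memory (MB) to a memory level by walking the thresholds
 * from the most severe level down; MEM_LEVEL_OOM is matched against
 * lmk_start_threshold instead of its static threshold.
 */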
1495 static unsigned int check_mem_state(unsigned int available)
1496 {
1497         int mem_state;
1498         for (mem_state = MEM_LEVEL_MAX - 1; mem_state > MEM_LEVEL_HIGH; mem_state--) {
1499                 if (mem_state != MEM_LEVEL_OOM && available <= get_root_memcg_info()->threshold[mem_state])
1500                         break;
1501                 else if (mem_state == MEM_LEVEL_OOM && available <= lmk_start_threshold)
1502                         break;
1503         }
1504
1505         return mem_state;
1506 }
1507
1508 /*static int load_bg_reclaim_config(struct parse_result *result, void *user_data)
1509 {
1510         if (!result)
1511                 return RESOURCED_ERROR_INVALID_PARAMETER;
1512
1513         if (strncmp(result->section, MEM_BG_RECLAIM_SECTION, strlen(MEM_BG_RECLAIM_SECTION)+1))
1514                 return RESOURCED_ERROR_NONE;
1515
1516         if (!strncmp(result->name, MEM_BG_RECLAIM_STRING, strlen(MEM_BG_RECLAIM_STRING)+1)) {
1517                 if (!strncmp(result->value, "yes", strlen("yes")+1))
1518                         bg_reclaim = true;
1519                 else if (!strncmp(result->value, "no", strlen("no")+1))
1520                         bg_reclaim = false;
1521         }
1522
1523
1524         return RESOURCED_ERROR_NONE;
1525 }
1526
1527 static int load_popup_config(struct parse_result *result, void *user_data)
1528 {
1529         if (!result)
1530                 return RESOURCED_ERROR_INVALID_PARAMETER;
1531
1532         if (strncmp(result->section, MEM_POPUP_SECTION, strlen(MEM_POPUP_SECTION)+1))
1533                 return RESOURCED_ERROR_NONE;
1534
1535         if (!strncmp(result->name, MEM_POPUP_STRING, strlen(MEM_POPUP_STRING)+1)) {
1536                 if (!strncmp(result->value, "yes", strlen("yes")+1))
1537                         oom_popup_enable = true;
1538                 else if (!strncmp(result->value, "no", strlen("no")+1))
1539                         oom_popup_enable = false;
1540         }
1541
1542
1543         return RESOURCED_ERROR_NONE;
1544 }
1545
1546 static int load_mem_log_config(struct parse_result *result, void *user_data)
1547 {
1548         if (!result)
1549                 return RESOURCED_ERROR_INVALID_PARAMETER;
1550
1551         if (strncmp(result->section, MEM_LOGGING_SECTION, strlen(MEM_LOGGING_SECTION)+1))
1552                 return RESOURCED_ERROR_NONE;
1553
1554         if (!strncmp(result->name, "Enable", strlen("Enable")+1)) {
1555                 memlog_enabled = atoi(result->value);
1556         } else if (!strncmp(result->name, "LogPath", strlen("LogPath")+1)) {
1557                 memlog_path = strdup(result->value);
1558         } else if (!strncmp(result->name, "MaxNumLogfile", strlen("MaxNumLogfile")+1)) {
1559                 memlog_nr_max = atoi(result->value);
1560                 memlog_remove_batch_thres = (memlog_nr_max * 5) / 6;
1561         } else if (!strncmp(result->name, "PrefixMemps", strlen("PrefixMemps")+1)) {
1562                 memlog_prefix[MEMLOG_MEMPS] = strdup(result->value);
1563         } else if (!strncmp(result->name, "PrefixMempsMemLimit", strlen("PrefixMempsMemLimit")+1)) {
1564                 memlog_prefix[MEMLOG_MEMPS_MEMLIMIT] = strdup(result->value);
1565         }
1566
1567         return RESOURCED_ERROR_NONE;
1568 }
1569
1570 static int set_memory_config(struct parse_result *result, void *user_data)
1571 {
1572         if (!result)
1573                 return RESOURCED_ERROR_NONE;
1574
1575         if (strncmp(result->section, MEM_SECTION, strlen(MEM_SECTION)+1))
1576                 return RESOURCED_ERROR_NONE;
1577
1578         if (!strncmp(result->name, "ThresholdDedup", strlen("ThresholdDedup")+1)) {
1579                 int value = atoi(result->value);
1580                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, value);
1581         } else if (!strncmp(result->name, "ThresholdSwap", strlen("ThresholdSwap")+1)) {
1582                 int value = atoi(result->value);
1583                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, value);
1584         } else if (!strncmp(result->name, "ThresholdLow", strlen("ThresholdLow")+1)) {
1585                 int value = atoi(result->value);
1586                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, value);
1587         } else if (!strncmp(result->name, "ThresholdMedium", strlen("ThresholdMedium")+1)) {
1588                 int value = atoi(result->value);
1589                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, value);
1590         } else if (!strncmp(result->name, "ThresholdLeave", strlen("ThresholdLeave")+1)) {
1591                 int value = atoi(result->value);
1592                 memcg_set_leave_threshold(CGROUP_ROOT, value);
1593         } else if (!strncmp(result->name, "ThresholdRatioDedup", strlen("ThresholdRatioDedup")+1)) {
1594                 double ratio = atoi(result->value);
1595                 int value = (double)totalram * ratio / 100.0;
1596                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, BYTE_TO_MBYTE(value));
1597         } else if (!strncmp(result->name, "ThresholdRatioSwap", strlen("ThresholdRatioSwap")+1)) {
1598                 double ratio = atoi(result->value);
1599                 int value = (double)totalram * ratio / 100.0;
1600                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, BYTE_TO_MBYTE(value));
1601         } else if (!strncmp(result->name, "ThresholdRatioLow", strlen("ThresholdRatioLow")+1)) {
1602                 double ratio = atoi(result->value);
1603                 int value = (double)totalram * ratio / 100.0;
1604                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, BYTE_TO_MBYTE(value));
1605         } else if (!strncmp(result->name, "ThresholdRatioMedium", strlen("ThresholdRatioMedium")+1)) {
1606                 double ratio = atoi(result->value);
1607                 int value = (double)totalram * ratio / 100.0;
1608                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, BYTE_TO_MBYTE(value));
1609         } else if (!strncmp(result->name, "ThresholdRatioLeave", strlen("ThresholdRatioLeave")+1)) {
1610                 double ratio = atoi(result->value);
1611                 int value = (double)totalram * ratio / 100.0;
1612                 memcg_set_leave_threshold(CGROUP_ROOT, BYTE_TO_MBYTE(value));
1613         } else if (!strncmp(result->name, "ForegroundRatio", strlen("ForegroundRatio")+1)) {
1614                 float ratio = atof(result->value);
1615                 memcg_info_set_limit(get_memcg_info(CGROUP_HIGH), ratio, totalram);
1616         } else if (!strncmp(result->name, "BackgroundRatio", strlen("BackgroundRatio")+1)) {
1617                 float ratio = atof(result->value);
1618                 memcg_info_set_limit(get_memcg_info(CGROUP_MEDIUM), ratio, totalram);
1619         } else if (!strncmp(result->name, "LowRatio", strlen("LowRatio")+1)) {
1620                 float ratio = atof(result->value);
1621                 memcg_info_set_limit(get_memcg_info(CGROUP_LOW), ratio, totalram);
1622         } else if (!strncmp(result->name, "NumMaxVictims", strlen("NumMaxVictims")+1)) {
1623                 int value = atoi(result->value);
1624                 num_max_victims = value;
1625                 num_vict_between_check = value > MAX_MEMORY_CGROUP_VICTIMS/2
1626                                                 ? 3 : value > MAX_MEMORY_CGROUP_VICTIMS/4
1627                                                                 ? 2 : 1;
1628         } else if (!strncmp(result->name, "ProactiveThreshold", strlen("ProactiveThreshold")+1)) {
1629                 int value = atoi(result->value);
1630                 proactive_threshold = value;
1631         } else if (!strncmp(result->name, "ProactiveLeave", strlen("ProactiveLeave")+1)) {
1632                 int value = atoi(result->value);
1633                 proactive_leave = value;
1634         } else if (!strncmp(result->name, "EventLevel", strlen("EventLevel")+1)) {
1635                 if (strncmp(event_level, result->value, strlen(event_level)))
1636                         event_level = strdup(result->value);
1637                 if (!event_level)
1638                         return RESOURCED_ERROR_OUT_OF_MEMORY;
1639         } else if (!strncmp(result->name, "SWAPPINESS", strlen("SWAPPINESS")+1)) {
1640                 int value = atoi(result->value);
1641                 memcg_set_default_swappiness(value);
1642                 memcg_info_set_swappiness(get_memcg_info(CGROUP_ROOT), value);
1643         } else if (!strncmp(result->name, "FOREGROUND_SWAPPINESS", strlen("FOREGROUND_SWAPPINESS")+1)) {
1644                 int value = atoi(result->value);
1645                 memcg_info_set_swappiness(get_memcg_info(CGROUP_HIGH), value);
1646         } else if (!strncmp(result->name, "BACKGROUND_SWAPPINESS", strlen("BACKGROUND_SWAPPINESS")+1)) {
1647                 int value = atoi(result->value);
1648                 memcg_info_set_swappiness(get_memcg_info(CGROUP_MEDIUM), value);
1649         } else if (!strncmp(result->name, "LOW_SWAPPINESS", strlen("LOW_SWAPPINESS")+1)) {
1650                 int value = atoi(result->value);
1651                 memcg_info_set_swappiness(get_memcg_info(CGROUP_LOW), value);
1652         } else if (!strncmp(result->name, "NumFragSize", strlen("NumFragSize")+1)) {
1653                 fragmentation_size = atoi(result->value);
1654         }
1655
1656         return RESOURCED_ERROR_NONE;
1657 }*/
1658
1659 /* setup memcg parameters depending on total ram size. */
1660 static void setup_memcg_params(void)
1661 {
1662         unsigned long long total_ramsize;
1663
1664         get_total_memory();
1665         total_ramsize = BYTE_TO_MBYTE(totalram);
1666
1667         _D("Total: %llu MB", total_ramsize);
1668         if (total_ramsize <= MEM_SIZE_64) {
1669                 /* set thresholds for ram size 64M */
1670                 proactive_threshold = PROACTIVE_64_THRES;
1671                 proactive_leave = PROACTIVE_64_LEAVE;
1672                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_64_THRES_DEDUP);
1673                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_64_THRES_SWAP);
1674                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_64_THRES_LOW);
1675                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_64_THRES_MEDIUM);
1676                 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_64_THRES_LEAVE);
1677                 num_max_victims = CGROUP_ROOT_64_NUM_VICTIMS;
1678         } else if (total_ramsize <= MEM_SIZE_256) {
1679                 /* set thresholds for ram size 256M */
1680                 proactive_threshold = PROACTIVE_256_THRES;
1681                 proactive_leave = PROACTIVE_256_LEAVE;
1682                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_256_THRES_DEDUP);
1683                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_256_THRES_SWAP);
1684                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_256_THRES_LOW);
1685                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_256_THRES_MEDIUM);
1686                 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_256_THRES_LEAVE);
1687                 num_max_victims = CGROUP_ROOT_256_NUM_VICTIMS;
1688         } else if (total_ramsize <= MEM_SIZE_448) {
1689                 /* set thresholds for ram size 448M */
1690                 proactive_threshold = PROACTIVE_448_THRES;
1691                 proactive_leave = PROACTIVE_448_LEAVE;
1692                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_448_THRES_DEDUP);
1693                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_448_THRES_SWAP);
1694                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_448_THRES_LOW);
1695                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_448_THRES_MEDIUM);
1696                 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_448_THRES_LEAVE);
1697                 num_max_victims = CGROUP_ROOT_448_NUM_VICTIMS;
1698         } else if (total_ramsize <= MEM_SIZE_512) {
1699                 /* set thresholds for ram size 512M */
1700                 proactive_threshold = PROACTIVE_512_THRES;
1701                 proactive_leave = PROACTIVE_512_LEAVE;
1702                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_512_THRES_DEDUP);
1703                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_512_THRES_SWAP);
1704                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_512_THRES_LOW);
1705                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_512_THRES_MEDIUM);
1706                 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_512_THRES_LEAVE);
1707                 num_max_victims = CGROUP_ROOT_512_NUM_VICTIMS;
1708         } else if (total_ramsize <= MEM_SIZE_768) {
1709                 /* set thresholds for ram size 768M */
1710                 proactive_threshold = PROACTIVE_768_THRES;
1711                 proactive_leave = PROACTIVE_768_LEAVE;
1712                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_768_THRES_DEDUP);
1713                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_768_THRES_SWAP);
1714                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_768_THRES_LOW);
1715                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_768_THRES_MEDIUM);
1716                 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_768_THRES_LEAVE);
1717                 num_max_victims = CGROUP_ROOT_768_NUM_VICTIMS;
1718         } else if (total_ramsize <= MEM_SIZE_1024) {
1719                 /* set thresholds for ram size 1024M */
1720                 proactive_threshold = PROACTIVE_1024_THRES;
1721                 proactive_leave = PROACTIVE_1024_LEAVE;
1722                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_1024_THRES_DEDUP);
1723                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_1024_THRES_SWAP);
1724                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_1024_THRES_LOW);
1725                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_1024_THRES_MEDIUM);
1726                 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_1024_THRES_LEAVE);
1727                 num_max_victims = CGROUP_ROOT_1024_NUM_VICTIMS;
1728         } else if (total_ramsize <= MEM_SIZE_2048) {
1729                 proactive_threshold = PROACTIVE_2048_THRES;
1730                 proactive_leave = PROACTIVE_2048_LEAVE;
1731                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_2048_THRES_DEDUP);
1732                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_2048_THRES_SWAP);
1733                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_2048_THRES_LOW);
1734                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_2048_THRES_MEDIUM);
1735                 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_2048_THRES_LEAVE);
1736                 num_max_victims = CGROUP_ROOT_2048_NUM_VICTIMS;
1737         } else {
1738                 proactive_threshold = PROACTIVE_3072_THRES;
1739                 proactive_leave = PROACTIVE_3072_LEAVE;
1740                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_3072_THRES_DEDUP);
1741                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_3072_THRES_SWAP);
1742                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_3072_THRES_LOW);
1743                 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_3072_THRES_MEDIUM);
1744                 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_3072_THRES_LEAVE);
1745                 num_max_victims = CGROUP_ROOT_3072_NUM_VICTIMS;
1746         }
1747
1748 }
1749
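/*
 * Move a process into the memory cgroup matching its new oom_score_adj.
 * For an app's main pid the bookkeeping in proc_app_info is updated as
 * well; other pids are simply attached to the target cgroup unless a
 * per-app memory limit is active.
 */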
1750 static void lowmem_move_memcgroup(int pid, int next_oom_score_adj, struct proc_app_info *pai)
1751 {
1752         int cur_oom_score_adj;
1753         int cur_memcg_idx;
1754         struct memcg_info *mi;
1755         int next_memcg_idx = cgroup_get_type(next_oom_score_adj);
1756
1757         if (next_memcg_idx < CGROUP_VIP || next_memcg_idx > CGROUP_LOW) {
1758                 _E("cgroup type (%d) should not be called", next_memcg_idx);
1759                 return;
1760         }
1761         mi = get_memcg_info(next_memcg_idx);
1762
1763         if (!mi) {
1764                 return;
1765         }
1766
1767         if (!pai) {
1768                 cgroup_write_pid_fullpath(mi->name, pid);
1769                 return;
1770         }
1771
1772         /* parent pid */
1773         if (pai->main_pid == pid) {
1774                 cur_oom_score_adj = pai->memory.oom_score_adj;
1775                 cur_memcg_idx = cgroup_get_type(cur_oom_score_adj);
1776
1777                 /* -1 means that this pid is not yet registered in the memory
1778                  * cgroup; please refer to the proc_create_app_info() function.
1779                  */
1780                 if (cur_oom_score_adj != OOMADJ_APP_MAX + 10) {
1781                         /* VIP processes should not be asked to move. */
1782                         if (cur_memcg_idx <= CGROUP_VIP) {
1783                                 _I("[DEBUG] pid: %d, name: %s, cur_oom_score_adj: %d", pid, pai->appid, cur_oom_score_adj);
1784                                 _E("[DEBUG] current cgroup (%s) cannot be VIP or Root", convert_cgroup_type_to_str(cur_memcg_idx));
1785                                 return;
1786                         }
1787                 }
1788
1789                 _I("app (%s) memory cgroup move from %s to %s", pai->appid, convert_cgroup_type_to_str(cur_memcg_idx), convert_cgroup_type_to_str(next_memcg_idx));
1790
1791                 if (cur_oom_score_adj == next_oom_score_adj) {
1792                         _D("next oom_score_adj (%d) is the same as the current one", next_oom_score_adj);
1793                         return;
1794                 }
1795
1796                 proc_set_process_memory_state(pai, next_memcg_idx, mi, next_oom_score_adj);
1797
1798                 if (!lowmem_limit_move_cgroup(pai))
1799                         return;
1800
1801                 if (cur_memcg_idx == next_memcg_idx)
1802                         return;
1803
1804                 cgroup_write_pid_fullpath(mi->name, pid);
1805                 if (next_memcg_idx == CGROUP_LOW)
1806                         lowmem_swap_memory(get_memcg_info(CGROUP_LOW)->name);
1807         }
1808         /* child pid */
1809         else {
1810                 if (pai->memory.use_mem_limit)
1811                         return;
1812
1813                 cgroup_write_pid_fullpath(mi->name, pid);
1814         }
1815 }
1816
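/* Create the reclaim worker thread and its request queue, if not yet active. */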
1817 static int lowmem_activate_worker(void)
1818 {
1819         int ret = RESOURCED_ERROR_NONE;
1820
1821         if (LOWMEM_WORKER_IS_ACTIVE(&lmw)) {
1822                 return ret;
1823         }
1824
1825         lmw.queue = g_async_queue_new_full(lowmem_request_destroy);
1826         if (!lmw.queue) {
1827                 _E("Failed to create request queue\n");
1828                 return RESOURCED_ERROR_FAIL;
1829         }
1830         LOWMEM_WORKER_ACTIVATE(&lmw);
1831         ret = pthread_create(&lmw.worker_thread, NULL,
1832                 (void *)lowmem_reclaim_worker, (void *)&lmw);
1833         if (ret) {
1834                 LOWMEM_WORKER_DEACTIVATE(&lmw);
1835                 _E("Failed to create LMK thread: %d\n", ret);
1836         } else {
1837                 pthread_detach(lmw.worker_thread);
1838                 ret = RESOURCED_ERROR_NONE;
1839         }
1840         return ret;
1841 }
1842
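/*
 * Stop the reclaim worker: drain pending requests, then push a final
 * OOM_DROP request so the thread wakes up and exits.
 */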
1843 static void lowmem_deactivate_worker(void)
1844 {
1845         struct lowmem_control *ctl;
1846
1847         if (!LOWMEM_WORKER_IS_ACTIVE(&lmw))
1848                 return;
1849
1850         LOWMEM_WORKER_DEACTIVATE(&lmw);
1851         lowmem_drain_queue(&lmw);
1852
1853         ctl = LOWMEM_NEW_REQUEST();
1854         if (!ctl) {
1855                 _E("Critical - g_slice alloc failed - Lowmem cannot be deactivated");
1856                 return;
1857         }
1858         ctl->flags = OOM_DROP;
1859         g_async_queue_push(lmw.queue, ctl);
1860         g_async_queue_unref(lmw.queue);
1861 }
1862
1863 static int lowmem_press_eventfd_read(int fd)
1864 {
1865         uint64_t dummy_state;
1866
1867         return read(fd, &dummy_state, sizeof(dummy_state));
1868 }
1869
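/*
 * Pressure handler for the root cgroup: recompute the memory level from
 * the current availability and trigger the matching action.
 */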
1870 static void lowmem_press_root_cgroup_handler(void)
1871 {
1872         static unsigned int prev_available;
1873         unsigned int available;
1874         int mem_state;
1875
1876         available = proc_get_mem_available();
1877         if (prev_available == available)
1878                 return;
1879
1880         mem_state = check_mem_state(available);
1881         lowmem_trigger_memory_state_action(mem_state);
1882
1883         prev_available = available;
1884 }
1885
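/*
 * Pressure handler for the other cgroups: run a proactive LMK once the
 * cgroup's anonymous memory usage reaches its OOM threshold.
 */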
1886 static void lowmem_press_cgroup_handler(enum cgroup_type type, struct memcg_info *mi)
1887 {
1888         unsigned int usage, threshold;
1889         int ret;
1890
1891         ret = memcg_get_anon_usage(mi->name, &usage);
1892         if (ret) {
1893                 _D("failed to get anonymous memory usage");
1894                 return;
1895         }
1896
1897         threshold = mi->threshold[MEM_LEVEL_OOM];
1898         if (usage >= threshold)
1899                 memory_cgroup_proactive_lmk_act(type, mi);
1900         else
1901                 _I("anon pages %u MB < oom threshold %u MB", BYTE_TO_MBYTE(usage),
1902                                 BYTE_TO_MBYTE(threshold));
1903 }
1904
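/* Dispatch an eventfd wakeup to the handler of the cgroup owning the fd. */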
1905 static bool lowmem_press_eventfd_handler(int fd, void *data)
1906 {
1907         struct memcg_info *mi;
1908         enum cgroup_type type = CGROUP_ROOT;
1909
1910         // FIXME: probably shouldn't get ignored
1911         if (lowmem_press_eventfd_read(fd) < 0)
1912                 _E("Failed to read lowmem press event, %m\n");
1913
1914         for (type = CGROUP_ROOT; type < CGROUP_END; type++) {
1915                 if (!get_cgroup_tree(type) || !get_memcg_info(type))
1916                         continue;
1917                 mi = get_memcg_info(type);
1918                 if (fd == mi->evfd) {
1919                         /* call low memory handler for this memcg */
1920                         if (type == CGROUP_ROOT)
1921                                 lowmem_press_root_cgroup_handler();
1922                         else
1923                                 lowmem_press_cgroup_handler(type, mi);
1925                         return true;
1926                 }
1927         }
1928
1929         return true;
1930 }
1931
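/*
 * Register a memory-pressure eventfd for the given cgroup and hook it
 * into the fd handler loop; cgroups without a configured OOM threshold
 * are skipped.
 */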
1932 static int lowmem_press_register_eventfd(struct memcg_info *mi)
1933 {
1934         int evfd;
1935         const char *name = mi->name;
1936         static fd_handler_h handler;
1937
1938         if (mi->threshold[MEM_LEVEL_OOM] == LOWMEM_THRES_INIT)
1939                 return 0;
1940
1941         evfd = memcg_set_eventfd(name, MEMCG_EVENTFD_MEMORY_PRESSURE,
1942                         event_level);
1943
1944         if (evfd < 0) {
1945                 int saved_errno = errno;
1946                 _E("failed to register eventfd for %s cgroup", name);
1947                 return -saved_errno;
1948         }
1949
1950         mi->evfd = evfd;
1951
1952         _I("eventfd registered for %s cgroup", name);
1953         add_fd_read_handler(evfd, lowmem_press_eventfd_handler, NULL, NULL, &handler);
1954         return 0;
1955 }
1956
1957 static int lowmem_press_setup_eventfd(void)
1958 {
1959         unsigned int i;
1960
1961         for (i = CGROUP_ROOT; i < CGROUP_END; i++) {
1962                 if (!get_use_hierarchy(i))
1963                         continue;
1964
1965                 lowmem_press_register_eventfd(get_memcg_info(i));
1966         }
1967         return RESOURCED_ERROR_NONE;
1968 }
1969
1970 static void lowmem_force_reclaim_cb(struct lowmem_control *ctl)
1971 {
1972         lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
1973 }
1974
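/*
 * Queue a forced reclaim request. Non-positive arguments fall back to
 * defaults: MAX_MEMORY_CGROUP_VICTIMS victims, the CGROUP_LOW cgroup
 * and the root cgroup's leave threshold. A hypothetical caller could,
 * for instance, reclaim up to two victims down to 300 MB available:
 *
 *     lowmem_trigger_reclaim(0, 2, CGROUP_LOW, 300);
 */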
1975 int lowmem_trigger_reclaim(int flags, int victims, enum cgroup_type type, int threshold)
1976 {
1977         struct lowmem_control *ctl = LOWMEM_NEW_REQUEST();
1978
1979         if (!ctl)
1980                 return -ENOMEM;
1981
1982         flags |= OOM_FORCE | OOM_IN_DEPTH | OOM_SINGLE_SHOT;
1983         victims = victims > 0 ? victims : MAX_MEMORY_CGROUP_VICTIMS;
1984         type = type > 0 ? type : CGROUP_LOW;
1985         threshold = threshold > 0 ? threshold : get_root_memcg_info()->threshold_leave;
1986
1987         lowmem_change_memory_state(MEM_LEVEL_CRITICAL, 1);
1988         LOWMEM_SET_REQUEST(ctl, flags,
1989                 type, threshold, victims,
1990                 lowmem_force_reclaim_cb);
1991         lowmem_queue_request(&lmw, ctl);
1992
1993         return 0;
1994 }
1995
1996 void lowmem_trigger_swap_reclaim(enum cgroup_type type, int swap_size)
1997 {
1998         int size, victims;
1999
2000         victims = num_max_victims > MAX_PROACTIVE_HIGH_VICTIMS
2001                                  ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
2002
2003         size = get_root_memcg_info()->threshold_leave + BYTE_TO_MBYTE(swap_size);
2004         _I("reclaim from swap module, type : %d, size : %d, victims: %d", type, size, victims);
2005         lowmem_trigger_reclaim(0, victims, type, size);
2006 }
2007
2008 bool lowmem_fragmentated(void)
2009 {
2010         struct buddyinfo bi;
2011         int ret;
2012
2013         ret = proc_get_buddyinfo("Normal", &bi);
2014         if (ret < 0)
2015                 return false;
2016
2017         /*
2018          * fragmentation_size is the minimum weighted count of free high-order
2019          * buddy pages required in the "Normal" zone. If the weighted sum below
2020          * falls short of it, resourced treats kernel memory as fragmented.
2021          * The default value is zero on low-memory devices.
2022          */
2023         if (bi.page[PAGE_32K] + (bi.page[PAGE_64K] << 1) + (bi.page[PAGE_128K] << 2) +
2024                 (bi.page[PAGE_256K] << 3) < fragmentation_size) {
2025                 _I("fragmentation detected, need to execute proactive oom killer");
2026                 return true;
2027         }
2028         return false;
2029 }
2030
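/*
 * Proactive LMK at app launch: reclaim ahead of time when HEART history
 * data, the fragmentation state or the app's PROC_LARGEMEMORY flag
 * suggests the launch would push the system into the OOM range.
 */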
2031 static void lowmem_proactive_oom_killer(int flags, char *appid)
2032 {
2033         unsigned int before;
2034         int victims;
2035
2036         before = proc_get_mem_available();
2037
2038         /* Below the OOM threshold or above proactive_leave: let the regular oom killer handle it */
2039         if (before < get_root_memcg_info()->threshold[MEM_LEVEL_OOM] || before > proactive_leave)
2040                 return;
2041
2042         victims = num_max_victims > MAX_PROACTIVE_HIGH_VICTIMS
2043                                  ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
2044
2045 #ifdef HEART_SUPPORT
2046         /*
2047          * This branch is used only when the HEART module is compiled in and
2048          * its MEMORY module is enabled. Otherwise it is skipped.
2049          */
2050         struct heart_memory_data *md = heart_memory_get_memdata(appid, DATA_LATEST);
2051         if (md) {
2052                 unsigned int rss, after, size;
2053
2054                 rss = KBYTE_TO_MBYTE(md->avg_rss);
2055
2056                 free(md);
2057
2058                 after = before - rss;
2059                 /*
2060                  * after launching the app, ensure that available memory
2061                  * stays above the OOM threshold
2062                  */
2063                 if (after >= get_root_memcg_info()->threshold[MEM_LEVEL_OOM])
2064                         return;
2065
2066                 if (proactive_threshold - rss >= get_root_memcg_info()->threshold[MEM_LEVEL_OOM])
2067                         size = proactive_threshold;
2068                 else
2069                         size = rss + get_root_memcg_info()->threshold[MEM_LEVEL_OOM] + THRESHOLD_MARGIN;
2070
2071                 _D("history-based proactive LMK: avg rss %u MB, available %u MB, required %u MB",
2072                         rss, before, size);
2073                 lowmem_trigger_reclaim(0, victims, CGROUP_LOW, size);
2074
2075                 return;
2076         }
2077 #endif
2078
2079         /*
2080          * When there is no history data for the launching app, check the
2081          * current fragmentation state and the application manifest instead.
2082          * If resourced decides that proactive LMK is required, run the oom
2083          * killer based on a dynamic threshold.
2084          */
2085         if (lowmem_fragmentated())
2086                 goto reclaim;
2087
2088         /*
2089          * run the proactive oom killer only when available memory has
2090          * dropped below the dynamic proactive threshold
2091          */
2092         if (!proactive_threshold || before >= proactive_threshold)
2093                 return;
2094
2095         if (!(flags & PROC_LARGEMEMORY))
2096                 return;
2097
2098 reclaim:
2099         /*
2100          * free THRESHOLD_MARGIN MB more than strictly needed, because the
2101          * launching app keeps consuming memory in the meantime.
2102          */
2103         _D("Run threshold based proactive LMK: memory level to reach: %u\n",
2104                 proactive_leave + THRESHOLD_MARGIN);
2105         lowmem_trigger_reclaim(0, victims, CGROUP_LOW, proactive_leave + THRESHOLD_MARGIN);
2106 }
2107
2108 unsigned int lowmem_get_proactive_thres(void)
2109 {
2110         return proactive_threshold;
2111 }
2112
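/* Prelaunch notifier: trigger the proactive oom killer for non-service apps. */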
2113 static int lowmem_prelaunch_handler(void *data)
2114 {
2115         struct proc_status *ps = (struct proc_status *)data;
2116         struct proc_app_info *pai = ps->pai;
2117
2118         if (!pai || CHECK_BIT(pai->flags, PROC_SERVICEAPP))
2119                 return RESOURCED_ERROR_NONE;
2120
2121         lowmem_proactive_oom_killer(pai->flags, pai->appid);
2122         return RESOURCED_ERROR_NONE;
2123 }
2124
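/* MEM_CONTROL notifier: currently only handles cgroup move requests. */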
2125 int lowmem_control_handler(void *data)
2126 {
2127         struct lowmem_control_data *lowmem_data;
2128
2129         lowmem_data = (struct lowmem_control_data *)data;
2130         switch (lowmem_data->control_type) {
2131         case LOWMEM_MOVE_CGROUP:
2132                 lowmem_move_memcgroup((pid_t)lowmem_data->pid,
2133                                         lowmem_data->oom_score_adj, lowmem_data->pai);
2134                 break;
2135         default:
2136                 break;
2137         }
2138         return RESOURCED_ERROR_NONE;
2139 }
2140
2141 static int lowmem_bg_reclaim_handler(void *data)
2142 {
2143         if (swap_get_state() != SWAP_ON)
2144                 return RESOURCED_ERROR_NONE;
2145
2146         if (!bg_reclaim)
2147                 return RESOURCED_ERROR_NONE;
2148
2149         /*
2150          * Proactively reclaiming memory used by long-lived background processes
2151          * (such as widget instances) may be efficient on devices with limited
2152          * memory constraints. The pages used by such processes could be reclaimed
2153          * (if swap is enabled) earlier than they used to while minimizing the
2154          * impact on the user experience.
2155          */
2156         resourced_notify(RESOURCED_NOTIFIER_SWAP_START, get_memcg_info(CGROUP_MEDIUM)->name);
2157
2158         return RESOURCED_ERROR_NONE;
2159 }
2160
2161 static void load_configs(const char *path)
2162 {
2163 /*      if (config_parse(path, set_memory_config, NULL))
2164                 _E("(%s-mem) parse Fail", path);
2165
2166         if (config_parse(path, load_popup_config, NULL))
2167                 _E("(%s-popup) parse Fail", path);
2168
2169         if (config_parse(path, load_bg_reclaim_config, NULL))
2170                 _E("(%s-bg-reclaim) parse Fail", path);
2171
2172         if (config_parse(path, load_mem_log_config, NULL))
2173                 _E("(%s-mem-log) parse Fail", path);*/
2174
2175         free_memcg_conf();
2176 }
2177
2178 static void print_mem_configs(void)
2179 {
2180         /* print info of Memory section */
2181         for (int mem_lvl = 0; mem_lvl < MEM_LEVEL_MAX; mem_lvl++)
2182                 _I("set threshold for state '%s' to %u MB",
2183                    convert_memstate_to_str(mem_lvl), get_root_memcg_info()->threshold[mem_lvl]);
2184
2185         _I("set number of max victims as %d", num_max_victims);
2186         _I("set threshold leave to %u MB", get_root_memcg_info()->threshold_leave);
2187         _I("set proactive threshold to %u MB", proactive_threshold);
2188         _I("set proactive low memory killer leave to %u MB", proactive_leave);
2189
2190         /* print info of POPUP section */
2191         _I("oom popup is %s", oom_popup_enable == true ? "enabled" : "disabled");
2192
2193         /* print info of BackgroundReclaim section */
2194         _I("Background reclaim is %s", bg_reclaim == true ? "enabled" : "disabled");
2195
2196         /* print info of Logging section */
2197         _I("memory logging is %s", memlog_enabled == 1 ? "enabled" : "disabled");
2198         _I("memory logging path is %s", memlog_path);
2199         _I("the max number of memory logging is %d", memlog_nr_max);
2200         _I("the batch threshold of memory log is %d", memlog_remove_batch_thres);
2201         _I("prefix of memps is %s", memlog_prefix[MEMLOG_MEMPS]);
2202         _I("prefix of memlimit memps is %s", memlog_prefix[MEMLOG_MEMPS_MEMLIMIT]);
2203 }
2204
2205 /* TODO: do we need lowmem_fd_start and lowmem_fd_stop? */
2206 static int lowmem_init(void)
2207 {
2208         int ret = RESOURCED_ERROR_NONE;
2209
2210         _D("resourced memory init start");
2211
2212         /* init memcg */
2213         ret = cgroup_make_full_subdir(MEMCG_PATH);
2214         ret_value_msg_if(ret < 0, ret, "memory cgroup init failed\n");
2215         memcg_params_init();
2216
2217         setup_memcg_params();
2218
2219         /* default configuration */
2220         load_configs(MEM_CONF_FILE);
2221
2222         /* this function should be called after parsing configurations */
2223         memcg_write_params();
2224         print_mem_configs();
2225
2226         /* make a worker thread called low memory killer */
2227         ret = lowmem_activate_worker();
2228         if (ret) {
2229                 _E("oom thread create failed\n");
2230                 return ret;
2231         }
2232
2233         /* register threshold and event fd */
2234         ret = lowmem_press_setup_eventfd();
2235         if (ret) {
2236                 _E("eventfd setup failed");
2237                 return ret;
2238         }
2239
2240         lowmem_dbus_init();
2241         lowmem_limit_init();
2242         lowmem_system_init();
2243
2244         register_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
2245         register_notifier(RESOURCED_NOTIFIER_MEM_CONTROL, lowmem_control_handler);
2246         register_notifier(RESOURCED_NOTIFIER_LCD_OFF, lowmem_bg_reclaim_handler);
2247
2248         return ret;
2249 }
2250
2251 static int lowmem_exit(void)
2252 {
2253         if (strncmp(event_level, MEMCG_DEFAULT_EVENT_LEVEL, sizeof(MEMCG_DEFAULT_EVENT_LEVEL)))
2254                 free(event_level);
2255
2256         lowmem_deactivate_worker();
2257         lowmem_limit_exit();
2258         lowmem_system_exit();
2259
2260         unregister_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
2261         unregister_notifier(RESOURCED_NOTIFIER_MEM_CONTROL, lowmem_control_handler);
2262         unregister_notifier(RESOURCED_NOTIFIER_LCD_OFF, lowmem_bg_reclaim_handler);
2263
2264         return RESOURCED_ERROR_NONE;
2265 }
2266
2267 static int resourced_memory_init(void *data)
2268 {
2269         lowmem_ops = &memory_modules_ops;
2270         return lowmem_init();
2271 }
2272
2273 static int resourced_memory_finalize(void *data)
2274 {
2275         return lowmem_exit();
2276 }
2277
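/*
 * Re-evaluate the memory level and trigger its action; when 'force' is
 * set, 'state' is applied as-is. lowmem_trigger_reclaim(), for example,
 * forces MEM_LEVEL_CRITICAL before queueing its request:
 *
 *     lowmem_change_memory_state(MEM_LEVEL_CRITICAL, 1);
 */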
2278 void lowmem_change_memory_state(int state, int force)
2279 {
2280         int mem_state;
2281
2282         if (force) {
2283                 mem_state = state;
2284         } else {
2285                 unsigned int available = proc_get_mem_available();
2286                 mem_state = check_mem_state(available);
2287         }
2288
2289         lowmem_trigger_memory_state_action(mem_state);
2290 }
2291
2292 unsigned long lowmem_get_ktotalram(void)
2293 {
2294         return ktotalram;
2295 }
2296
2297 unsigned long lowmem_get_totalram(void)
2298 {
2299         return totalram;
2300 }
2301
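/*
 * Rebuild pai->memory from the kernel's view: find the memory cgroup
 * the app's main pid currently lives in by matching its cgroup path
 * against the known cgroup tree.
 */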
2302 void lowmem_restore_memcg(struct proc_app_info *pai)
2303 {
2304         char *cgpath;
2305         int index, ret;
2306         struct cgroup *cgroup = NULL;
2307         struct memcg_info *mi = NULL;
2308         pid_t pid = pai->main_pid;
2309
2310         ret = cgroup_pid_get_path("memory", pid, &cgpath);
2311         if (ret < 0)
2312                 return;
2313
2314         for (index = CGROUP_END-1; index >= CGROUP_ROOT; index--) {
2315                 cgroup = get_cgroup_tree(index);
2316                 if (!cgroup)
2317                         continue;
2318
2319                 mi = cgroup->memcg_info;
2320                 if (!mi)
2321                         continue;
2322
2323                 if (!strcmp(cgroup->hashname, ""))
2324                         continue;
2325                 if (strstr(cgpath, cgroup->hashname))
2326                         break;
2327         }
2328         pai->memory.memcg_idx = index;
2329         pai->memory.memcg_info = mi;
2330         if (strstr(cgpath, pai->appid))
2331                 pai->memory.use_mem_limit = true;
2332
2333         free(cgpath);
2334 }
2335
2336 static struct module_ops memory_modules_ops = {
2337         .priority       = MODULE_PRIORITY_HIGH,
2338         .name           = "lowmem",
2339         .init           = resourced_memory_init,
2340         .exit           = resourced_memory_finalize,
2341 };
2342
2343 MODULE_REGISTER(&memory_modules_ops)