4 * Copyright (c) 2012 - 2013 Samsung Electronics Co., Ltd. All rights reserved.
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
20 * @file lowmem_handler.c
22 * @desc lowmem handler using memcgroup
24 * Copyright (c) 2013 Samsung Electronics Co., Ltd. All rights reserved.
38 #include <sys/types.h>
41 #include <sys/eventfd.h>
42 #include <sys/sysinfo.h>
45 #include <sys/resource.h>
49 #include "proc-main.h"
50 #include "lowmem-handler.h"
51 #include "proc-process.h"
52 #include "swap-common.h"
53 #include "lowmem-common.h"
54 #include "resourced.h"
57 #include "config-parser.h"
59 #include "logging-common.h"
/* Filesystem paths and cgroup attribute names used throughout this handler. */
61 #define MEMINFO_PATH "/proc/meminfo"
62 #define MEMCG_PATH "/sys/fs/cgroup"
63 #define MEMPS_LOG_PATH "/var/log/"
64 #define MEMPS_LOG_FILE MEMPS_LOG_PATH"memps"
/* NOTE(review): no leading '/' — execl() would resolve this relative to the
 * current working directory. Presumably "/usr/bin/memps" was intended; confirm
 * against upstream before fixing. */
65 #define MEMPS_EXEC_PATH "usr/bin/memps"
66 #define MEMCG_MOVE_CHARGE_PATH "memory.move_charge_at_immigrate"
67 #define MEMCG_OOM_CONTROL_PATH "memory.oom_control"
68 #define MEMCG_LIMIT_PATH "memory.limit_in_bytes"
69 #define MEM_CONF_FILE "/etc/resourced/memory.conf"
70 #define MEM_CONF_SECTION "VIP_PROCESS"
71 #define MEM_CONF_PREDEFINE "PREDEFINE"
/* Byte-count conversions; BtoPAGE assumes a 4 KiB page (>> 12). */
73 #define BtoMB(x) ((x) >> 20)
74 #define BtoKB(x) ((x) >> 10)
75 #define BtoPAGE(x) ((x) >> 12)
/* Per-cgroup limit ratios and minimum limits (NO_LIMIT disables limiting). */
78 /* for memory cgroup, set no limit */
79 #define MEMCG_MEMORY_LIMIT_RATIO NO_LIMIT
80 #define MEMCG_FOREGROUND_LIMIT_RATIO 1
81 /* for background cgroup, set no limit */
82 #define MEMCG_BACKGROUND_LIMIT_RATIO NO_LIMIT
83 #define MEMCG_FOREGROUND_MIN_LIMIT UINT_MAX
84 #define MEMCG_BACKGROUND_MIN_LIMIT UINT_MAX
85 #define MEMCG_LOW_RATIO 0.8
86 #define MEMCG_MEDIUM_RATIO 0.96
87 #define MEMCG_FOREGROUND_THRES_LEAVE 100 /* MB */
88 #define MEMCG_FOREGROUND_LEAVE_RATIO 0.25
/* Victim-selection and OOM-killer tuning knobs (counts, waits, priorities). */
91 #define LOWMEM_PATH_MAX 100
92 #define MAX_MEMORY_CGROUP_VICTIMS 10
93 #define MAX_CGROUP_VICTIMS 1
94 #define OOM_TIMER_INTERVAL 2
95 #define OOM_MULTIKILL_WAIT (500*1000)
96 #define OOM_SIGKILL_WAIT 1
97 #define OOM_SCORE_POINT_WEIGHT 1500
98 #define OOM_KILLER_PRIORITY -20
99 #define MAX_FD_VICTIMS 10
100 #define MAX_SWAP_VICTIMS 5
101 #define MAX_MEMPS_LOGS 50
102 #define NUM_RM_LOGS 5
103 #define THRESHOLD_MARGIN 10 /* MB */
/* RAM-size buckets used to pick the threshold set below. */
105 #define MEM_SIZE_64 64 /* MB */
106 #define MEM_SIZE_256 256 /* MB */
107 #define MEM_SIZE_512 512 /* MB */
108 #define MEM_SIZE_1024 1024 /* MB */
109 #define MEM_SIZE_2048 2048 /* MB */
111 /* thresholds for 64M RAM*/
112 #define DYNAMIC_PROCESS_64_THRES 10 /* MB */
113 #define DYNAMIC_PROCESS_64_LEAVE 30 /* MB */
114 #define MEMCG_MEMORY_64_THRES_SWAP 15 /* MB */
115 #define MEMCG_MEMORY_64_THRES_LOW 8 /* MB */
116 #define MEMCG_MEMORY_64_THRES_MEDIUM 5 /* MB */
117 #define MEMCG_MEMORY_64_THRES_LEAVE 8 /* MB */
119 /* thresholds for 256M RAM */
120 #define DYNAMIC_PROCESS_256_THRES 50 /* MB */
121 #define DYNAMIC_PROCESS_256_LEAVE 80 /* MB */
122 #define MEMCG_MEMORY_256_THRES_SWAP 40 /* MB */
123 #define MEMCG_MEMORY_256_THRES_LOW 20 /* MB */
124 #define MEMCG_MEMORY_256_THRES_MEDIUM 10 /* MB */
125 #define MEMCG_MEMORY_256_THRES_LEAVE 20 /* MB */
127 /* threshold for 512M RAM */
128 #define DYNAMIC_PROCESS_512_THRES 80 /* MB */
129 #define DYNAMIC_PROCESS_512_LEAVE 100 /* MB */
130 #define DYNAMIC_PROCESS_512_THRESLAUNCH 60 /* MB */
131 #define DYNAMIC_PROCESS_512_LEAVELAUNCH 80 /* MB */
132 #define MEMCG_MEMORY_512_THRES_SWAP 100 /* MB */
133 #define MEMCG_MEMORY_512_THRES_LOW 50 /* MB */
134 #define MEMCG_MEMORY_512_THRES_MEDIUM 40 /* MB */
135 #define MEMCG_MEMORY_512_THRES_LEAVE 60 /* MB */
137 /* threshold for more than 1024M RAM */
138 #define DYNAMIC_PROCESS_1024_THRES 150 /* MB */
139 #define DYNAMIC_PROCESS_1024_LEAVE 300 /* MB */
140 #define MEMCG_MEMORY_1024_THRES_SWAP 300 /* MB */
141 #define MEMCG_MEMORY_1024_THRES_LOW 200 /* MB */
142 #define MEMCG_MEMORY_1024_THRES_MEDIUM 100 /* MB */
143 #define MEMCG_MEMORY_1024_THRES_LEAVE 150 /* MB */
145 /* threshold for more than 2048M RAM */
146 #define DYNAMIC_PROCESS_2048_THRES 200 /* MB */
147 #define DYNAMIC_PROCESS_2048_LEAVE 500 /* MB */
148 #define MEMCG_MEMORY_2048_THRES_SWAP 300 /* MB */
149 #define MEMCG_MEMORY_2048_THRES_LOW 200 /* MB */
150 #define MEMCG_MEMORY_2048_THRES_MEDIUM 160 /* MB */
151 #define MEMCG_MEMORY_2048_THRES_LEAVE 300 /* MB */
/* Runtime threshold tables, filled from config / RAM-size defaults above. */
153 static int thresholds[MEMNOTIFY_MAX_LEVELS];
154 static int dynamic_process_threshold[DYNAMIC_KILL_MAX];
155 static int dynamic_process_leave[DYNAMIC_KILL_MAX];
/* NOTE(review): the fields below belong to a struct (presumably
 * `struct memcg_class`) whose opening declaration is not visible in this
 * chunk — per-cgroup limits, thresholds, event string and victim comparator. */
165 unsigned int min_limit; /* minimum limit */
166 /* limit ratio, if don't want to set limit, use NO_LIMIT*/
168 unsigned int oomleave; /* leave memory usage */
169 char *cgroup_name; /* cgroup name */
170 unsigned int thres_low; /* low level threshold */
171 unsigned int thres_medium; /* medium level threshold */
172 unsigned int thres_leave; /* leave threshold */
173 /* vmpressure event string. If don't want to register event, use null */
175 /* compare function for selecting victims in each cgroup */
176 int (*compare_fn) (const struct task_info *, const struct task_info *);
/* One row of the state-transition table: on (cur -> new) run action(). */
179 struct lowmem_process_entry {
182 void (*action) (void);
188 pid_t pids[MAX_MEMORY_CGROUP_VICTIMS];
191 /* low memory action function for cgroup */
192 static void memory_cgroup_medium_act(int memcg_idx);
193 static int compare_mem_victims(const struct task_info *ta, const struct task_info *tb);
194 static int compare_bg_victims(const struct task_info *ta, const struct task_info *tb);
195 static int compare_fg_victims(const struct task_info *ta, const struct task_info *tb);
196 /* low memory action function */
197 static void normal_act(void);
198 static void swap_act(void);
199 static void low_act(void);
200 static void medium_act(void);
202 static Eina_Bool medium_cb(void *data);
/* Builds one lowmem_process_entry from two MEMNOTIFY_* level names. */
204 #define LOWMEM_ENTRY(c, n, act) \
205 { MEMNOTIFY_##c, MEMNOTIFY_##n, act}
/* State-transition table: which action runs for each (current, new) pair.
 * Transitions into the same state, and into NORMAL from NORMAL, are absent
 * on purpose — lowmem_process() only fires on a listed pair. */
207 static struct lowmem_process_entry lpe[] = {
208 LOWMEM_ENTRY(NORMAL, SWAP, swap_act),
209 LOWMEM_ENTRY(NORMAL, LOW, low_act),
210 LOWMEM_ENTRY(NORMAL, MEDIUM, medium_act),
211 LOWMEM_ENTRY(SWAP, NORMAL, normal_act),
212 LOWMEM_ENTRY(SWAP, LOW, low_act),
213 LOWMEM_ENTRY(SWAP, MEDIUM, medium_act),
214 LOWMEM_ENTRY(LOW, SWAP, swap_act),
215 LOWMEM_ENTRY(LOW, NORMAL, normal_act),
216 LOWMEM_ENTRY(LOW, MEDIUM, medium_act),
217 LOWMEM_ENTRY(MEDIUM, SWAP, swap_act),
218 LOWMEM_ENTRY(MEDIUM, NORMAL, normal_act),
219 LOWMEM_ENTRY(MEDIUM, LOW, low_act),
/* Per-cgroup configuration: root "memory" cgroup, three foreground cgroups
 * (which register the "medium" vmpressure event) and one background cgroup
 * (no event). Some initializer fields are not visible in this chunk. */
222 static struct memcg_class memcg_class[MEMCG_MAX_GROUPS] = {
223 {NO_LIMIT, MEMCG_MEMORY_LIMIT_RATIO,
226 0, "medium", /* register medium event*/
227 compare_mem_victims},
228 {MEMCG_FOREGROUND_MIN_LIMIT, MEMCG_FOREGROUND_LIMIT_RATIO,
229 0, "memory/foreground1",
231 MEMCG_FOREGROUND_THRES_LEAVE, "medium",
233 {MEMCG_FOREGROUND_MIN_LIMIT, MEMCG_FOREGROUND_LIMIT_RATIO,
234 0, "memory/foreground2",
236 MEMCG_FOREGROUND_THRES_LEAVE, "medium",
238 {MEMCG_FOREGROUND_MIN_LIMIT, MEMCG_FOREGROUND_LIMIT_RATIO,
239 0, "memory/foreground3",
241 MEMCG_FOREGROUND_THRES_LEAVE, "medium",
243 {MEMCG_BACKGROUND_MIN_LIMIT, MEMCG_BACKGROUND_LIMIT_RATIO,
244 0, "memory/background",
246 0, NULL, /* register no event*/
/* NOTE(review): `{-1, }` only sets evfd[0] to -1; the remaining elements are
 * zero-initialized, and 0 is a valid fd. Code comparing evfd[i] against -1
 * for i > 0 would misbehave — verify how unset entries are detected. */
250 static int evfd[MEMCG_MAX_GROUPS] = {-1, };
251 static int cur_mem_state = MEMNOTIFY_NORMAL;
252 static Ecore_Timer *oom_check_timer = NULL;
253 static Ecore_Timer *oom_sigkill_timer = NULL;
/* pid of the last foreground victim killed; 0 means none pending. */
254 static pid_t killed_fg_victim;
255 static int num_max_victims = MAX_MEMORY_CGROUP_VICTIMS;
256 struct victims killed_tasks = {0, {0, }};
/* Dedicated OOM-killer thread plus the mutex/cond pair used to signal it. */
258 static pthread_t oom_thread = 0;
259 static pthread_mutex_t oom_mutex = PTHREAD_MUTEX_INITIALIZER;
260 static pthread_cond_t oom_cond = PTHREAD_COND_INITIALIZER;
/* Total RAM in bytes and in KB, cached by get_total_memory(). */
262 static unsigned long totalram;
263 static unsigned long ktotalram;
265 static const struct module_ops memory_modules_ops;
266 static const struct module_ops *lowmem_ops;
/* Caches total system RAM (bytes and KB) from sysinfo(2) into the globals
 * above. The `si` declaration/sysinfo() call is not visible in this chunk. */
268 static inline void get_total_memory(void)
275 totalram = si.totalram;
276 ktotalram = BtoKB(totalram);
/* Returns available memory in KB parsed from /proc/meminfo: prefers the
 * kernel's MemAvailable line, otherwise falls back to MemFree + Cached.
 * NOTE(review): values are parsed with atoi() (no error reporting), and the
 * `free`/`cached` assignments plus fclose() are outside this visible span. */
280 unsigned int get_available(void)
285 unsigned int free = 0, cached = 0;
286 unsigned int available = 0;
288 fp = fopen(MEMINFO_PATH, "r");
291 _E("%s open failed, %d", buf, fp);
295 while (fgets(buf, PATH_MAX, fp) != NULL) {
296 if ((idx = strstr(buf, "MemFree:"))) {
297 idx += strlen("MemFree:");
/* skip to the first digit after the field label */
298 while (*idx < '0' || *idx > '9')
301 } else if ((idx = strstr(buf, "MemAvailable:"))) {
302 idx += strlen("MemAvailable:");
303 while (*idx < '0' || *idx > '9')
305 available = atoi(idx);
307 } else if((idx = strstr(buf, "Cached:"))) {
308 idx += strlen("Cached:");
309 while (*idx < '0' || *idx > '9')
/* fallback when MemAvailable was not present */
317 available = free + cached;
/* Reads a process's RSS (second field of /proc/<pid>/statm, in pages) into
 * *rss; the visible comment says it is then converted to KB. Returns a bool
 * success flag. NOTE(review): sprintf into a PATH_MAX buffer is bounded here
 * by the "%d" format, but snprintf would be the safer idiom. */
324 static bool get_mem_usage_by_pid(pid_t pid, unsigned int *rss)
327 char proc_path[PATH_MAX];
329 sprintf(proc_path, "/proc/%d/statm", pid);
330 fp = fopen(proc_path, "r");
334 if (fscanf(fp, "%*s %d", rss) < 1) {
341 /* convert page to Kb */
/* Reads memory.usage_in_bytes for the idx-th cgroup. On open/read failure
 * returns RESOURCED_ERROR_FAIL; the success-path parse/return is outside the
 * visible span. Note `buf` is reused for both the path and the read data. */
346 static unsigned int get_mem_usage(int idx)
349 char buf[LOWMEM_PATH_MAX] = {0,};
352 sprintf(buf, "%s/%s/memory.usage_in_bytes",
353 MEMCG_PATH, memcg_class[idx].cgroup_name);
357 _E("%s open failed, %d", buf, f);
358 return RESOURCED_ERROR_FAIL;
360 if (fgets(buf, 32, f) == NULL) {
361 _E("fgets failed\n");
363 return RESOURCED_ERROR_FAIL;
/* Sums active_anon + inactive_anon from the cgroup's memory.stat into
 * *result (anonymous memory is what the OOM logic treats as reclaim-proof). */
371 static int get_mem_usage_anon(int idx, unsigned int *result)
374 char buf[LOWMEM_PATH_MAX] = {0,};
375 char line[BUF_MAX] = {0, };
376 char name[30] = {0, };
377 unsigned int tmp, active_anon = 0, inactive_anon = 0;
379 sprintf(buf, "%s/%s/memory.stat",
380 MEMCG_PATH, memcg_class[idx].cgroup_name);
384 _E("%s open failed, %d", buf, f);
385 return RESOURCED_ERROR_FAIL;
387 while (fgets(line, BUF_MAX, f) != NULL) {
/* NOTE(review): "%s" into name[30] is unbounded — a long stat key would
 * overflow; "%29s" would bound it. */
388 if (sscanf(line, "%s %d", name, &tmp)) {
389 if (!strcmp(name, "inactive_anon")) {
391 } else if (!strcmp(name, "active_anon")) {
398 *result = active_anon + inactive_anon;
400 return RESOURCED_ERROR_NONE;
/* scandir() filter: keep only directory entries whose name contains "memps". */
403 static int memps_file_select(const struct dirent *entry)
405 return (strstr(entry->d_name, "memps") ? 1 : 0);
/* scandir() comparator: order log files by the timestamp suffix after the
 * last '_'. NOTE(review): strrchr() + 1 is dereferenced without a NULL check;
 * a matching name with no '_' would crash — the memps filter presumably
 * guarantees the format, but verify. */
408 int compare_func(const struct dirent **a, const struct dirent **b)
410 const char *fn_a = (*a)->d_name;
411 const char *fn_b = (*b)->d_name;
412 char *str_at = strrchr(fn_a, '_') + 1;
413 char *str_bt = strrchr(fn_b, '_') + 1;
415 return strcmp(str_at, str_bt);
/* Caps the number of memps logs in `dir`: once MAX_MEMPS_LOGS is reached,
 * removes the NUM_RM_LOGS oldest (timestamp-sorted) files. */
418 static void clear_logs(char *dir)
420 struct dirent **namelist;
424 n = scandir(dir, &namelist, memps_file_select, compare_func);
425 _D("num of log files %d", n);
426 if (n < MAX_MEMPS_LOGS) {
433 for (i = 0; i < n; i++) {
434 if (i < NUM_RM_LOGS) {
/* NOTE(review): unbounded strcat into fname — presumably fname is sized
 * and seeded with the dir path outside this visible span; confirm. */
436 strcat(fname, namelist[i]->d_name);
437 _D("remove log file %s", fname);
440 _E("%s file cannot removed", fname);
/* Snapshots per-process memory state by exec'ing memps into a timestamped
 * log file named "<file>_<victim>_<pid>_<YYYYMMDDhhmmss>". The fork that
 * precedes the execl() is outside the visible span. */
448 static void make_memps_log(char *file, pid_t pid, char *victim_name)
452 char new_log[BUF_MAX];
453 static pid_t old_pid;
461 if (localtime_r(&now, &cur_tm) == NULL) {
462 _E("Fail to get localtime");
466 snprintf(new_log, sizeof(new_log),
467 "%s_%s_%d_%.4d%.2d%.2d%.2d%.2d%.2d", file, victim_name,
468 pid, (1900 + cur_tm.tm_year), 1 + cur_tm.tm_mon,
469 cur_tm.tm_mday, cur_tm.tm_hour, cur_tm.tm_min,
/* NOTE(review): MEMPS_EXEC_PATH is "usr/bin/memps" (no leading '/'). */
473 execl(MEMPS_EXEC_PATH, MEMPS_EXEC_PATH, "-r", new_log, (char *)NULL);
/* Checks whether the cgroup's anonymous usage has dropped below its
 * "oomleave" watermark: RESOURCED_ERROR_NONE means enough memory was freed
 * (killing can stop), RESOURCED_ERROR_FAIL means usage is still too high. */
478 static int lowmem_check_current_state(int memcg_index)
480 unsigned int usage, oomleave;
483 oomleave = memcg_class[memcg_index].oomleave;
484 ret = get_mem_usage_anon(memcg_index, &usage);
487 _D("getting anonymous usage fails");
491 if (oomleave > usage) {
492 _D("%s : usage : %u, leave threshold : %u",
493 __func__, usage, oomleave);
494 return RESOURCED_ERROR_NONE;
496 _D("%s : usage : %u, leave threshold: %u",
497 __func__, usage, oomleave);
498 return RESOURCED_ERROR_FAIL;
/* Root-cgroup victim comparator: ranks tasks by oom_score_adj plus the
 * task's share of total RAM weighted by OOM_SCORE_POINT_WEIGHT, so huge
 * tasks can outrank even score -1000/-900 ones. The final return (comparing
 * pa/pb) is outside the visible span. */
502 static int compare_mem_victims(const struct task_info *ta, const struct task_info *tb)
509 * Weight task size ratio to totalram by OOM_SCORE_POINT_WEIGHT so that
510 * tasks with score -1000 or -900 could be selected as victims if they consumes
511 * memory more than 70% of totalram.
513 pa = (int)(ta->size * OOM_SCORE_POINT_WEIGHT) / ktotalram + ta->oom_score_adj;
514 pb = (int)(tb->size * OOM_SCORE_POINT_WEIGHT) / ktotalram + tb->oom_score_adj;
/* Background comparator: descending oom_score_adj first, then descending
 * task size (bigger/lower-priority tasks are killed first). */
519 static int compare_bg_victims(const struct task_info *ta, const struct task_info *tb)
522 * Firstly, sort by oom_score_adj
523 * Secondly, sort by task size
528 if (ta->oom_score_adj != tb->oom_score_adj)
529 return (tb->oom_score_adj - ta->oom_score_adj);
531 return ((int)(tb->size) - (int)(ta->size));
/* Foreground comparator: descending task size only. */
534 static int compare_fg_victims(const struct task_info *ta, const struct task_info *tb)
537 * only sort by task size
542 return ((int)(tb->size) - (int)(ta->size));
/* Collects up to max_victims kill candidates from cgroup `idx` into
 * `selected`, stopping once `should_be_freed` KB worth of tasks is gathered
 * (unless OOM_NOMEMORY_CHECK). Tasks are read from cgroup.procs, deduplicated
 * by process group, sorted with the cgroup's comparator, then copied out.
 * Returns the number of victims selected (num_victims — final return is
 * outside the visible span). */
545 static int lowmem_get_cgroup_victims(int idx, int max_victims, struct task_info *selected,
546 unsigned should_be_freed, int flags)
549 char buf[LOWMEM_PATH_MAX] = {0, };
552 unsigned total_victim_size = 0;
553 char appname[PATH_MAX] = {0, };
555 GArray *victim_candidates = NULL;
557 victim_candidates = g_array_new(false, false, sizeof(struct task_info));
559 /* if g_array_new fails, return the current number of victims */
560 if (victim_candidates == NULL)
/* the root memory cgroup enumerates system.slice; others read directly */
563 if (idx == MEMCG_MEMORY) {
564 sprintf(buf, "%s/%s/system.slice/cgroup.procs",
565 MEMCG_PATH, memcg_class[idx].cgroup_name);
570 sprintf(buf, "%s/%s/cgroup.procs",
571 MEMCG_PATH, memcg_class[idx].cgroup_name);
575 _E("%s open failed, %d", buf, f);
577 * if task read in this cgroup fails,
578 * return the current number of victims
584 while (fgets(buf, 32, f) != NULL) {
585 struct task_info new_victim;
588 unsigned int tsize = 0;
/* skip pids that vanished between the procs read and this query */
592 if (proc_get_oom_score_adj(tpid, &toom) < 0) {
593 _D("pid(%d) was already terminated", tpid);
597 if (!get_mem_usage_by_pid(tpid, &tsize)) {
598 _D("pid(%d) size is not available\n", tpid);
602 if(proc_get_cmdline(tpid, appname) == RESOURCED_ERROR_FAIL)
/* merge threads/children of an already-seen process group */
605 for (i = 0; i < victim_candidates->len; i++) {
606 struct task_info *tsk = &g_array_index(victim_candidates,
607 struct task_info, i);
608 if (getpgid(tpid) == tsk->pgid) {
/* promote the group's score if this member is killable (>0) */
610 if (tsk->oom_score_adj <= 0 && toom > 0) {
612 tsk->oom_score_adj = toom;
/* no existing group matched: record a fresh candidate */
618 if (i == victim_candidates->len) {
619 new_victim.pid = tpid;
620 new_victim.pgid = getpgid(tpid);
621 new_victim.oom_score_adj = toom;
622 new_victim.size = tsize;
624 g_array_append_val(victim_candidates, new_victim);
629 * if there is no tasks in this cgroup,
630 * return the current number of victims
632 if (victim_candidates->len == 0) {
633 g_array_free(victim_candidates, true);
638 g_array_sort(victim_candidates,
639 (GCompareFunc)memcg_class[idx].compare_fn);
641 for (i = 0; i < victim_candidates->len; i++) {
642 struct task_info tsk;
643 if (num_victims >= max_victims ||
644 (!(flags & OOM_NOMEMORY_CHECK) &&
645 total_victim_size >= should_be_freed)) {
649 tsk = g_array_index(victim_candidates, struct task_info, i);
/* protected (non-background-unlocked) tasks get extra guards */
651 if (tsk.oom_score_adj < OOMADJ_BACKGRD_UNLOCKED) {
652 unsigned int available;
653 if ((flags & OOM_FORCE) || !(flags & OOM_TIMER_CHECK)) {
654 _D("%d is skipped during force kill", tsk.pid);
657 available = get_available();
658 if ((flags & OOM_TIMER_CHECK) &&
659 (available > thresholds[MEMNOTIFY_MEDIUM] +
661 _D("available: %d MB, larger than threshold margin",
667 selected[num_victims].pid = tsk.pid;
668 selected[num_victims].pgid = tsk.pgid;
669 selected[num_victims].oom_score_adj = tsk.oom_score_adj;
670 selected[num_victims].size = tsk.size;
/* size is tracked in KB (>> 10) against should_be_freed */
671 total_victim_size += tsk.size >> 10;
675 g_array_free(victim_candidates, true);
/* True when flags describe the dynamic-process killer: forced kill with
 * memory-availability checking still enabled. */
682 static inline int is_dynamic_process_killer(int flags) {
683 return ((flags & OOM_FORCE) && !(flags & OOM_NOMEMORY_CHECK));
/* Kills killable (>= OOMADJ_BACKGRD_UNLOCKED) tasks residing in the swap
 * cgroup and returns the number killed. No-op unless swap is enabled. The
 * actual kill() call and fclose() are outside the visible span. */
686 static int lowmem_swap_cgroup_oom_killer(int flags)
689 char appname[PATH_MAX];
691 char buf[LOWMEM_PATH_MAX] = {0, };
693 unsigned int tsize = 0;
696 swap_type = swap_status(SWAP_GET_TYPE, NULL);
697 if (swap_type <= SWAP_OFF)
700 sprintf(buf, "%s/memory/swap/cgroup.procs",
705 _E("%s open failed, %d", buf, f);
706 return RESOURCED_ERROR_FAIL;
709 while (fgets(buf, 32, f) != NULL) {
715 if (proc_get_oom_score_adj(tpid, &toom) < 0) {
716 _D("pid(%d) was already terminated", tpid);
720 if (!get_mem_usage_by_pid(tpid, &tsize)) {
721 _D("pid(%d) size is not available\n", tpid);
725 /* To Do: skip by checking pgid? */
726 if (toom < OOMADJ_BACKGRD_UNLOCKED)
729 ret = proc_get_cmdline(tpid, appname);
730 if (ret == RESOURCED_ERROR_FAIL)
733 /* make memps log for killing application firstly */
735 make_memps_log(MEMPS_LOG_FILE, tpid, appname);
739 proc_remove_process_list(tpid);
741 _E("we killed, lowmem lv2 = %d (%s) score = %d, size = %u KB\n",
742 tpid, appname, toom, tsize);
/* dynamic killer caps the number of swap victims per pass */
743 if (is_dynamic_process_killer(flags) &&
744 count >= MAX_SWAP_VICTIMS)
750 _I("number of swap victims = %d\n", count);
/* give the kernel time to reclaim after a multi-kill burst */
751 if (!(flags & OOM_FORCE) && (count >= MAX_SWAP_VICTIMS))
752 usleep(OOM_MULTIKILL_WAIT);
757 /* Find victims: (SWAP -> ) BACKGROUND */
/* Root-cgroup victim selection: first reaps the swap cgroup, then walks the
 * remaining cgroups from background toward foreground until enough memory
 * (thres_leave - available) is queued to be freed. Count returns are outside
 * the visible span. */
758 static int lowmem_get_memory_cgroup_victims(struct task_info *selected,
761 int i, swap_victims, count = 0;
762 unsigned int available, should_be_freed = 0;
764 swap_victims = lowmem_swap_cgroup_oom_killer(flags);
/* forced path: only background victims, capped at MAX_FD_VICTIMS total */
766 if ((flags & OOM_FORCE) && swap_victims < MAX_FD_VICTIMS) {
767 count = lowmem_get_cgroup_victims(MEMCG_BACKGROUND,
768 MAX_FD_VICTIMS - swap_victims, selected,
770 _D("kill %d victims in %s cgroup",
771 count, memcg_class[MEMCG_BACKGROUND].cgroup_name);
775 available = get_available();
776 if (available < memcg_class[MEMCG_MEMORY].thres_leave)
777 should_be_freed = memcg_class[MEMCG_MEMORY].thres_leave - available;
779 _I("should_be_freed = %u MB", should_be_freed);
780 if (should_be_freed == 0 || swap_victims >= num_max_victims)
/* walk cgroups highest index first (background before foreground) */
783 for (i = MEMCG_MAX_GROUPS - 1; i >= 0; i--) {
/* during the retry timer, spare foreground apps if we are back above
 * the medium threshold */
784 if ((flags & OOM_TIMER_CHECK) && i < MEMCG_BACKGROUND &&
785 available > thresholds[MEMNOTIFY_MEDIUM]) {
786 _D("in timer, not kill fg app, available %u > threshold %u",
787 available, thresholds[MEMNOTIFY_MEDIUM]);
791 count = lowmem_get_cgroup_victims(i,
792 num_max_victims - swap_victims,
793 selected, should_be_freed, flags);
795 _D("kill %d victims in %s cgroup",
796 count, memcg_class[i].cgroup_name);
799 _D("There are no victims to be killed in %s cgroup",
800 memcg_class[i].cgroup_name);
/* Dispatch: root memory cgroup uses the multi-cgroup walk above; any other
 * cgroup selects at most MAX_CGROUP_VICTIMS from itself. */
807 static int lowmem_get_victims(int idx, struct task_info *selected,
812 if (idx == MEMCG_MEMORY)
813 count = lowmem_get_memory_cgroup_victims(selected, flags);
815 count = lowmem_get_cgroup_victims(idx,
816 MAX_CGROUP_VICTIMS, selected,
817 memcg_class[idx].thres_leave,
/* One-shot Ecore timer: escalates to SIGKILL any forced-kill victims that
 * were recorded in killed_tasks but may not have exited yet. */
823 static Eina_Bool send_sigkill_cb(void *data)
827 _D("kill by SIGKILL timer tasks num = %d", killed_tasks.num);
829 for (i = 0; i < killed_tasks.num; i++) {
830 kill(killed_tasks.pids[i], SIGKILL);
831 _D("killed %d by SIGKILL", killed_tasks.pids[i]);
834 killed_tasks.num = 0;
835 ecore_timer_del(oom_sigkill_timer);
836 oom_sigkill_timer = NULL;
837 return ECORE_CALLBACK_CANCEL;
/* Kills up to `count` previously-selected victims, skipping itself, memps
 * and crash-worker, logging a memps snapshot for the first kill. Stops early
 * (non-forced, non-root cgroup) once usage drops under the leave watermark.
 * Forced kills are queued in killed_tasks for the later SIGKILL sweep. */
841 static int lowmem_kill_victims(int memcg_idx,
842 int count, struct task_info *selected, int flags)
844 const pid_t self = getpid();
845 int pid, ret, oom_score_adj, i;
846 unsigned total_size = 0, size;
847 char appname[PATH_MAX];
849 for (i = 0; i < count; i++) {
850 /* check current memory status */
851 if (!(flags & OOM_FORCE) && memcg_idx != MEMCG_MEMORY &&
852 lowmem_check_current_state(memcg_idx) >= 0)
855 pid = selected[i].pid;
856 oom_score_adj = selected[i].oom_score_adj;
857 size = selected[i].size;
/* never kill ourselves or an invalid pid */
859 if (pid <= 0 || self == pid)
861 ret = proc_get_cmdline(pid, appname);
862 if (ret == RESOURCED_ERROR_FAIL)
/* memps is the logger we may have just spawned; crash-worker must
 * survive to collect reports */
865 if (!strcmp("memps", appname)) {
866 _E("memps(%d) was selected, skip it", pid);
869 if (!strcmp("crash-worker", appname)) {
870 _E("crash-worker(%d) was selected, skip it", pid);
874 /* make memps log for killing application firstly */
876 make_memps_log(MEMPS_LOG_FILE, pid, appname);
880 proc_remove_process_list(pid);
881 if (flags & OOM_FORCE) {
883 if (killed_tasks.num < MAX_MEMORY_CGROUP_VICTIMS)
884 killed_tasks.pids[killed_tasks.num++] = pid;
888 _E("we killed, force(%d), lowmem lv2 = %d (%s) score = %d, size = %u KB, victim total size = %u KB\n",
889 flags & OOM_FORCE, pid, appname, oom_score_adj, size, total_size);
/* remember a foreground victim so we don't re-kill before it exits */
891 if (memcg_idx >= MEMCG_FOREGROUND &&
892 memcg_idx < MEMCG_BACKGROUND)
893 killed_fg_victim = selected[0].pid;
895 if (oom_score_adj > OOMADJ_FOREGRD_UNLOCKED)
899 make_memps_log(MEMPS_LOG_FILE, pid, appname);
/* Entry point for one OOM pass on a cgroup: select victims, kill them,
 * rotate memps logs and poke the logging module. Returns the kill count. */
905 int lowmem_oom_killer_cb(int memcg_idx, int flags)
907 struct task_info selected[MAX_MEMORY_CGROUP_VICTIMS] = {{0, 0, OOMADJ_SU, 0}, };
910 /* get multiple victims from /sys/fs/cgroup/memory/.../tasks */
911 count = lowmem_get_victims(memcg_idx, selected, flags);
914 _D("get %s cgroup victim is failed",
915 memcg_class[memcg_idx].cgroup_name);
919 count = lowmem_kill_victims(memcg_idx, count, selected, flags);
920 clear_logs(MEMPS_LOG_PATH);
921 logging_control(LOGGING_UPDATE_STATE, NULL);
/* On-demand (e.g. app-launch) killer: if available memory is below the
 * configured dynamic threshold for `type`, force-kills swap-cgroup tasks and
 * then background tasks until dynamic_process_leave[type] MB is available,
 * arming the SIGKILL sweep timer afterwards. */
926 void lowmem_dynamic_process_killer(int type)
928 struct task_info selected[MAX_MEMORY_CGROUP_VICTIMS] = {{0, 0, OOMADJ_SU, 0}, };
930 unsigned available = get_available();
931 unsigned should_be_freed;
932 int flags = OOM_FORCE;
/* threshold 0 means this kill type is disabled */
935 if (!dynamic_process_threshold[type])
938 if (available >= dynamic_process_threshold[type])
/* temporarily force LOW state so swap logic engages */
941 change_memory_state(MEMNOTIFY_LOW, 1);
942 swap_victims = lowmem_swap_cgroup_oom_killer(flags);
943 if (swap_victims >= MAX_SWAP_VICTIMS)
946 available = get_available();
947 if (available >= dynamic_process_leave[type])
950 should_be_freed = dynamic_process_leave[type] - available;
951 _D("run dynamic killer, type=%d, available=%d, should_be_freed = %u", type, available, should_be_freed);
952 count = lowmem_get_cgroup_victims(MEMCG_BACKGROUND,
953 num_max_victims - swap_victims, selected,
954 should_be_freed, flags);
957 _D("get victim for dynamic process is failed");
961 lowmem_kill_victims(MEMCG_BACKGROUND, count, selected, flags);
962 change_memory_state(MEMNOTIFY_NORMAL, 0);
/* ensure forced victims really die: SIGKILL after OOM_SIGKILL_WAIT */
965 if (oom_sigkill_timer == NULL) {
966 _D("start timer to sigkill tasks");
968 ecore_timer_add(OOM_SIGKILL_WAIT, send_sigkill_cb,
971 killed_tasks.num = 0;
/* Dedicated OOM-killer thread body: runs at OOM_KILLER_PRIORITY niceness and
 * loops waiting on oom_cond; each signal from medium_act() triggers one
 * lowmem_oom_killer_cb(MEMCG_MEMORY) pass under oom_mutex. */
974 static void *lowmem_oom_killer_pthread(void *arg)
978 setpriority(PRIO_PROCESS, 0, OOM_KILLER_PRIORITY);
982 * When signalled by main thread,
983 * it starts lowmem_oom_killer_cb().
985 ret = pthread_mutex_lock(&oom_mutex);
987 _E("oom thread::pthread_mutex_lock() failed, %d", ret);
991 ret = pthread_cond_wait(&oom_cond, &oom_mutex);
993 _E("oom thread::pthread_cond_wait() failed, %d", ret);
994 pthread_mutex_unlock(&oom_mutex);
998 _I("oom thread conditional signal received and start");
999 lowmem_oom_killer_cb(MEMCG_MEMORY, OOM_NONE);
1000 _I("lowmem_oom_killer_cb finished");
1002 ret = pthread_mutex_unlock(&oom_mutex);
1004 _E("oom thread::pthread_mutex_unlock() failed, %d", ret);
1009 /* Now our thread finishes - cleanup TID */
/* Maps a MEMNOTIFY_* level to a printable name for state-change logging.
 * (Return statements are outside the visible span.) */
1015 static char *convert_to_str(int mem_state)
1018 switch (mem_state) {
1019 case MEMNOTIFY_NORMAL:
1022 case MEMNOTIFY_SWAP:
1028 case MEMNOTIFY_MEDIUM:
/* Records a new low-memory state, logging the transition; no-op when the
 * state is unchanged. */
1037 static void change_lowmem_state(unsigned int mem_state)
1039 if (cur_mem_state == mem_state)
1042 _I("[LOW MEM STATE] %s ==> %s", convert_to_str(cur_mem_state),
1043 convert_to_str(mem_state));
1044 cur_mem_state = mem_state;
/* SWAP-state action helper: moves the current swap candidate pid into the
 * swap cgroup and (re)starts the swap module as needed. Skipped entirely in
 * NORMAL state. */
1047 static void lowmem_swap_memory(void)
1052 if (cur_mem_state == MEMNOTIFY_NORMAL)
1055 swap_type = swap_status(SWAP_GET_TYPE, NULL);
1057 if (swap_type == SWAP_ON) {
1060 pid = (pid_t)swap_status(SWAP_GET_CANDIDATE_PID, NULL);
1063 _I("swap cgroup entered : pid : %d", (int)pid);
1064 resourced_notify(RESOURCED_NOTIFIER_SWAP_MOVE_CGROUP, (void*)&pid);
1066 if (swap_status(SWAP_GET_STATUS, NULL) == SWAP_OFF)
1067 resourced_notify(RESOURCED_NOTIFIER_SWAP_RESTART, NULL);
1068 resourced_notify(RESOURCED_NOTIFIER_SWAP_START, NULL);
/* Transition action -> NORMAL: reset the vconf low-memory key to NORMAL if
 * it isn't already, then record the state. */
1073 static void normal_act(void)
1077 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
1079 _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
1080 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL)
1081 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
1082 VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
1084 change_lowmem_state(MEMNOTIFY_NORMAL);
/* Transition action -> SWAP: vconf stays at NORMAL (swap level is internal),
 * only the internal state changes. */
1087 static void swap_act(void)
1091 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
1093 _E("vconf get failed %s", VCONFKEY_SYSMAN_LOW_MEMORY);
1095 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL)
1096 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
1097 VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL)
1098 change_lowmem_state(MEMNOTIFY_SWAP);
/* Transition action -> LOW: publish SOFT_WARNING via vconf (unless already
 * set) and record the state. */
1101 static void low_act(void)
1105 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
1108 _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
1110 change_lowmem_state(MEMNOTIFY_LOW);
1112 /* Since vconf for soft warning could be set during low memory check,
1113 * we set it only when the current status is not soft warning.
1115 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_SOFT_WARNING)
1116 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
1117 VCONFKEY_SYSMAN_LOW_MEMORY_SOFT_WARNING)
/* Periodic retry timer armed by medium_act(): keeps invoking the OOM killer
 * every OOM_TIMER_INTERVAL seconds until available memory reaches the leave
 * threshold (or no further victims exist above the medium threshold). */
1120 static Eina_Bool medium_cb(void *data)
1122 unsigned int available;
1125 available = get_available();
1126 _D("available = %u, timer run until reaching leave threshold", available);
1128 if (available >= memcg_class[MEMCG_MEMORY].thres_leave && oom_check_timer != NULL) {
1129 ecore_timer_del(oom_check_timer);
1130 oom_check_timer = NULL;
1131 _D("oom_check_timer deleted after reaching leave threshold");
1133 return ECORE_CALLBACK_CANCEL;
1136 _I("cannot reach leave threshold, timer again");
1137 count = lowmem_oom_killer_cb(MEMCG_MEMORY, OOM_TIMER_CHECK);
1140 * After running oom killer in timer, but there is no victim,
1143 if (!count && available >= thresholds[MEMNOTIFY_MEDIUM] &&
1144 oom_check_timer != NULL) {
1145 ecore_timer_del(oom_check_timer);
1146 oom_check_timer = NULL;
1147 _D("oom_check_timer deleted, available %u > threshold %u",
1148 available, thresholds[MEMNOTIFY_MEDIUM]);
1150 return ECORE_CALLBACK_CANCEL;
1152 return ECORE_CALLBACK_RENEW;
/* Transition action -> MEDIUM: wake the OOM-killer thread (trylock so the
 * main loop never blocks if a kill pass is already running), publish
 * HARD_WARNING via vconf, and arm the medium_cb retry timer. */
1155 static void medium_act(void)
1159 change_lowmem_state(MEMNOTIFY_MEDIUM);
1161 /* signal to lowmem_oom_killer_pthread to start killer */
1162 ret = pthread_mutex_trylock(&oom_mutex);
1164 _E("medium_act::pthread_mutex_trylock() failed, %d, errno: %d", ret, errno);
1167 _I("oom mutex trylock success");
1168 pthread_cond_signal(&oom_cond);
1169 _I("send signal to oom killer thread");
1170 pthread_mutex_unlock(&oom_mutex);
1172 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
1173 VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING);
1175 if (oom_check_timer == NULL) {
1176 _D("timer run until reaching leave threshold");
1178 ecore_timer_add(OOM_TIMER_INTERVAL, medium_cb, (void *)NULL);
/* Looks up the (cur_mem_state -> mem_state) pair in the lpe[] transition
 * table and runs the matching action; unmatched transitions are ignored.
 * Always returns RESOURCED_ERROR_NONE. */
1184 static int lowmem_process(int mem_state)
1187 for (i = 0; i < ARRAY_SIZE(lpe); i++) {
1188 if ((cur_mem_state == lpe[i].cur_mem_state)
1189 && (mem_state == lpe[i].new_mem_state)) {
1190 _D("cur_mem_state = %d, new_mem_state = %d\n",
1191 cur_mem_state, mem_state);
1193 return RESOURCED_ERROR_NONE;
1198 return RESOURCED_ERROR_NONE;
/* Returns whether it is OK to kill another foreground victim: true when no
 * previous fg victim is pending, or the previous one has left the cgroup
 * (in which case killed_fg_victim is reset). False while the old victim is
 * still listed in the foreground cgroup.procs. */
1201 static bool is_fg_victim_killed(int memcg_idx)
1203 if (killed_fg_victim) {
1204 char buf[LOWMEM_PATH_MAX] = {0, };
1206 sprintf(buf, "%s/memory/foreground%d/cgroup.procs", MEMCG_PATH,
1208 f = fopen(buf, "r");
1210 _E("%s open failed, %d", buf, f);
1211 /* if file open fails, start to kill */
1215 while (fgets(buf, 32, f) != NULL) {
1216 pid_t pid = atoi(buf);
1219 * not yet removed from foreground cgroup,
1220 * so, not start to kill again
1222 if (killed_fg_victim == pid) {
1229 * in this case, memory is low even though the previous
1230 * fg victim was already killed. so, start to kill.
1233 killed_fg_victim = 0;
/* Debug aid: logs pid and RSS of every task currently in the given
 * foreground cgroup before a foreground kill. */
1240 static void show_foreground_procs(int memcg_idx) {
1241 char buf[LOWMEM_PATH_MAX] = {0, };
1243 sprintf(buf, "%s/memory/foreground%d/cgroup.procs", MEMCG_PATH,
1245 f = fopen(buf, "r");
1247 _E("%s open failed, %d", buf, f);
1248 /* if file open fails, start to kill */
1252 while (fgets(buf, 32, f) != NULL) {
1253 pid_t pid = atoi(buf);
1255 get_mem_usage_by_pid(pid, &size);
1256 _E("pid = %d, size = %u KB", pid, size);
/* Medium-pressure action for a single (foreground) cgroup: only starts a
 * kill pass if the previously killed fg victim is confirmed gone. */
1262 static void memory_cgroup_medium_act(int memcg_idx)
1264 _I("[LOW MEM STATE] memory cgroup %s oom state",
1265 memcg_class[memcg_idx].cgroup_name);
1267 /* only start to kill fg victim when no pending fg victim */
1268 if ((memcg_idx >= MEMCG_FOREGROUND && memcg_idx < MEMCG_BACKGROUND)
1269 && is_fg_victim_killed(memcg_idx)) {
1270 show_foreground_procs(memcg_idx);
1271 lowmem_oom_killer_cb(memcg_idx, OOM_NONE);
/* Drains one 8-byte counter value from an eventfd so it can re-trigger;
 * the value itself is discarded. */
1275 static unsigned int lowmem_eventfd_read(int fd)
1278 uint64_t dummy_state;
1279 ret = read(fd, &dummy_state, sizeof(dummy_state));
/* Maps available memory (KB) to a MEMNOTIFY_* level by scanning thresholds
 * from the most severe level down; falls through to NORMAL. */
1283 static unsigned int check_mem_state(unsigned int available)
1286 for (mem_state = MEMNOTIFY_MAX_LEVELS -1; mem_state > MEMNOTIFY_NORMAL; mem_state--) {
1287 if (available <= thresholds[mem_state])
/* Forces or re-evaluates the memory state: computes the state from current
 * availability (unless `force`) and runs the matching act function. */
1294 void change_memory_state(int state, int force)
1296 unsigned int available;
1302 available = get_available();
1303 mem_state = check_mem_state(available);
1304 _D("available = %u, mem_state = %d", available, mem_state);
1307 switch (mem_state) {
1308 case MEMNOTIFY_NORMAL:
1311 case MEMNOTIFY_SWAP:
1317 case MEMNOTIFY_MEDIUM:
/* Root-cgroup vmpressure handler: on each event, recompute the state from
 * availability and feed it through the lpe transition table. Skips work when
 * availability has not changed since the last event. */
1325 static void lowmem_handler(void)
1327 static unsigned int prev_available;
1328 unsigned int available;
1331 available = get_available();
1333 if (prev_available == available)
1336 mem_state = check_mem_state(available);
1337 lowmem_process(mem_state);
1338 prev_available = available;
/* Per-cgroup vmpressure handler: runs the medium action when the cgroup's
 * anonymous usage crosses its medium threshold. */
1341 static void lowmem_cgroup_handler(int memcg_idx)
1346 ret = get_mem_usage_anon(memcg_idx, &usage);
1349 _D("getting anonymous memory usage fails");
1353 if (usage >= memcg_class[memcg_idx].thres_medium)
1354 memory_cgroup_medium_act(memcg_idx);
1356 _I("anon page (%u) is under medium threshold (%u)",
1357 usage >> 20, memcg_class[memcg_idx].thres_medium >> 20);
/* Ecore fd callback for all registered eventfds: drains the fd, then routes
 * to the root handler or the per-cgroup handler by matching evfd[]. */
1360 static Eina_Bool lowmem_cb(void *data, Ecore_Fd_Handler *fd_handler)
1364 if (!ecore_main_fd_handler_active_get(fd_handler, ECORE_FD_READ)) {
1365 _E("ecore_main_fd_handler_active_get error , return\n");
1366 return ECORE_CALLBACK_CANCEL;
1369 fd = ecore_main_fd_handler_fd_get(fd_handler);
1371 _E("ecore_main_fd_handler_fd_get error , return\n");
1372 return ECORE_CALLBACK_CANCEL;
1374 lowmem_eventfd_read(fd);
1376 for (i = 0; i < MEMCG_MAX_GROUPS; i++) {
1377 if (fd == evfd[i]) {
1378 if (i == MEMCG_MEMORY) {
1381 lowmem_cgroup_handler(i);
1386 return ECORE_CALLBACK_RENEW;
1390 From memory.txt kernel document -
1391 To register a notifier, application need:
1392 - create an eventfd using eventfd(2)
1393 - open memory.oom_control file
1394 - write string like "<event_fd> <fd of memory.oom_control>"
1395 to cgroup.event_control
/* Registers a vmpressure ("medium") notification per cgroup that declares an
 * event_string: creates an eventfd, binds it to memory.pressure_level via
 * cgroup.event_control, and adds it to the Ecore main loop with lowmem_cb. */
1398 static int setup_eventfd(void)
1401 int cgfd, pressurefd, res, sz;
1402 char buf[LOWMEM_PATH_MAX] = {0,};
1405 for (i = 0; i < MEMCG_MAX_GROUPS; i++) {
/* cgroups with a NULL event string (e.g. background) register nothing */
1406 if (memcg_class[i].event_string == NULL)
1408 /* open cgroup.event_control */
1409 sprintf(buf, "%s/%s/cgroup.event_control",
1410 MEMCG_PATH, memcg_class[i].cgroup_name);
1411 cgfd = open(buf, O_WRONLY);
1413 _E("open event_control failed");
1414 return RESOURCED_ERROR_FAIL;
1417 /* register event pressure_level */
1418 sprintf(buf, "%s/%s/memory.pressure_level",
1419 MEMCG_PATH, memcg_class[i].cgroup_name);
1420 pressurefd = open(buf, O_RDONLY);
1421 if (pressurefd < 0) {
1422 _E("open pressure control failed");
1424 return RESOURCED_ERROR_FAIL;
1427 /* create an eventfd using eventfd(2)
1428 use same event fd for using ecore event loop */
/* NOTE(review): the flags argument of eventfd(2) expects EFD_* flags;
 * O_NONBLOCK happens to share EFD_NONBLOCK's value on Linux, but
 * EFD_NONBLOCK is the correct constant — confirm and fix. */
1429 evfd[i] = eventfd(0, O_NONBLOCK);
1431 _E("eventfd() error");
1434 return RESOURCED_ERROR_FAIL;
1438 /* write event fd low level */
1439 sz = sprintf(buf, "%d %d %s", evfd[i], pressurefd,
1440 memcg_class[i].event_string);
1442 res = write(cgfd, buf, sz);
1444 _E("write cgfd failed : %d for %s",
1445 res, memcg_class[i].cgroup_name);
1450 return RESOURCED_ERROR_FAIL;
1453 _I("register event fd success for %s cgroup",
1454 memcg_class[i].cgroup_name);
1455 ecore_main_fd_handler_add(evfd[i], ECORE_FD_READ,
1456 (Ecore_Fd_Cb)lowmem_cb, NULL, NULL, NULL);
1464 static int write_cgroup_node(const char *memcg_name,
1465 const char *file_name, unsigned int value)
1468 char buf[LOWMEM_PATH_MAX] = {0, };
1471 sprintf(buf, "%s/%s/%s", MEMCG_PATH, memcg_name, file_name);
1472 f = fopen(buf, "w");
1474 _E("%s open failed", buf);
1475 return RESOURCED_ERROR_FAIL;
1478 size = sprintf(buf, "%u", value);
1479 if (fwrite(buf, size, 1, f) != 1) {
1480 _E("fail fwrite %s\n", file_name);
1482 return RESOURCED_ERROR_FAIL;
1486 return RESOURCED_ERROR_NONE;
1489 void set_threshold(int level, int thres)
1491 thresholds[level] = thres;
1495 void set_leave_threshold(int thres)
1497 memcg_class[MEMCG_MEMORY].thres_leave = thres;
1501 void set_foreground_ratio(float ratio)
1504 for (i = MEMCG_FOREGROUND; i < MEMCG_BACKGROUND; i++)
1505 memcg_class[i].limit_ratio = ratio;
1509 static int load_mem_config(struct parse_result *result, void *user_data)
1515 if (strcmp(result->section, MEM_CONF_SECTION))
1516 return RESOURCED_ERROR_NONE;
1518 if (!strcmp(result->name, MEM_CONF_PREDEFINE)) {
1519 pid = find_pid_from_cmdline(result->value);
1521 proc_set_oom_score_adj(pid, OOMADJ_SERVICE_MIN);
1523 return RESOURCED_ERROR_NONE;
1526 static int set_memory_config(const char * section_name, const struct parse_result *result)
1528 if (!result || !section_name)
1531 if (strcmp(result->section, section_name))
1532 return RESOURCED_ERROR_NONE;
1534 if (!strcmp(result->name, "ThresholdSwap")) {
1535 int value = atoi(result->value);
1536 set_threshold(MEMNOTIFY_SWAP, value);
1537 } else if (!strcmp(result->name, "ThresholdLow")) {
1538 int value = atoi(result->value);
1539 set_threshold(MEMNOTIFY_LOW, value);
1540 } else if (!strcmp(result->name, "ThresholdMedium")) {
1541 int value = atoi(result->value);
1542 set_threshold(MEMNOTIFY_MEDIUM, value);
1543 } else if (!strcmp(result->name, "ThresholdLeave")) {
1544 int value = atoi(result->value);
1545 set_leave_threshold(value);
1546 } else if (!strcmp(result->name, "ForegroundRatio")) {
1547 float value = atof(result->value);
1548 set_foreground_ratio(value);
1549 } else if (!strcmp(result->name, "NumMaxVictims")) {
1550 int value = atoi(result->value);
1551 num_max_victims = value;
1552 _D("set number of max victims as %d", num_max_victims);
1553 } else if (!strcmp(result->name, "DynamicThreshold")) {
1554 int value = atoi(result->value);
1555 dynamic_process_threshold[DYNAMIC_KILL_LARGEHEAP] = value;
1556 _D("set dynamic process threshold as %d",
1557 dynamic_process_threshold[DYNAMIC_KILL_LARGEHEAP]);
1558 } else if (!strcmp(result->name, "DynamicLeave")) {
1559 int value = atoi(result->value);
1560 dynamic_process_leave[DYNAMIC_KILL_LARGEHEAP] = value;
1561 _D("set dynamic process leave threshold as %d",
1562 dynamic_process_leave[DYNAMIC_KILL_LARGEHEAP]);
1563 } else if (!strcmp(result->name, "DynamicThresholdLaunch")) {
1564 int value = atoi(result->value);
1565 dynamic_process_threshold[DYNAMIC_KILL_LUNCH] = value;
1566 _D("set dynamic process threshold as %d",
1567 dynamic_process_threshold[DYNAMIC_KILL_LUNCH]);
1568 } else if (!strcmp(result->name, "DynamicLeaveLaunch")) {
1569 int value = atoi(result->value);
1570 dynamic_process_leave[DYNAMIC_KILL_LUNCH] = value;
1571 _D("set dynamic process leave threshold as %d",
1572 dynamic_process_leave[DYNAMIC_KILL_LUNCH]);
1574 return RESOURCED_ERROR_NONE;
/*
 * Thin config_parse() adapters: one per supported RAM size, each binding
 * set_memory_config() to its own "MemoryNNN" section of memory.conf.
 */
static int memory_load_64_config(struct parse_result *result, void *user_data)
{
	return set_memory_config("Memory64", result);
}

static int memory_load_256_config(struct parse_result *result, void *user_data)
{
	return set_memory_config("Memory256", result);
}

static int memory_load_512_config(struct parse_result *result, void *user_data)
{
	return set_memory_config("Memory512", result);
}

static int memory_load_1024_config(struct parse_result *result, void *user_data)
{
	return set_memory_config("Memory1024", result);
}

static int memory_load_2048_config(struct parse_result *result, void *user_data)
{
	return set_memory_config("Memory2048", result);
}
1602 /* init thresholds depending on total ram size. */
1603 static void init_thresholds(void)
1606 unsigned long total_ramsize = BtoMB(totalram);
1607 _D("Total : %lu MB", total_ramsize);
1609 if (total_ramsize <= MEM_SIZE_64) {
1610 /* set thresholds for ram size 64M */
1611 dynamic_process_threshold[DYNAMIC_KILL_LARGEHEAP] = DYNAMIC_PROCESS_64_THRES;
1612 dynamic_process_leave[DYNAMIC_KILL_LARGEHEAP] = DYNAMIC_PROCESS_64_LEAVE;
1613 set_threshold(MEMNOTIFY_SWAP, MEMCG_MEMORY_64_THRES_SWAP);
1614 set_threshold(MEMNOTIFY_LOW, MEMCG_MEMORY_64_THRES_LOW);
1615 set_threshold(MEMNOTIFY_MEDIUM, MEMCG_MEMORY_64_THRES_MEDIUM);
1616 set_leave_threshold(MEMCG_MEMORY_64_THRES_LEAVE);
1617 config_parse(MEM_CONF_FILE, memory_load_64_config, NULL);
1618 } else if (total_ramsize <= MEM_SIZE_256) {
1619 /* set thresholds for ram size 256M */
1620 dynamic_process_threshold[DYNAMIC_KILL_LARGEHEAP] = DYNAMIC_PROCESS_256_THRES;
1621 dynamic_process_leave[DYNAMIC_KILL_LARGEHEAP] = DYNAMIC_PROCESS_256_LEAVE;
1622 set_threshold(MEMNOTIFY_SWAP, MEMCG_MEMORY_256_THRES_SWAP);
1623 set_threshold(MEMNOTIFY_LOW, MEMCG_MEMORY_256_THRES_LOW);
1624 set_threshold(MEMNOTIFY_MEDIUM, MEMCG_MEMORY_256_THRES_MEDIUM);
1625 set_leave_threshold(MEMCG_MEMORY_256_THRES_LEAVE);
1626 config_parse(MEM_CONF_FILE, memory_load_256_config, NULL);
1627 } else if (total_ramsize <= MEM_SIZE_512) {
1628 /* set thresholds for ram size 512M */
1629 dynamic_process_threshold[DYNAMIC_KILL_LARGEHEAP] = DYNAMIC_PROCESS_512_THRES;
1630 dynamic_process_leave[DYNAMIC_KILL_LARGEHEAP] = DYNAMIC_PROCESS_512_LEAVE;
1631 dynamic_process_threshold[DYNAMIC_KILL_LUNCH] = DYNAMIC_PROCESS_512_THRESLAUNCH;
1632 dynamic_process_leave[DYNAMIC_KILL_LUNCH] = DYNAMIC_PROCESS_512_LEAVELAUNCH;
1633 set_threshold(MEMNOTIFY_SWAP, MEMCG_MEMORY_512_THRES_SWAP);
1634 set_threshold(MEMNOTIFY_LOW, MEMCG_MEMORY_512_THRES_LOW);
1635 set_threshold(MEMNOTIFY_MEDIUM, MEMCG_MEMORY_512_THRES_MEDIUM);
1636 set_leave_threshold(MEMCG_MEMORY_512_THRES_LEAVE);
1637 config_parse(MEM_CONF_FILE, memory_load_512_config, NULL);
1638 } else if (total_ramsize <= MEM_SIZE_1024) {
1639 /* set thresholds for ram size more than 1G */
1640 dynamic_process_threshold[DYNAMIC_KILL_LARGEHEAP] = DYNAMIC_PROCESS_1024_THRES;
1641 dynamic_process_leave[DYNAMIC_KILL_LARGEHEAP] = DYNAMIC_PROCESS_1024_LEAVE;
1642 set_threshold(MEMNOTIFY_SWAP, MEMCG_MEMORY_1024_THRES_SWAP);
1643 set_threshold(MEMNOTIFY_LOW, MEMCG_MEMORY_1024_THRES_LOW);
1644 set_threshold(MEMNOTIFY_MEDIUM, MEMCG_MEMORY_1024_THRES_MEDIUM);
1645 set_leave_threshold(MEMCG_MEMORY_1024_THRES_LEAVE);
1646 config_parse(MEM_CONF_FILE, memory_load_1024_config, NULL);
1648 dynamic_process_threshold[DYNAMIC_KILL_LARGEHEAP] = DYNAMIC_PROCESS_2048_THRES;
1649 dynamic_process_leave[DYNAMIC_KILL_LARGEHEAP] = DYNAMIC_PROCESS_2048_LEAVE;
1650 set_threshold(MEMNOTIFY_SWAP, MEMCG_MEMORY_2048_THRES_SWAP);
1651 set_threshold(MEMNOTIFY_LOW, MEMCG_MEMORY_2048_THRES_LOW);
1652 set_threshold(MEMNOTIFY_MEDIUM, MEMCG_MEMORY_2048_THRES_MEDIUM);
1653 set_leave_threshold(MEMCG_MEMORY_2048_THRES_LEAVE);
1654 config_parse(MEM_CONF_FILE, memory_load_2048_config, NULL);
1657 for (i = MEMNOTIFY_SWAP; i < MEMNOTIFY_MAX_LEVELS; i++)
1658 _I("set threshold for %d to %u", i, thresholds[i]);
1660 _I("set thres_leave to %u", memcg_class[MEMCG_MEMORY].thres_leave);
1661 _I("set dynamic process threshold to %u", dynamic_process_threshold[DYNAMIC_KILL_LARGEHEAP]);
1662 _I("set dynamic process leave to %u", dynamic_process_leave[DYNAMIC_KILL_LARGEHEAP]);
1665 static int create_foreground_memcg(void)
1668 char buf[LOWMEM_PATH_MAX] = {0, };
1669 for (i = MEMCG_FOREGROUND; i < MEMCG_MAX_GROUPS; i++) {
1670 sprintf(buf, "%s/%s", MEMCG_PATH, memcg_class[i].cgroup_name);
1671 if (mkdir(buf, 0755) && errno != EEXIST) {
1672 _E("mkdir %s failed, errno %d", buf, errno);
1673 return RESOURCED_ERROR_FAIL;
1675 _I("%s is successfuly created", buf);
1677 return RESOURCED_ERROR_NONE;
1680 static int init_memcg(void)
1682 unsigned int i, limit;
1683 _D("Total : %lu", totalram);
1684 int ret = RESOURCED_ERROR_NONE;
1686 for (i = 0; i < MEMCG_MAX_GROUPS; i++) {
1687 /* enable cgroup move */
1688 ret = write_cgroup_node(memcg_class[i].cgroup_name,
1689 MEMCG_MOVE_CHARGE_PATH, 3);
1693 /* for memcg with NO_LIMIT, do not set limit for cgroup limit */
1694 if (memcg_class[i].limit_ratio == NO_LIMIT)
1697 /* disable memcg OOM-killer */
1698 ret = write_cgroup_node(memcg_class[i].cgroup_name,
1699 MEMCG_OOM_CONTROL_PATH, 1);
1703 /* write limit_in_bytes */
1704 limit = (unsigned int)(memcg_class[i].limit_ratio*(float)totalram);
1705 if (limit > memcg_class[i].min_limit)
1706 limit = memcg_class[i].min_limit;
1707 ret = write_cgroup_node(memcg_class[i].cgroup_name,
1708 MEMCG_LIMIT_PATH, limit);
1712 _I("set %s's limit to %u", memcg_class[i].cgroup_name, limit);
1714 if (BtoMB(totalram) < MEM_SIZE_512 &&
1715 (i >= MEMCG_FOREGROUND && i < MEMCG_BACKGROUND)) {
1716 memcg_class[i].thres_leave = limit * MEMCG_FOREGROUND_LEAVE_RATIO;
1717 _I("set foreground%d leave %u for limit %u",
1718 i, memcg_class[i].thres_leave, limit);
1721 /* set threshold and oomleave for each memcg */
1722 memcg_class[i].thres_low =
1723 (unsigned int)(limit * MEMCG_LOW_RATIO);
1724 memcg_class[i].thres_medium =
1725 (unsigned int)(limit * MEMCG_MEDIUM_RATIO);
1726 memcg_class[i].oomleave =
1727 limit - (memcg_class[i].thres_leave << 20);
/*
 * Sample current available memory and react when it sits between the LOW
 * and SWAP thresholds while the state machine is not already in
 * MEMNOTIFY_SWAP.
 * NOTE(review): the body of the if-branch is not present in this chunk of
 * the file; the action taken on the condition (presumably entering the
 * swap path) cannot be confirmed here — verify against upstream before
 * relying on this function.
 */
static void lowmem_check(void)
{
	unsigned int available;

	available = get_available();
	_D("available = %u", available);

	/* between SWAP and LOW thresholds, outside the SWAP state */
	if(cur_mem_state != MEMNOTIFY_SWAP &&
		(available <= thresholds[MEMNOTIFY_SWAP] &&
		available > thresholds[MEMNOTIFY_LOW])) {
1748 static int find_foreground_cgroup(struct proc_process_info_t *process_info) {
1749 int fg, min_fg = -1;
1750 unsigned int min_usage = UINT_MAX;
1753 * if this process group is already in one of the foreground cgroup,
1754 * put all of the process in this group into the same cgroup.
1756 if (process_info && process_info->memcg_idx >= MEMCG_FOREGROUND &&
1757 process_info->memcg_idx < MEMCG_FOREGROUND + NUM_FOREGROUND)
1758 return process_info->memcg_idx;
1761 * if any of the process in this group is not in foreground,
1762 * find foreground cgroup with minimum usage
1764 for (fg = MEMCG_FOREGROUND; fg < MEMCG_BACKGROUND; fg++) {
1766 usage = get_mem_usage(fg);
1768 /* select foreground memcg with no task first*/
1772 /* select forground memcg with minimum usage */
1773 if (usage > 0 && min_usage > usage) {
1780 return RESOURCED_ERROR_FAIL;
1785 static void lowmem_move_memcgroup(int pid, int oom_score_adj)
1787 char buf[LOWMEM_PATH_MAX] = {0,};
1789 int size, background = 0;
1790 unsigned long swap_args[1] = {0,};
1791 struct proc_process_info_t *process_info =
1792 find_process_info(NULL, pid, NULL);
1794 if (oom_score_adj >= OOMADJ_BACKGRD_LOCKED) {
1795 sprintf(buf, "%s/memory/background/cgroup.procs", MEMCG_PATH);
1796 proc_set_process_info_memcg(process_info, MEMCG_BACKGROUND);
1798 } else if (oom_score_adj >= OOMADJ_FOREGRD_LOCKED &&
1799 oom_score_adj < OOMADJ_BACKGRD_LOCKED) {
1800 int ret = find_foreground_cgroup(process_info);
1801 if (ret == RESOURCED_ERROR_FAIL) {
1802 _E("cannot find foreground cgroup");
1805 sprintf(buf, "%s/memory/foreground%d/cgroup.procs", MEMCG_PATH, ret);
1806 proc_set_process_info_memcg(process_info, ret);
1810 swap_args[0] = (unsigned long)pid;
1811 if (!swap_status(SWAP_CHECK_PID, swap_args) || !background) {
1812 _D("buf : %s, pid : %d, score : %d", buf, pid, oom_score_adj);
1813 f = fopen(buf, "w");
1815 _E("%s open failed", buf);
1818 size = sprintf(buf, "%d", pid);
1819 if (fwrite(buf, size, 1, f) != 1)
1820 _E("fwrite cgroup tasks : %d\n", pid);
1825 lowmem_swap_memory();
1829 static void lowmem_cgroup_foregrd_manage(int currentpid)
1832 struct proc_process_info_t *process_info =
1833 find_process_info(NULL, currentpid, NULL);
1838 gslist_for_each_item(iter, process_info->pids) {
1839 struct pid_info_t *pid_info = (struct pid_info_t *)(iter->data);
1841 if (pid_info->type == RESOURCED_APP_TYPE_GROUP)
1842 lowmem_move_memcgroup(pid_info->pid, OOMADJ_FOREGRD_UNLOCKED);
1846 static int oom_thread_create(void)
1848 int ret = RESOURCED_ERROR_NONE;
1851 _I("oom thread %u already created", (unsigned)oom_thread);
1853 /* initialize oom killer thread */
1854 ret = pthread_create(&oom_thread, NULL, (void *)lowmem_oom_killer_pthread, (void *)NULL);
1856 _E("pthread creation for lowmem_oom_killer_pthread failed, %d\n", ret);
1859 pthread_detach(oom_thread);
1866 static int lowmem_app_launch_cb(void *data)
1868 struct proc_status *p_data = (struct proc_status*)data;
1869 struct proc_process_info_t *process_info;
1871 ret_value_msg_if(p_data == NULL, RESOURCED_ERROR_FAIL,
1872 "Please provide valid argument!");
1873 process_info = (struct proc_process_info_t *)p_data->processinfo;
1875 if (process_info && !(process_info->type & PROC_LARGE_HEAP))
1876 lowmem_dynamic_process_killer(DYNAMIC_KILL_LUNCH);
1880 /* To Do: should we need lowmem_fd_start, lowmem_fd_stop ?? */
1881 int lowmem_init(void)
1883 int ret = RESOURCED_ERROR_NONE;
1885 ret = create_foreground_memcg();
1888 _E("create foreground memcgs failed");
1893 config_parse(MEM_CONF_FILE, load_mem_config, NULL);
1895 ret = oom_thread_create();
1897 _E("oom thread create failed\n");
1901 /* set default memcg value */
1904 _E("memory cgroup init failed");
1908 /* register threshold and event fd */
1909 ret = setup_eventfd();
1911 _E("eventfd setup failed");
1916 register_notifier(RESOURCED_NOTIFIER_APP_LAUNCH, lowmem_app_launch_cb);
1921 static int resourced_memory_control(void *data)
1923 int ret = RESOURCED_ERROR_NONE;
1924 struct lowmem_data_type *l_data;
1926 l_data = (struct lowmem_data_type *)data;
1927 switch(l_data->control_type) {
1928 case LOWMEM_MOVE_CGROUP:
1930 lowmem_move_memcgroup((pid_t)l_data->args[0], l_data->args[1]);
1932 case LOWMEM_MANAGE_FOREGROUND:
1934 lowmem_cgroup_foregrd_manage((pid_t)l_data->args[0]);
1941 static int resourced_memory_init(void *data)
1943 lowmem_ops = &memory_modules_ops;
1945 return lowmem_init();
1948 static int resourced_memory_finalize(void *data)
1950 unregister_notifier(RESOURCED_NOTIFIER_APP_LAUNCH, lowmem_app_launch_cb);
1951 return RESOURCED_ERROR_NONE;
1954 int lowmem_control(enum lowmem_control_type type, unsigned long *args)
1956 struct lowmem_data_type l_data;
1959 l_data.control_type = type;
1961 return lowmem_ops->control(&l_data);
1964 return RESOURCED_ERROR_NONE;
1967 static const struct module_ops memory_modules_ops = {
1968 .priority = MODULE_PRIORITY_NORMAL,
1970 .init = resourced_memory_init,
1971 .exit = resourced_memory_finalize,
1972 .control = resourced_memory_control,
1975 MODULE_REGISTER(&memory_modules_ops)