4 * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved.
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
20 * @file vmpressure-lowmem-handler.c
22 * @desc lowmem handler using memcgroup
24 * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
#include <dirent.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
38 #include <sys/types.h>
41 #include <sys/sysinfo.h>
43 #include <sys/resource.h>
46 #include <eventsystem.h>
51 #include "lowmem-handler.h"
52 #include "proc-common.h"
55 #include "resourced.h"
58 #include "config-parser.h"
60 #include "swap-common.h"
62 #include "memory-cgroup.h"
63 #include "heart-common.h"
64 #include "proc-main.h"
65 #include "dbus-handler.h"
67 #include "fd-handler.h"
68 #include "resourced-helper-worker.h"
69 #include "safe-kill.h"
70 #include "dedup-common.h"
72 #define LOWMEM_THRES_INIT 0
74 #define MEMPS_EXEC_PATH "usr/bin/memps"
75 #define MEM_CONF_FILE RD_CONFIG_FILE(limiter)
76 #define MEM_SECTION "Memory"
77 #define MEM_VIP_SECTION "VIP_PROCESS"
78 #define MEM_VIP_PREDEFINE "PREDEFINE"
79 #define MEM_POPUP_SECTION "POPUP"
80 #define MEM_POPUP_STRING "oom_popup"
81 #define MEM_BG_RECLAIM_SECTION "BackgroundReclaim"
82 #define MEM_BG_RECLAIM_STRING "AfterScreenDim"
83 #define MEM_LOGGING_SECTION "Logging"
86 #define MAX_VICTIMS_BETWEEN_CHECK 3
87 #define MAX_PROACTIVE_LOW_VICTIMS 2
88 #define MAX_PROACTIVE_HIGH_VICTIMS 4
89 #define FOREGROUND_VICTIMS 1
90 #define OOM_TIMER_INTERVAL 2
91 #define OOM_KILLER_PRIORITY -20
92 #define THRESHOLD_MARGIN 10 /* MB */
94 #define MEM_SIZE_64 64 /* MB */
95 #define MEM_SIZE_256 256 /* MB */
96 #define MEM_SIZE_448 448 /* MB */
97 #define MEM_SIZE_512 512 /* MB */
98 #define MEM_SIZE_768 768 /* MB */
99 #define MEM_SIZE_1024 1024 /* MB */
100 #define MEM_SIZE_2048 2048 /* MB */
102 /* thresholds for 64M RAM*/
103 #define PROACTIVE_64_THRES 10 /* MB */
104 #define PROACTIVE_64_LEAVE 30 /* MB */
105 #define CGROUP_ROOT_64_THRES_DEDUP 16 /* MB */
106 #define CGROUP_ROOT_64_THRES_SWAP 15 /* MB */
107 #define CGROUP_ROOT_64_THRES_LOW 8 /* MB */
108 #define CGROUP_ROOT_64_THRES_MEDIUM 5 /* MB */
109 #define CGROUP_ROOT_64_THRES_LEAVE 8 /* MB */
110 #define CGROUP_ROOT_64_NUM_VICTIMS 1
112 /* thresholds for 256M RAM */
113 #define PROACTIVE_256_THRES 50 /* MB */
114 #define PROACTIVE_256_LEAVE 80 /* MB */
115 #define CGROUP_ROOT_256_THRES_DEDUP 60 /* MB */
116 #define CGROUP_ROOT_256_THRES_SWAP 40 /* MB */
117 #define CGROUP_ROOT_256_THRES_LOW 20 /* MB */
118 #define CGROUP_ROOT_256_THRES_MEDIUM 10 /* MB */
119 #define CGROUP_ROOT_256_THRES_LEAVE 20 /* MB */
120 #define CGROUP_ROOT_256_NUM_VICTIMS 2
122 /* threshold for 448M RAM */
123 #define PROACTIVE_448_THRES 80 /* MB */
124 #define PROACTIVE_448_LEAVE 100 /* MB */
125 #define CGROUP_ROOT_448_THRES_DEDUP 120 /* MB */
126 #define CGROUP_ROOT_448_THRES_SWAP 100 /* MB */
127 #define CGROUP_ROOT_448_THRES_LOW 60 /* MB */
128 #define CGROUP_ROOT_448_THRES_MEDIUM 50 /* MB */
129 #define CGROUP_ROOT_448_THRES_LEAVE 70 /* MB */
130 #define CGROUP_ROOT_448_NUM_VICTIMS 5
132 /* threshold for 512M RAM */
133 #define PROACTIVE_512_THRES 100 /* MB */
134 #define PROACTIVE_512_LEAVE 80 /* MB */
135 #define CGROUP_ROOT_512_THRES_DEDUP 140 /* MB */
136 #define CGROUP_ROOT_512_THRES_SWAP 100 /* MB */
137 #define CGROUP_ROOT_512_THRES_LOW 70 /* MB */
138 #define CGROUP_ROOT_512_THRES_MEDIUM 60 /* MB */
139 #define CGROUP_ROOT_512_THRES_LEAVE 80 /* MB */
140 #define CGROUP_ROOT_512_NUM_VICTIMS 5
142 /* threshold for 768 RAM */
143 #define PROACTIVE_768_THRES 100 /* MB */
144 #define PROACTIVE_768_LEAVE 130 /* MB */
145 #define CGROUP_ROOT_768_THRES_DEDUP 180 /* MB */
146 #define CGROUP_ROOT_768_THRES_SWAP 150 /* MB */
147 #define CGROUP_ROOT_768_THRES_LOW 90 /* MB */
148 #define CGROUP_ROOT_768_THRES_MEDIUM 80 /* MB */
149 #define CGROUP_ROOT_768_THRES_LEAVE 100 /* MB */
150 #define CGROUP_ROOT_768_NUM_VICTIMS 5
152 /* threshold for more than 1024M RAM */
153 #define PROACTIVE_1024_THRES 230 /* MB */
154 #define PROACTIVE_1024_LEAVE 150 /* MB */
155 #define CGROUP_ROOT_1024_THRES_DEDUP 400 /* MB */
156 #define CGROUP_ROOT_1024_THRES_SWAP 300 /* MB */
157 #define CGROUP_ROOT_1024_THRES_LOW 120 /* MB */
158 #define CGROUP_ROOT_1024_THRES_MEDIUM 100 /* MB */
159 #define CGROUP_ROOT_1024_THRES_LEAVE 150 /* MB */
160 #define CGROUP_ROOT_1024_NUM_VICTIMS 5
162 /* threshold for more than 2048M RAM */
163 #define PROACTIVE_2048_THRES 200 /* MB */
164 #define PROACTIVE_2048_LEAVE 500 /* MB */
165 #define CGROUP_ROOT_2048_THRES_DEDUP 400 /* MB */
166 #define CGROUP_ROOT_2048_THRES_SWAP 300 /* MB */
167 #define CGROUP_ROOT_2048_THRES_LOW 200 /* MB */
168 #define CGROUP_ROOT_2048_THRES_MEDIUM 160 /* MB */
169 #define CGROUP_ROOT_2048_THRES_LEAVE 300 /* MB */
170 #define CGROUP_ROOT_2048_NUM_VICTIMS 10
172 /* threshold for more than 3072M RAM */
173 #define PROACTIVE_3072_THRES 300 /* MB */
174 #define PROACTIVE_3072_LEAVE 700 /* MB */
175 #define CGROUP_ROOT_3072_THRES_DEDUP 600 /* MB */
176 #define CGROUP_ROOT_3072_THRES_SWAP 500 /* MB */
177 #define CGROUP_ROOT_3072_THRES_LOW 400 /* MB */
178 #define CGROUP_ROOT_3072_THRES_MEDIUM 250 /* MB */
179 #define CGROUP_ROOT_3072_THRES_LEAVE 500 /* MB */
180 #define CGROUP_ROOT_3072_NUM_VICTIMS 10
182 static unsigned proactive_threshold;
183 static unsigned proactive_leave;
184 static unsigned lmk_start_threshold;
186 static char *event_level = MEMCG_DEFAULT_EVENT_LEVEL;
189 * Resourced Low Memory Killer
190 * NOTE: planned to be moved to a separate file.
192 /*-------------------------------------------------*/
193 #define OOM_TIMER_INTERVAL_SEC 2
194 #define LMW_LOOP_WAIT_TIMEOUT_MSEC OOM_TIMER_INTERVAL_SEC*(G_USEC_PER_SEC)
195 #define LMW_RETRY_WAIT_TIMEOUT_MSEC (G_USEC_PER_SEC)
197 struct lowmem_control {
199 * For each queued request the following properties
200 * are required with two exceptions:
201 * - status is being set by LMK
202 * - callback is optional
204 /* Processing flags*/
206 /* Indictator for OOM score of targeted processes */
207 enum cgroup_type type;
209 /* Desired size to be restored - level to be reached (MB)*/
211 /* Max number of processes to be considered */
213 /* Memory reclaim status */
216 * Optional - if set, will be triggered by LMK once the request
219 void (*callback) (struct lowmem_control *);
222 struct lowmem_worker {
223 pthread_t worker_thread;
229 static struct lowmem_worker lmw;
231 static int memlog_enabled;
232 static int memlog_nr_max = DEFAULT_MEMLOG_NR_MAX;
233 /* remove logfiles to reduce to this threshold.
234 * it is about five-sixths of the memlog_nr_max. */
235 static int memlog_remove_batch_thres = (DEFAULT_MEMLOG_NR_MAX * 5) / 6;
236 static char *memlog_path = DEFAULT_MEMLOG_PATH;
237 static char *memlog_prefix[MEMLOG_MAX];
239 #define LOWMEM_WORKER_IS_ACTIVE(_lmw) g_atomic_int_get(&(_lmw)->active)
240 #define LOWMEM_WORKER_ACTIVATE(_lmw) g_atomic_int_set(&(_lmw)->active, 1)
241 #define LOWMEM_WORKER_DEACTIVATE(_lmw) g_atomic_int_set(&(_lmw)->active, 0)
243 #define LOWMEM_WORKER_IS_RUNNING(_lmw) g_atomic_int_get(&(_lmw)->running)
244 #define LOWMEM_WORKER_RUN(_lmw) g_atomic_int_set(&(_lmw)->running, 1)
245 #define LOWMEM_WORKER_IDLE(_lmw) g_atomic_int_set(&(_lmw)->running, 0)
247 #define LOWMEM_NEW_REQUEST() g_slice_new0(struct lowmem_control)
249 #define LOWMEM_DESTROY_REQUEST(_ctl) \
250 g_slice_free(typeof(*(_ctl)), _ctl); \
252 #define LOWMEM_SET_REQUEST(c, __flags, __type, __size, __count, __cb) \
254 (c)->flags = __flags; (c)->type = __type; \
255 (c)->size = __size; (c)->count = __count; \
256 (c)->callback = __cb; \
260 #define APP_ATTR_PATH "/proc/%d/attr/current"
262 static int get_privilege(pid_t pid, char *name, size_t len)
269 snprintf(path, sizeof(path), APP_ATTR_PATH, pid);
271 fp = fopen(path, "r");
275 attr_len = fread(attr, 1, sizeof(attr) - 1, fp);
280 attr[attr_len] = '\0';
282 snprintf(name, len, "%s", attr);
/**
 * @brief Tell whether @pid belongs to an application, judging from its
 *        security attribute label.
 * @return 1 for an app, 0 for a known system/service label, -1 when the
 *         label cannot be read.
 */
static int is_app(pid_t pid)
{
	char attr[512];
	size_t len;
	int ret;

	ret = get_privilege(pid, attr, sizeof(attr));
	if (ret < 0) {
		_E("Failed to get privilege of PID(%d).", pid);
		return -1;
	}

	/* compare including the terminating NUL so prefixes do not match */
	len = strlen(attr) + 1;

	if (!strncmp("System", attr, len))
		return 0;

	if (!strncmp("User", attr, len))
		return 0;

	if (!strncmp("System::Privileged", attr, len))
		return 0;

	return 1;
}
313 static void lowmem_queue_request(struct lowmem_worker *lmw,
314 struct lowmem_control *ctl)
316 if (LOWMEM_WORKER_IS_ACTIVE(lmw))
317 g_async_queue_push(lmw->queue, ctl);
321 static void lowmem_drain_queue(struct lowmem_worker *lmw)
323 struct lowmem_control *ctl;
325 g_async_queue_lock(lmw->queue);
326 while ((ctl = g_async_queue_try_pop_unlocked(lmw->queue))) {
329 LOWMEM_DESTROY_REQUEST(ctl);
331 g_async_queue_unlock(lmw->queue);
334 static void lowmem_request_destroy(gpointer data)
336 struct lowmem_control *ctl = (struct lowmem_control*) data;
340 LOWMEM_DESTROY_REQUEST(ctl);
343 /*-------------------------------------------------*/
345 /* low memory action function for cgroup */
346 static void memory_cgroup_proactive_lmk_act(enum cgroup_type type, struct memcg_info *mi);
347 /* low memory action function */
348 static void high_mem_act(void);
349 static void swap_activate_act(void);
350 static void swap_compact_act(void);
351 static void lmk_act(void);
354 static size_t cur_mem_state = MEM_LEVEL_HIGH;
355 static int num_max_victims = MAX_MEMORY_CGROUP_VICTIMS;
356 static int num_vict_between_check = MAX_VICTIMS_BETWEEN_CHECK;
358 static unsigned long totalram;
359 static unsigned long ktotalram;
361 static struct module_ops memory_modules_ops;
362 static const struct module_ops *lowmem_ops;
363 static bool oom_popup_enable;
364 static bool oom_popup;
365 static bool memcg_swap_status;
366 static bool bg_reclaim;
367 static int fragmentation_size;
369 static const char *convert_cgroup_type_to_str(int type)
371 static const char *type_table[] =
372 {"/", "VIP", "High", "Medium", "Lowest"};
373 if (type >= CGROUP_ROOT && type <= CGROUP_LOW)
374 return type_table[type];
379 static const char *convert_status_to_str(int status)
381 static const char *status_table[] =
382 {"none", "done", "drop", "cont", "retry", "next_type"};
383 if(status >= LOWMEM_RECLAIM_NONE && status <= LOWMEM_RECLAIM_NEXT_TYPE)
384 return status_table[status];
385 return "error status";
/*
 * Map a MEM_LEVEL_* state to a human-readable label for logging.
 * NOTE(review): the state_table initializer appears truncated in this
 * fragment ("mem low" is the last visible entry, and the embedded line
 * numbers jump 390 -> 392) - confirm the table has one string per
 * MEM_LEVEL_* value up to MEM_LEVEL_MAX, otherwise the indexed lookup
 * below can read past the end of the table. The out-of-range fallback
 * return is also missing from this view.
 */
388 static const char *convert_memstate_to_str(int mem_state)
390 static const char *state_table[] = {"mem normal", "mem dedup", "mem swap", "mem low",
392 if (mem_state >= 0 && mem_state < MEM_LEVEL_MAX)
393 return state_table[mem_state];
397 static int lowmem_launch_oompopup(void)
399 GVariantBuilder *const gv_builder = g_variant_builder_new(G_VARIANT_TYPE("a{ss}"));
400 g_variant_builder_add(gv_builder, "{ss}", "_SYSPOPUP_CONTENT_", "lowmemory_oom");
402 GVariant *const params = g_variant_new("(a{ss})", gv_builder);
403 g_variant_builder_unref(gv_builder);
405 int ret = d_bus_call_method_sync_gvariant(SYSTEM_POPUP_BUS_NAME,
406 SYSTEM_POPUP_PATH_SYSTEM, SYSTEM_POPUP_IFACE_SYSTEM,
407 "PopupLaunch", params);
409 g_variant_unref(params);
414 static inline void get_total_memory(void)
421 totalram = si.totalram;
422 ktotalram = BYTE_TO_KBYTE(totalram);
/*
 * Estimate how much memory would be freed by killing @pid:
 * USS plus (when swap is on) the process's zram share.
 * Returns a RESOURCED_ERROR_* code; *usage receives the estimate.
 * NOTE(review): this fragment is missing lines (embedded line numbers
 * jump, e.g. 442 -> 445) - the error-return bodies and the final
 * accumulation into *usage are not visible here.
 */
426 static int lowmem_mem_usage_uss(pid_t pid, unsigned int *usage)
428 unsigned int uss, zram = 0;
434 * In lowmem we need to know memory size of processes to
435 * for terminating apps. To get most real value of usage
436 * we should use USS + ZRAM usage for selected process.
438 * Those values will contain the most approximated amount
439 * of memory that will be freed after process termination.
441 ret = proc_get_uss(pid, &uss);
442 if (ret != RESOURCED_ERROR_NONE)
/* zram share matters only while swap is active */
445 if (swap_get_state() == SWAP_ON) {
446 ret = proc_get_zram_usage(pid, &zram);
447 /* If we don't get zram usage, it's not a problem */
448 if (ret != RESOURCED_ERROR_NONE)
452 return RESOURCED_ERROR_NONE;
/*
 * Sum the RSS of every pid belonging to @tsk. When tsk->pids is NULL
 * (single-process app) only tsk->pid is measured; otherwise each entry
 * of the pid array is accumulated.
 * NOTE(review): fragment is incomplete (declarations of ret/index/pid,
 * the accumulation into total_size and the final return are missing).
 */
455 unsigned int lowmem_get_task_mem_usage_rss(const struct task_info *tsk)
457 unsigned int size = 0, total_size = 0;
462 * If pids are allocated only when there are multiple processes with
463 * the same pgid e.g., browser and web process. Mostly, single process
466 if (tsk->pids == NULL) {
467 ret = proc_get_ram_usage(tsk->pid, &size);
469 /* If there is no proc entry for given pid the process
470 * should be abandoned during further processing
473 _D("failed to get rss memory usage of %d", tsk->pid);
/* multi-process app: add up every member pid's RSS */
478 for (index = 0; index < tsk->pids->len; index++) {
479 pid = g_array_index(tsk->pids, pid_t, index);
480 ret = proc_get_ram_usage(pid, &size);
481 if (ret != RESOURCED_ERROR_NONE)
/* scandir() filter callback: accept only entries whose name contains
 * "memps" (the memory-snapshot log files this module creates). */
static int memps_file_select(const struct dirent *entry)
{
	return strstr(entry->d_name, "memps") ? 1 : 0;
}
/**
 * @brief Find the LAST occurrence of @token in @str.
 * @return pointer into @str at the final match, or NULL when @token does
 *         not occur (an empty @token also yields NULL).
 *
 * The scan walks backwards from the terminator; the fragment's
 * `str <= --p` form could decrement the pointer to one-before-begin
 * (undefined behavior), so the loop was restructured to stay in bounds.
 */
static char *strrstr(const char *str, const char *token)
{
	int len = strlen(token);
	const char *p = str + strlen(str);

	while (p != str) {
		p--;
		if (p[0] == token[0] && strncmp(p, token, len) == 0)
			return (char *)p;
	}

	return NULL;
}
/*
 * scandir() comparator: order memps log files by the timestamp suffix
 * that follows the last '_' in the file name (oldest first).
 * NOTE(review): fragment is incomplete - the declarations of ptr/time1/
 * time2 and the conversions of the "_..." suffixes into time1/time2 are
 * missing (embedded line numbers jump 506 -> 512 -> 516 -> 520).
 */
506 static int timesort(const struct dirent **a, const struct dirent **b)
512 ptr = strrstr((*a)->d_name, "_");
516 ptr = strrstr((*b)->d_name, "_");
520 return (time1 - time2);
/*
 * Bound the number of memps log files in directory @data: when more than
 * memlog_nr_max exist, delete the oldest ones until roughly
 * memlog_remove_batch_thres remain.
 * Returns a RESOURCED_ERROR_* code.
 * NOTE(review): this fragment is missing many lines (declarations of
 * fpath/fname/len/i, NULL checks, the unlink/remove call and the
 * namelist free loop) - see the jumps in the embedded line numbers.
 */
523 static int clear_logs(void *data)
525 struct dirent **namelist;
529 char *dir = (char*)data;
533 return RESOURCED_ERROR_NONE;
536 return RESOURCED_ERROR_NONE;
539 if (len <= 0 || len >= sizeof fpath - 1) {
540 _E("Invalid parameter - Directory path is too short or too long");
541 return RESOURCED_ERROR_INVALID_PARAMETER;
/* list matching "memps" files, oldest first */
544 n = scandir(dir, &namelist, memps_file_select, timesort);
546 _D("num of log files %d", n);
547 if (n <= memlog_nr_max) {
551 return RESOURCED_ERROR_NONE;
554 strncpy(fpath, dir, sizeof fpath - 1);
555 fpath[sizeof fpath - 1] = '\0';
559 len = sizeof fpath - len - 1;
/* delete only the oldest (n - batch threshold) entries */
560 for (i = 0; i < n; i++) {
561 if (i < n - memlog_remove_batch_thres) {
562 if (strlen(namelist[i]->d_name) > len - 1)
564 strncpy(fname, namelist[i]->d_name, len - 1);
565 fpath[sizeof fpath - 1] = '\0';
566 _D("remove log file %s", fpath);
569 _E("%s file cannot removed", fpath);
575 return RESOURCED_ERROR_NONE;
/*
 * Write a memory snapshot log named
 * <memlog_path>/<prefix>_<victim_name>_<pid>_<timestamp>.
 * For backgrounded victims only a meminfo dump is written directly;
 * otherwise the external memps tool is exec'd to produce the report.
 * Old logs are pruned afterwards (best effort).
 * NOTE(review): fragment is missing lines throughout (time() call,
 * early returns, argv[2] assignment, fork/exec wrapper) - see embedded
 * line-number jumps.
 */
578 void make_memps_log(enum mem_log memlog, pid_t pid, char *victim_name)
582 char new_log[BUF_MAX];
583 static pid_t old_pid;
584 int oom_score_adj = 0, ret;
590 if (memlog < MEMLOG_MEMPS || memlog >= MEMLOG_MAX)
593 prefix = memlog_prefix[memlog];
602 if (localtime_r(&now, &cur_tm) == NULL) {
603 _E("Fail to get localtime");
607 snprintf(new_log, sizeof(new_log),
608 "%s/%s_%s_%d_%.4d%.2d%.2d%.2d%.2d%.2d", memlog_path, prefix, victim_name,
609 pid, (1900 + cur_tm.tm_year), 1 + cur_tm.tm_mon,
610 cur_tm.tm_mday, cur_tm.tm_hour, cur_tm.tm_min,
613 ret = proc_get_oom_score_adj(pid, &oom_score_adj);
/* backgrounded (or unknown) victim: dump meminfo ourselves */
614 if (ret || oom_score_adj > OOMADJ_BACKGRD_LOCKED) {
616 _cleanup_fclose_ FILE *f = NULL;
618 f = fopen(new_log, "w")
620 _E("fail to create memps log %s", new_log);
623 proc_print_meninfo(f);
/* otherwise run the external memps tool to write the report */
627 const char *argv[4] = {"/usr/bin/memps", "-f", NULL, NULL};
630 exec_cmd(ARRAY_SIZE(argv), argv);
633 /* best effort to limit the number of logfiles up to memlog_nr_max */
634 clear_logs(memlog_path);
/*
 * Kill one LMK victim. Protected binaries (memps, crash-worker,
 * system-syspopup) and resourced itself are skipped. Foreground-ish
 * victims get SIGTERM, others SIGKILL; *victim_size reports the memory
 * expected to be reclaimed. Returns a RESOURCED_ERROR_* code.
 * NOTE(review): fragment is missing lines (pid/sigterm declarations,
 * several branch bodies, the kill-decision between lines 680-684) -
 * embedded line numbers jump repeatedly.
 */
637 static int lowmem_kill_victim(const struct task_info *tsk,
638 int flags, int memps_log, unsigned int *victim_size)
642 char appname[PATH_MAX];
644 struct proc_app_info *pai;
/* never kill ourselves or an invalid pid */
648 if (pid <= 0 || pid == getpid())
649 return RESOURCED_ERROR_FAIL;
651 ret = proc_get_cmdline(pid, appname, sizeof appname);
652 if (ret == RESOURCED_ERROR_FAIL)
653 return RESOURCED_ERROR_FAIL;
/* diagnostic/popup helpers must survive the LMK pass */
655 if (!strcmp("memps", appname) ||
656 !strcmp("crash-worker", appname) ||
657 !strcmp("system-syspopup", appname)) {
658 _E("%s(%d) was selected, skip it", appname, pid);
659 return RESOURCED_ERROR_FAIL;
663 make_memps_log(MEMLOG_MEMPS, pid, appname);
667 resourced_proc_status_change(PROC_CGROUP_SET_TERMINATE_REQUEST,
668 pid, NULL, NULL, PROC_TYPE_NONE);
669 if (tsk->oom_score_lru <= OOMADJ_BACKGRD_LOCKED) {
672 } else if (tsk->oom_score_lru > OOMADJ_BACKGRD_LOCKED && tsk->oom_score_lru < OOMADJ_BACKGRD_UNLOCKED) {
673 int app_flag = pai->flags;
674 sigterm = app_flag & PROC_SIGTERM;
/* already killed once with SIGTERM - presumably escalate; confirm */
677 if (pai->memory.oom_killed)
680 pai->memory.oom_killed = true;
684 safe_kill(pid, SIGTERM);
686 safe_kill(pid, SIGKILL);
688 _D("[LMK] we killed, force(%d), %d (%s) score = %d, size: rss = %u, sigterm = %d\n",
689 flags & OOM_FORCE, pid, appname, tsk->oom_score_adj,
691 *victim_size = tsk->size;
693 if (tsk->oom_score_lru > OOMADJ_FOREGRD_UNLOCKED)
694 return RESOURCED_ERROR_NONE;
/* a foreground app died: optionally warn the user once */
696 if (oom_popup_enable && !oom_popup) {
697 lowmem_launch_oompopup();
701 make_memps_log(MEMLOG_MEMPS, pid, appname);
703 return RESOURCED_ERROR_NONE;
706 /* return LOWMEM_RECLAIM_CONT when killing should be continued */
707 static int lowmem_check_kill_continued(struct task_info *tsk, int flags)
709 unsigned int available;
712 * Processes with the priority higher than perceptible are killed
713 * only when the available memory is less than dynamic oom threshold.
715 if (tsk->oom_score_lru > OOMADJ_BACKGRD_PERCEPTIBLE)
716 return LOWMEM_RECLAIM_CONT;
718 if (flags & (OOM_FORCE|OOM_SINGLE_SHOT)) {
719 _I("[LMK] %d is dropped during force kill, flag=%d",
721 return LOWMEM_RECLAIM_DROP;
723 available = proc_get_mem_available();
724 if (available > lmk_start_threshold) {
725 _I("[LMK] available=%d MB, larger than %u MB, do not kill foreground",
726 available, lmk_start_threshold);
727 return LOWMEM_RECLAIM_RETRY;
729 return LOWMEM_RECLAIM_CONT;
732 static int compare_victims(const struct task_info *ta, const struct task_info *tb)
739 * followed by kernel badness point calculation using heuristic.
740 * oom_score_adj is normalized by its unit, which varies -1000 ~ 1000.
742 pa = ta->oom_score_lru * (ktotalram / 2000) + ta->size;
743 pb = tb->oom_score_lru * (ktotalram / 2000) + tb->size;
748 static void lowmem_free_task_info_array(GArray *array)
752 for (i = 0; i < array->len; i++) {
753 struct task_info *tsk;
755 tsk = &g_array_index(array, struct task_info, i);
757 g_array_free(tsk->pids, true);
760 g_array_free(array, true);
763 static inline int is_dynamic_process_killer(int flags)
765 return (flags & OOM_FORCE) && !(flags & OOM_NOMEMORY_CHECK);
768 static unsigned int is_memory_recovered(unsigned int *avail, unsigned int thres)
770 unsigned int available = proc_get_mem_available();
771 unsigned int should_be_freed = 0;
773 if (available < thres)
774 should_be_freed = thres - available;
776 * free THRESHOLD_MARGIN more than real should be freed,
777 * because launching app is consuming up the memory.
779 if (should_be_freed > 0)
780 should_be_freed += THRESHOLD_MARGIN;
784 return should_be_freed;
/*
 * Scan /proc and append to @pids task_info entries for processes that
 * are not already covered by proc_app_info: skips kernel threads, VIP
 * cgroup members, already-dead pids, and (per the embedded comment)
 * app-range oom scores. Returns a RESOURCED_ERROR_* code.
 * NOTE(review): fragment is missing lines (DIR *dp declaration, oom
 * declaration, several continue statements, tsk.pid/pgid assignments,
 * closedir) - embedded line numbers jump repeatedly.
 */
787 static int lowmem_get_pids_proc(GArray *pids)
790 struct dirent *dentry;
792 dp = opendir("/proc");
794 _E("fail to open /proc");
795 return RESOURCED_ERROR_FAIL;
797 while ((dentry = readdir(dp)) != NULL) {
798 struct task_info tsk;
799 pid_t pid = 0, pgid = 0;
/* numeric directory names are pids */
802 if (!isdigit(dentry->d_name[0]))
805 pid = (pid_t)atoi(dentry->d_name);
807 /* skip invalid pids or kernel processes */
817 if (proc_get_oom_score_adj(pid, &oom) < 0) {
818 _D("pid(%d) was already terminated", pid);
822 /* VIP pids should be excluded from the LMK list */
823 if (cgroup_get_type(oom) == CGROUP_VIP)
827 * Check whether this array includes applications or not.
828 * If it doesn't require to get applications
829 * and pid has been already included in pai,
832 if (oom > OOMADJ_SU && oom <= OOMADJ_APP_MAX)
836 * Currently, for tasks in the memory cgroup,
837 * do not consider multiple tasks with one pgid.
841 tsk.oom_score_adj = oom;
842 tsk.oom_score_lru = oom;
844 tsk.size = lowmem_get_task_mem_usage_rss(&tsk);
847 g_array_append_val(pids, tsk);
851 return RESOURCED_ERROR_NONE;
855 * @brief Terminate up to max_victims processes after finding them from pai.
856 It depends on proc_app_info lists
857 and it also reference systemservice cgroup
858 because some processes in this group don't have proc_app_info.
860 * @max_victims: max number of processes to be terminated
861 * @start_oom: find victims from start oom adj score value
862 * @end_oom: find victims to end oom adj score value
863 * @should_be_freed: amount of memory to be reclaimed (in MB)
864 * @total_size[out]: total size of possibly reclaimed memory (required)
865 * @completed: final outcome (optional)
866 * @threshold: desired value of memory available
/*
 * Core LMK pass: build a candidate list from proc_app_list (plus /proc
 * when scanning from OOMADJ_SU), sort by badness, and kill up to
 * @max_victims processes until either @should_be_freed (MB) appears
 * reclaimable or available memory exceeds @threshold.
 * *total_size receives the summed victim size (KB); *completed the final
 * LOWMEM_RECLAIM_* status. Returns the number of victims killed.
 * NOTE(review): fragment is missing many lines (ti/oom_score_adj
 * declarations, continue statements, break/victim++ bookkeeping, the
 * final return) - see embedded line-number jumps throughout.
 */
868 static int lowmem_kill_victims(int max_victims,
869 int start_oom, int end_oom, unsigned should_be_freed, int flags,
870 unsigned int *total_size, int *completed, int threshold)
873 GSList *proc_app_list = NULL;
874 int i, ret, victim = 0;
875 unsigned int victim_size = 0;
876 unsigned int total_victim_size = 0;
877 int status = LOWMEM_RECLAIM_NONE;
878 GArray *candidates = NULL;
879 GSList *iter, *iterchild;
880 struct proc_app_info *pai = NULL;
882 int should_be_freed_kb = MBYTE_TO_KBYTE(should_be_freed);
884 candidates = g_array_new(false, false, sizeof(struct task_info));
/* phase 1: collect candidates whose oom score lies in [start, end] */
886 proc_app_list = proc_app_list_open();
887 gslist_for_each_item(iter, proc_app_list) {
891 pai = (struct proc_app_info *)iter->data;
895 oom_score_adj = pai->memory.oom_score_adj;
896 if (oom_score_adj > end_oom || oom_score_adj < start_oom)
/* OOM_REVISE retries skip victims already killed in a prior pass */
899 if ((flags & OOM_REVISE) && pai->memory.oom_killed)
902 ti.pid = pai->main_pid;
903 ti.pgid = getpgid(ti.pid);
904 ti.oom_score_adj = oom_score_adj;
908 * Before oom_score_adj of favourite (oom_score = 270) applications is
909 * independent of lru_state, now we consider lru_state, while
910 * killing favourite process.
913 if (oom_score_adj == OOMADJ_FAVORITE && pai->lru_state >= PROC_BACKGROUND)
914 ti.oom_score_lru = OOMADJ_FAVORITE + OOMADJ_FAVORITE_APP_INCREASE * pai->lru_state;
916 ti.oom_score_lru = oom_score_adj;
919 ti.pids = g_array_new(false, false, sizeof(pid_t));
920 g_array_append_val(ti.pids, ti.pid);
921 gslist_for_each_item(iterchild, pai->childs) {
922 pid_t child = GPOINTER_TO_PID(iterchild->data);
923 g_array_append_val(ti.pids, child);
928 g_array_append_val(candidates, ti);
931 proc_app_list_close();
933 if (!candidates->len) {
934 status = LOWMEM_RECLAIM_NEXT_TYPE;
938 _D("[LMK] candidate ratio=%d/%d", candidates->len, total_count);
/* phase 2: measure each candidate's reclaimable size */
941 for (i = 0; i < candidates->len; i++) {
942 struct task_info *tsk;
944 tsk = &g_array_index(candidates, struct task_info, i);
945 tsk->size = lowmem_get_task_mem_usage_rss(tsk);
949 * In case of start_oom == OOMADJ_SU,
950 * we're going to try to kill some of processes in /proc
951 * to handle low memory situation.
952 * It can find malicious system process even though it has low oom score.
954 if (start_oom == OOMADJ_SU)
955 lowmem_get_pids_proc(candidates);
957 g_array_sort(candidates, (GCompareFunc)compare_victims);
/* phase 3: kill in badness order until a stop condition fires */
959 for (i = 0; i < candidates->len; i++) {
960 struct task_info *tsk;
962 if (i >= max_victims) {
963 status = LOWMEM_RECLAIM_NEXT_TYPE;
968 * Available memory is checking only every
969 * num_vict_between_check process for reducing burden.
971 if (!(i % num_vict_between_check)) {
972 if (proc_get_mem_available() > threshold) {
973 status = LOWMEM_RECLAIM_DONE;
978 if (!(flags & OOM_NOMEMORY_CHECK) &&
979 total_victim_size >= should_be_freed_kb) {
980 _D("[LMK] victim=%d, max_victims=%d, total_size=%uKB",
981 victim, max_victims, total_victim_size);
982 status = LOWMEM_RECLAIM_DONE;
986 tsk = &g_array_index(candidates, struct task_info, i);
988 status = lowmem_check_kill_continued(tsk, flags);
989 if (status != LOWMEM_RECLAIM_CONT)
992 _I("[LMK] select victims from proc_app_list pid(%d) with oom_score_adj(%d)\n", tsk->pid, tsk->oom_score_adj);
994 ret = lowmem_kill_victim(tsk, flags, i, &victim_size);
995 if (ret != RESOURCED_ERROR_NONE)
998 total_victim_size += victim_size;
1002 lowmem_free_task_info_array(candidates);
1003 *total_size = total_victim_size;
1004 if(*completed != LOWMEM_RECLAIM_CONT)
1005 *completed = status;
1007 *completed = LOWMEM_RECLAIM_NEXT_TYPE;
1011 static int calculate_range_of_oom(enum cgroup_type type, int *min, int *max)
1013 if (type == CGROUP_VIP || type >= CGROUP_END || type <= CGROUP_TOP) {
1014 _E("cgroup type (%d) is out of scope", type);
1015 return RESOURCED_ERROR_FAIL;
1018 *max = cgroup_get_highest_oom_score_adj(type);
1019 *min = cgroup_get_lowest_oom_score_adj(type);
1021 return RESOURCED_ERROR_NONE;
/*
 * Execute one queued reclaim request on the worker thread: repeatedly
 * run lowmem_kill_victims() over the requested cgroup, escalating
 * LOW -> MEDIUM -> HIGH -> ROOT depending on OOM_IN_DEPTH, until the
 * target available-memory level (ctl->size, MB) is met or the pass
 * decides to stop. The final LOWMEM_RECLAIM_* status lands in
 * ctl->status.
 * NOTE(review): fragment is missing lines (the retry loop header,
 * several goto/exit labels, braces) - embedded line numbers jump.
 */
1024 static void lowmem_handle_request(struct lowmem_control *ctl)
1026 int start_oom, end_oom;
1027 int count = 0, victim_cnt = 0;
1028 int max_victim_cnt = ctl->count;
1029 int status = LOWMEM_RECLAIM_NONE;
1030 unsigned int available = 0;
1031 unsigned int total_size = 0;
1032 unsigned int current_size = 0;
1033 unsigned int reclaim_size, shortfall = 0;
1034 enum cgroup_type cgroup_type = ctl->type;
1036 available = proc_get_mem_available();
1037 reclaim_size = ctl->size > available
1038 ? ctl->size - available : 0;
1040 if (!reclaim_size) {
1041 status = LOWMEM_RECLAIM_DONE;
1046 /* Prepare LMK to start doing it's job. Check preconditions. */
1047 if (calculate_range_of_oom(cgroup_type, &start_oom, &end_oom))
1050 lmk_start_threshold = get_root_memcg_info()->threshold[MEM_LEVEL_OOM];
1051 shortfall = is_memory_recovered(&available, ctl->size);
1053 if (!shortfall || !reclaim_size) {
1054 status = LOWMEM_RECLAIM_DONE;
/* NOTE(review): "¤t_size" below is mojibake for "&current_size"
 * (an "&curr..." HTML-entity mangling) - restore before compiling. */
1060 victim_cnt = lowmem_kill_victims(max_victim_cnt, start_oom, end_oom,
1061 reclaim_size, ctl->flags, ¤t_size, &status, ctl->size);
1064 current_size = KBYTE_TO_MBYTE(current_size);
1065 reclaim_size -= reclaim_size > current_size
1066 ? current_size : reclaim_size;
1067 total_size += current_size;
1068 count += victim_cnt;
1069 _I("[LMK] current: kill %d victims, reclaim_size=%uMB from %d to %d status=%s",
1070 victim_cnt, current_size,
1071 start_oom, end_oom, convert_status_to_str(status));
1074 if ((status == LOWMEM_RECLAIM_DONE) ||
1075 (status == LOWMEM_RECLAIM_DROP) ||
1076 (status == LOWMEM_RECLAIM_RETRY))
1080 * If it doesn't finish reclaiming memory in first operation,
1081 - if flags has OOM_IN_DEPTH,
1082 try to find victims again in the active cgroup.
1083 otherwise, just return because there is no more victims in the desired cgroup.
1084 - if flags has OOM_REVISE,
1085 it means that resourced can't find victims from proc_app_list.
1086 So, it should search victims or malicious process from /proc.
1087 But searching /proc leads to abnormal behaviour.
1088 (Make sluggish or kill same victims continuously)
1089 Thus, otherwise, just return in first operation and wait some period.
1091 if (cgroup_type == CGROUP_LOW) {
1092 cgroup_type = CGROUP_MEDIUM;
1094 } else if ((cgroup_type == CGROUP_MEDIUM) && (ctl->flags & OOM_IN_DEPTH)) {
1095 cgroup_type = CGROUP_HIGH;
1096 if(ctl->flags & OOM_FORCE)
1097 max_victim_cnt = FOREGROUND_VICTIMS;
1099 } else if ((cgroup_type == CGROUP_HIGH) && (ctl->flags & OOM_IN_DEPTH)) {
1100 status = LOWMEM_RECLAIM_RETRY;
1101 ctl->type = CGROUP_ROOT;
1103 else if (cgroup_type == CGROUP_ROOT) {
1104 status = LOWMEM_RECLAIM_RETRY;
1107 _I("[LMK] Done: killed %d processes reclaimed=%uMB remaining=%uMB shortfall=%uMB status=%s",
1108 count, total_size, reclaim_size, shortfall, convert_status_to_str(status));
1110 /* After we finish reclaiming it's worth to remove oldest memps logs */
1111 if (count && memlog_enabled)
1112 request_helper_worker(CLEAR_LOGS, memlog_path, clear_logs, NULL);
1113 ctl->status = status;
/*
 * LMK worker thread main loop: blocks on the async queue, handles each
 * reclaim request via lowmem_handle_request(), retries RETRY-status
 * requests after a wait (tagging them OOM_REVISE), and destroys every
 * request it consumes. Exits when the worker is deactivated.
 * NOTE(review): fragment is missing structural lines (the while() loop
 * header, try_count declaration, continue/break statements, the final
 * pthread_exit/return) - embedded line numbers jump.
 */
1116 static void *lowmem_reclaim_worker(void *arg)
1118 struct lowmem_worker *lmw = (struct lowmem_worker *)arg;
/* raise our own priority so reclaim wins CPU under pressure */
1120 setpriority(PRIO_PROCESS, 0, OOM_KILLER_PRIORITY);
1122 g_async_queue_ref(lmw->queue);
1126 struct lowmem_control *ctl;
1128 LOWMEM_WORKER_IDLE(lmw);
1129 /* Wait on any wake-up call */
1130 ctl = g_async_queue_pop(lmw->queue);
1132 if (ctl->flags & OOM_DROP)
1133 LOWMEM_DESTROY_REQUEST(ctl);
1135 if (!LOWMEM_WORKER_IS_ACTIVE(lmw) || !ctl)
1138 LOWMEM_WORKER_RUN(lmw);
1140 _D("[LMK] %d tries", ++try_count);
1141 lowmem_handle_request(ctl);
1143 * Case the process failed to reclaim requested amount of memory
1144 * or still under have memory pressure - try the timeout wait.
1145 * There is a chance this will get woken-up in a better reality.
1147 if (ctl->status == LOWMEM_RECLAIM_RETRY &&
1148 !(ctl->flags & OOM_SINGLE_SHOT)) {
1149 unsigned int available = proc_get_mem_available();
1151 if (available >= ctl->size) {
1152 _I("[LMK] Memory restored: requested=%uMB available=%uMB\n",
1153 ctl->size, available);
1154 ctl->status = LOWMEM_RECLAIM_DONE;
1157 LOWMEM_DESTROY_REQUEST(ctl);
1158 LOWMEM_WORKER_IDLE(lmw);
/* still under pressure: wait, then re-run the request as a revision */
1162 if (LOWMEM_WORKER_IS_ACTIVE(lmw)) {
1163 g_usleep(LMW_RETRY_WAIT_TIMEOUT_MSEC);
1164 ctl->flags |= OOM_REVISE;
1170 * The ctl callback would check available size again.
1171 * And it is last point in reclaiming worker.
1172 * Resourced sent SIGKILL signal to victim processes
1173 * so it should wait for a some seconds until each processes returns memory.
1175 g_usleep(LMW_LOOP_WAIT_TIMEOUT_MSEC);
1179 /* The lmk becomes the owner of all queued requests .. */
1180 LOWMEM_DESTROY_REQUEST(ctl);
1181 LOWMEM_WORKER_IDLE(lmw);
1183 g_async_queue_unref(lmw->queue);
1187 static void change_lowmem_state(unsigned int mem_state)
1189 cur_mem_state = mem_state;
1190 lmk_start_threshold = get_root_memcg_info()->threshold[MEM_LEVEL_OOM];
1192 resourced_notify(RESOURCED_NOTIFIER_MEM_LEVEL_CHANGED,
1193 (void *)&cur_mem_state);
1196 /* only app can call this function
1197 * that is, service cannot call the function
1199 static void lowmem_swap_memory(char *path)
1201 unsigned int available;
1203 if (cur_mem_state == MEM_LEVEL_HIGH)
1206 if (swap_get_state() != SWAP_ON)
1209 available = proc_get_mem_available();
1210 if (cur_mem_state != MEM_LEVEL_LOW &&
1211 available <= get_root_memcg_info()->threshold[MEM_LEVEL_LOW])
1212 swap_activate_act();
1214 resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
1215 memcg_swap_status = true;
1218 void lowmem_trigger_swap(pid_t pid, char *path, bool move)
1222 int lowest_oom_score_adj;
1225 _E("[SWAP] Unknown memory cgroup path to swap");
1229 /* In this case, corresponding process will be moved to memory CGROUP_LOW.
1232 error = proc_get_oom_score_adj(pid, &oom_score_adj);
1234 _E("[SWAP] Cannot get oom_score_adj of pid (%d)", pid);
1238 lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(CGROUP_LOW);
1240 if (oom_score_adj < lowest_oom_score_adj) {
1241 oom_score_adj = lowest_oom_score_adj;
1242 /* End of this funciton, 'lowmem_swap_memory()' funciton will be called */
1243 proc_set_oom_score_adj(pid, oom_score_adj, find_app_info(pid));
1248 /* Correponding process is already managed per app or service.
1249 * In addition, if some process is already located in the CGROUP_LOW, then just do swap
1251 resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
1254 static void memory_level_send_system_event(int lv)
1260 case MEM_LEVEL_HIGH:
1261 case MEM_LEVEL_MEDIUM:
1263 str = EVT_VAL_MEMORY_NORMAL;
1265 case MEM_LEVEL_CRITICAL:
1266 str = EVT_VAL_MEMORY_SOFT_WARNING;
1269 str = EVT_VAL_MEMORY_HARD_WARNING;
1272 _E("Invalid state");
1276 b = bundle_create();
1278 _E("Failed to create bundle");
1282 bundle_add_str(b, EVT_KEY_LOW_MEMORY, str);
1283 eventsystem_send_system_event(SYS_EVENT_LOW_MEMORY, b);
1287 static void high_mem_act(void)
1291 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
1293 _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
1294 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
1295 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
1296 VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
1297 memory_level_send_system_event(MEM_LEVEL_HIGH);
1300 change_lowmem_state(MEM_LEVEL_HIGH);
1302 if (swap_get_state() == SWAP_ON && memcg_swap_status) {
1303 resourced_notify(RESOURCED_NOTIFIER_SWAP_UNSET_LIMIT, get_memcg_info(CGROUP_LOW));
1304 memcg_swap_status = false;
1306 if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
1307 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
1308 (void *)CGROUP_FREEZER_ENABLED);
1311 static void swap_activate_act(void)
/*
 * Transition to MEM_LEVEL_LOW: publish the (still NORMAL) state, re-enable
 * the freezer if paused, and ask the swap module to activate if it is not
 * already on.
 */
1315 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
1317 _E("vconf get failed %s", VCONFKEY_SYSMAN_LOW_MEMORY);
1319 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
1320 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
1321 VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
1322 memory_level_send_system_event(MEM_LEVEL_LOW);
1324 change_lowmem_state(MEM_LEVEL_LOW);
1325 if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
1326 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
1327 (void *)CGROUP_FREEZER_ENABLED);
/* activate swap lazily, only on first entry to this level */
1329 if (swap_get_state() != SWAP_ON)
1330 resourced_notify(RESOURCED_NOTIFIER_SWAP_ACTIVATE, NULL);
1333 static void dedup_act(enum ksm_scan_mode mode)
/*
 * Kick a KSM (kernel same-page merging) deduplication scan.
 * KSM_SCAN_PARTIAL additionally moves the published memory level to
 * MEM_LEVEL_MEDIUM; KSM_SCAN_FULL only requests a full scan.
 * Only acts when dedup is in DEDUP_ONE_SHOT mode.
 */
1338 if (dedup_get_state() != DEDUP_ONE_SHOT)
1341 if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
1342 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
1343 (void *)CGROUP_FREEZER_ENABLED);
1345 if (mode == KSM_SCAN_PARTIAL) {
1346 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
1348 _E("vconf get failed %s", VCONFKEY_SYSMAN_LOW_MEMORY);
1350 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
1351 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
1352 VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
1353 memory_level_send_system_event(MEM_LEVEL_MEDIUM);
1355 change_lowmem_state(MEM_LEVEL_MEDIUM);
/* pass the scan mode by address; the notifier expects a pointer */
1357 data = KSM_SCAN_PARTIAL;
1358 resourced_notify(RESOURCED_NOTIFIER_DEDUP_SCAN, &data);
1359 } else if (mode == KSM_SCAN_FULL) {
1360 data = KSM_SCAN_FULL;
1361 resourced_notify(RESOURCED_NOTIFIER_DEDUP_SCAN, &data);
1365 static void swap_compact_act(void)
/*
 * Transition to MEM_LEVEL_CRITICAL: update internal state, ask the swap
 * module to compact, and broadcast the soft-warning system event.
 */
1367 change_lowmem_state(MEM_LEVEL_CRITICAL);
1368 resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_CRITICAL);
1369 memory_level_send_system_event(MEM_LEVEL_CRITICAL);
1372 static void medium_cb(struct lowmem_control *ctl)
/*
 * Completion callback for the OOM-level reclaim request queued by lmk_act():
 * once reclaim finishes, drop the memory state back to MEM_LEVEL_HIGH.
 * NOTE(review): one statement between the if and the state change is
 * elided in this view.
 */
1374 if (ctl->status == LOWMEM_RECLAIM_DONE)
1376 lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
1379 static void lmk_act(void)
/*
 * Act on MEM_LEVEL_OOM: publish the hard-warning state, pause the freezer,
 * and queue a low-memory-killer reclaim request targeting CGROUP_LOW until
 * available memory rises above threshold_leave.
 */
1381 unsigned int available;
1383 int status = VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL;
1386 * Don't trigger reclaim worker
1387 * if it is already running
1389 if (LOWMEM_WORKER_IS_RUNNING(&lmw))
1392 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
1394 _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
1396 memory_level_send_system_event(MEM_LEVEL_OOM);
1397 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING) {
/* freeze background activity while the killer works */
1398 if (proc_get_freezer_status() == CGROUP_FREEZER_ENABLED)
1399 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
1400 (void *)CGROUP_FREEZER_PAUSED);
1401 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
1402 VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING);
1404 available = proc_get_mem_available();
1406 change_lowmem_state(MEM_LEVEL_OOM);
/* only queue a kill request while we are still below the leave threshold */
1408 if (available < get_root_memcg_info()->threshold_leave) {
1409 struct lowmem_control *ctl;
1411 ctl = LOWMEM_NEW_REQUEST();
1413 LOWMEM_SET_REQUEST(ctl, OOM_IN_DEPTH,
1414 CGROUP_LOW, get_root_memcg_info()->threshold_leave,
1415 num_max_victims, medium_cb);
1416 lowmem_queue_request(&lmw, ctl);
1420 resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_OOM);
1423 * Flush resourced's own caches as well: resourced itself can hold
1424 * many malloc fast bins and sqlite3 cache memory.
1431 static void lowmem_trigger_memory_state_action(int mem_state)
/*
 * Dispatch the handler matching the requested memory level.
 * Fallthrough/exit lines of the switch are elided in this view.
 */
1434 * Skip when the requested state equals the current one, EXCEPT for
1435 * MEM_LEVEL_OOM, which must always be processed again.
1436 * Otherwise, the reclaim worker couldn't run any more.
1438 if (mem_state != MEM_LEVEL_OOM && cur_mem_state == mem_state)
1441 switch (mem_state) {
1442 case MEM_LEVEL_HIGH:
1445 case MEM_LEVEL_MEDIUM:
1446 dedup_act(KSM_SCAN_PARTIAL);
1449 swap_activate_act();
1451 case MEM_LEVEL_CRITICAL:
1452 dedup_act(KSM_SCAN_FULL);
1463 static void lowmem_dump_cgroup_procs(struct memcg_info *mi)
/*
 * Log the USS of every process in the given memory cgroup (diagnostics
 * before a proactive kill).
 * NOTE(review): pids_array is dereferenced without checking that
 * cgroup_get_pids() succeeded and produced a non-NULL array — confirm
 * cgroup_get_pids() guarantees allocation on all paths.
 */
1468 GArray *pids_array = NULL;
1470 cgroup_get_pids(mi->name, &pids_array);
1472 for (i = 0; i < pids_array->len; i++) {
1473 pid = g_array_index(pids_array, pid_t, i);
1474 lowmem_mem_usage_uss(pid, &size);
1475 _I("pid = %d, size = %u KB", pid, size);
/* true: free both the wrapper and the element data */
1477 g_array_free(pids_array, true);
1480 static void memory_cgroup_proactive_lmk_act(enum cgroup_type type, struct memcg_info *mi)
/*
 * Queue a single-shot, in-depth kill request for the given per-cgroup
 * memcg when its own pressure eventfd fired; targets mi->oomleave.
 */
1482 struct lowmem_control *ctl;
1484 /* To Do: only start to kill fg victim when no pending fg victim */
1485 lowmem_dump_cgroup_procs(mi);
1487 ctl = LOWMEM_NEW_REQUEST();
1489 LOWMEM_SET_REQUEST(ctl, OOM_SINGLE_SHOT | OOM_IN_DEPTH, type,
1490 mi->oomleave, num_max_victims, NULL);
1491 lowmem_queue_request(&lmw, ctl);
1495 static unsigned int check_mem_state(unsigned int available)
/*
 * Map available memory (MB) to the most severe matching MEM_LEVEL_*,
 * scanning from the worst level down; MEM_LEVEL_OOM is compared against
 * lmk_start_threshold instead of the per-level threshold table.
 * (Loop body break/return lines are elided in this view.)
 */
1498 for (mem_state = MEM_LEVEL_MAX - 1; mem_state > MEM_LEVEL_HIGH; mem_state--) {
1499 if (mem_state != MEM_LEVEL_OOM && available <= get_root_memcg_info()->threshold[mem_state])
1501 else if (mem_state == MEM_LEVEL_OOM && available <= lmk_start_threshold)
1508 /*static int load_bg_reclaim_config(struct parse_result *result, void *user_data)
1511 return RESOURCED_ERROR_INVALID_PARAMETER;
1513 if (strncmp(result->section, MEM_BG_RECLAIM_SECTION, strlen(MEM_BG_RECLAIM_SECTION)+1))
1514 return RESOURCED_ERROR_NONE;
1516 if (!strncmp(result->name, MEM_BG_RECLAIM_STRING, strlen(MEM_BG_RECLAIM_STRING)+1)) {
1517 if (!strncmp(result->value, "yes", strlen("yes")+1))
1519 else if (!strncmp(result->value, "no", strlen("no")+1))
1524 return RESOURCED_ERROR_NONE;
1527 static int load_popup_config(struct parse_result *result, void *user_data)
/*
 * Config-parser callback for the [POPUP] section: enables/disables the
 * oom_popup flag based on a yes/no value. Returns RESOURCED_ERROR_NONE
 * for sections/keys it does not own.
 */
1530 return RESOURCED_ERROR_INVALID_PARAMETER;
1532 if (strncmp(result->section, MEM_POPUP_SECTION, strlen(MEM_POPUP_SECTION)+1))
1533 return RESOURCED_ERROR_NONE;
1535 if (!strncmp(result->name, MEM_POPUP_STRING, strlen(MEM_POPUP_STRING)+1)) {
1536 if (!strncmp(result->value, "yes", strlen("yes")+1))
1537 oom_popup_enable = true;
1538 else if (!strncmp(result->value, "no", strlen("no")+1))
1539 oom_popup_enable = false;
1543 return RESOURCED_ERROR_NONE;
1546 static int load_mem_log_config(struct parse_result *result, void *user_data)
/*
 * Config-parser callback for the [Logging] section: fills the memlog_*
 * globals (enable flag, log path, rotation limits, memps prefixes).
 * NOTE(review): strdup() results are stored unchecked — a NULL on OOM
 * would propagate into later logging; confirm acceptable.
 */
1549 return RESOURCED_ERROR_INVALID_PARAMETER;
1551 if (strncmp(result->section, MEM_LOGGING_SECTION, strlen(MEM_LOGGING_SECTION)+1))
1552 return RESOURCED_ERROR_NONE;
1554 if (!strncmp(result->name, "Enable", strlen("Enable")+1)) {
1555 memlog_enabled = atoi(result->value);
1556 } else if (!strncmp(result->name, "LogPath", strlen("LogPath")+1)) {
1557 memlog_path = strdup(result->value);
1558 } else if (!strncmp(result->name, "MaxNumLogfile", strlen("MaxNumLogfile")+1)) {
1559 memlog_nr_max = atoi(result->value);
/* start batch-removal at 5/6 of the max logfile count */
1560 memlog_remove_batch_thres = (memlog_nr_max * 5) / 6;
1561 } else if (!strncmp(result->name, "PrefixMemps", strlen("PrefixMemps")+1)) {
1562 memlog_prefix[MEMLOG_MEMPS] = strdup(result->value);
1563 } else if (!strncmp(result->name, "PrefixMempsMemLimit", strlen("PrefixMempsMemLimit")+1)) {
1564 memlog_prefix[MEMLOG_MEMPS_MEMLIMIT] = strdup(result->value);
1567 return RESOURCED_ERROR_NONE;
1570 static int set_memory_config(struct parse_result *result, void *user_data)
/*
 * Config-parser callback for the [Memory] section: translates each
 * key/value pair into root-memcg thresholds, per-cgroup limits and
 * swappiness, LMK victim counts, proactive-killer knobs, event level
 * and fragmentation size.
 *
 * NOTE(review): the Threshold*Ratio* branches parse the ratio with
 * atoi(), truncating fractional percentages (e.g. "2.5" -> 2). atof()
 * looks intended — confirm against the shipped limiter.conf before
 * changing, as elided lines prevent a safe in-place fix here.
 */
1573 return RESOURCED_ERROR_NONE;
1575 if (strncmp(result->section, MEM_SECTION, strlen(MEM_SECTION)+1))
1576 return RESOURCED_ERROR_NONE;
/* absolute thresholds, in MB */
1578 if (!strncmp(result->name, "ThresholdDedup", strlen("ThresholdDedup")+1)) {
1579 int value = atoi(result->value);
1580 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, value);
1581 } else if (!strncmp(result->name, "ThresholdSwap", strlen("ThresholdSwap")+1)) {
1582 int value = atoi(result->value);
1583 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, value);
1584 } else if (!strncmp(result->name, "ThresholdLow", strlen("ThresholdLow")+1)) {
1585 int value = atoi(result->value);
1586 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, value);
1587 } else if (!strncmp(result->name, "ThresholdMedium", strlen("ThresholdMedium")+1)) {
1588 int value = atoi(result->value);
1589 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, value);
1590 } else if (!strncmp(result->name, "ThresholdLeave", strlen("ThresholdLeave")+1)) {
1591 int value = atoi(result->value);
1592 memcg_set_leave_threshold(CGROUP_ROOT, value);
/* percentage-of-RAM thresholds, converted to MB */
1593 } else if (!strncmp(result->name, "ThresholdRatioDedup", strlen("ThresholdRatioDedup")+1)) {
1594 double ratio = atoi(result->value);
1595 int value = (double)totalram * ratio / 100.0;
1596 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, BYTE_TO_MBYTE(value));
1597 } else if (!strncmp(result->name, "ThresholdRatioSwap", strlen("ThresholdRatioSwap")+1)) {
1598 double ratio = atoi(result->value);
1599 int value = (double)totalram * ratio / 100.0;
1600 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, BYTE_TO_MBYTE(value));
1601 } else if (!strncmp(result->name, "ThresholdRatioLow", strlen("ThresholdRatioLow")+1)) {
1602 double ratio = atoi(result->value);
1603 int value = (double)totalram * ratio / 100.0;
1604 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, BYTE_TO_MBYTE(value));
1605 } else if (!strncmp(result->name, "ThresholdRatioMedium", strlen("ThresholdRatioMedium")+1)) {
1606 double ratio = atoi(result->value);
1607 int value = (double)totalram * ratio / 100.0;
1608 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, BYTE_TO_MBYTE(value));
1609 } else if (!strncmp(result->name, "ThresholdRatioLeave", strlen("ThresholdRatioLeave")+1)) {
1610 double ratio = atoi(result->value);
1611 int value = (double)totalram * ratio / 100.0;
1612 memcg_set_leave_threshold(CGROUP_ROOT, BYTE_TO_MBYTE(value));
/* per-cgroup hard limits as a fraction of total RAM */
1613 } else if (!strncmp(result->name, "ForegroundRatio", strlen("ForegroundRatio")+1)) {
1614 float ratio = atof(result->value);
1615 memcg_info_set_limit(get_memcg_info(CGROUP_HIGH), ratio, totalram);
1616 } else if (!strncmp(result->name, "BackgroundRatio", strlen("BackgroundRatio")+1)) {
1617 float ratio = atof(result->value);
1618 memcg_info_set_limit(get_memcg_info(CGROUP_MEDIUM), ratio, totalram);
1619 } else if (!strncmp(result->name, "LowRatio", strlen("LowRatio")+1)) {
1620 float ratio = atof(result->value);
1621 memcg_info_set_limit(get_memcg_info(CGROUP_LOW), ratio, totalram);
1622 } else if (!strncmp(result->name, "NumMaxVictims", strlen("NumMaxVictims")+1)) {
1623 int value = atoi(result->value);
1624 num_max_victims = value;
/* derive the victims-between-check count from the max victim count */
1625 num_vict_between_check = value > MAX_MEMORY_CGROUP_VICTIMS/2
1626 ? 3 : value > MAX_MEMORY_CGROUP_VICTIMS/4
1628 } else if (!strncmp(result->name, "ProactiveThreshold", strlen("ProactiveThreshold")+1)) {
1629 int value = atoi(result->value);
1630 proactive_threshold = value;
1631 } else if (!strncmp(result->name, "ProactiveLeave", strlen("ProactiveLeave")+1)) {
1632 int value = atoi(result->value);
1633 proactive_leave = value;
1634 } else if (!strncmp(result->name, "EventLevel", strlen("EventLevel")+1)) {
1635 if (strncmp(event_level, result->value, strlen(event_level)))
1636 event_level = strdup(result->value);
1638 return RESOURCED_ERROR_OUT_OF_MEMORY;
/* swappiness: default/root plus per-cgroup overrides */
1639 } else if (!strncmp(result->name, "SWAPPINESS", strlen("SWAPPINESS")+1)) {
1640 int value = atoi(result->value);
1641 memcg_set_default_swappiness(value);
1642 memcg_info_set_swappiness(get_memcg_info(CGROUP_ROOT), value);
1643 } else if (!strncmp(result->name, "FOREGROUND_SWAPPINESS", strlen("FOREGROUND_SWAPPINESS")+1)) {
1644 int value = atoi(result->value);
1645 memcg_info_set_swappiness(get_memcg_info(CGROUP_HIGH), value);
1646 } else if (!strncmp(result->name, "BACKGROUND_SWAPPINESS", strlen("BACKGROUND_SWAPPINESS")+1)) {
1647 int value = atoi(result->value);
1648 memcg_info_set_swappiness(get_memcg_info(CGROUP_MEDIUM), value);
1649 } else if (!strncmp(result->name, "LOW_SWAPPINESS", strlen("LOW_SWAPPINESS")+1)) {
1650 int value = atoi(result->value);
1651 memcg_info_set_swappiness(get_memcg_info(CGROUP_LOW), value);
1652 } else if (!strncmp(result->name, "NumFragSize", strlen("NumFragSize")+1)) {
1653 fragmentation_size = atoi(result->value);
1656 return RESOURCED_ERROR_NONE;
1659 /* setup memcg parameters depending on total ram size. */
1660 static void setup_memcg_params(void)
/*
 * Select built-in defaults for thresholds, proactive-killer knobs and
 * victim counts from the device's total RAM bucket (64M..3G+). These may
 * later be overridden by set_memory_config() from limiter.conf.
 */
1662 unsigned long long total_ramsize;
1665 total_ramsize = BYTE_TO_MBYTE(totalram);
1667 _D("Total: %llu MB", total_ramsize);
1668 if (total_ramsize <= MEM_SIZE_64) {
1669 /* set thresholds for ram size 64M */
1670 proactive_threshold = PROACTIVE_64_THRES;
1671 proactive_leave = PROACTIVE_64_LEAVE;
1672 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_64_THRES_DEDUP);
1673 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_64_THRES_SWAP);
1674 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_64_THRES_LOW);
1675 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_64_THRES_MEDIUM);
1676 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_64_THRES_LEAVE);
1677 num_max_victims = CGROUP_ROOT_64_NUM_VICTIMS;
1678 } else if (total_ramsize <= MEM_SIZE_256) {
1679 /* set thresholds for ram size 256M */
1680 proactive_threshold = PROACTIVE_256_THRES;
1681 proactive_leave = PROACTIVE_256_LEAVE;
1682 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_256_THRES_DEDUP);
1683 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_256_THRES_SWAP);
1684 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_256_THRES_LOW);
1685 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_256_THRES_MEDIUM);
1686 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_256_THRES_LEAVE);
1687 num_max_victims = CGROUP_ROOT_256_NUM_VICTIMS;
1688 } else if (total_ramsize <= MEM_SIZE_448) {
1689 /* set thresholds for ram size 448M */
1690 proactive_threshold = PROACTIVE_448_THRES;
1691 proactive_leave = PROACTIVE_448_LEAVE;
1692 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_448_THRES_DEDUP);
1693 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_448_THRES_SWAP);
1694 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_448_THRES_LOW);
1695 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_448_THRES_MEDIUM);
1696 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_448_THRES_LEAVE);
1697 num_max_victims = CGROUP_ROOT_448_NUM_VICTIMS;
1698 } else if (total_ramsize <= MEM_SIZE_512) {
1699 /* set thresholds for ram size 512M */
1700 proactive_threshold = PROACTIVE_512_THRES;
1701 proactive_leave = PROACTIVE_512_LEAVE;
1702 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_512_THRES_DEDUP);
1703 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_512_THRES_SWAP);
1704 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_512_THRES_LOW);
1705 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_512_THRES_MEDIUM);
1706 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_512_THRES_LEAVE);
1707 num_max_victims = CGROUP_ROOT_512_NUM_VICTIMS;
1708 } else if (total_ramsize <= MEM_SIZE_768) {
1709 /* set thresholds for ram size 768M */
1710 proactive_threshold = PROACTIVE_768_THRES;
1711 proactive_leave = PROACTIVE_768_LEAVE;
1712 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_768_THRES_DEDUP);
1713 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_768_THRES_SWAP);
1714 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_768_THRES_LOW);
1715 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_768_THRES_MEDIUM);
1716 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_768_THRES_LEAVE);
1717 num_max_victims = CGROUP_ROOT_768_NUM_VICTIMS;
1718 } else if (total_ramsize <= MEM_SIZE_1024) {
1719 /* set thresholds for ram size 1024M */
1720 proactive_threshold = PROACTIVE_1024_THRES;
1721 proactive_leave = PROACTIVE_1024_LEAVE;
1722 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_1024_THRES_DEDUP);
1723 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_1024_THRES_SWAP);
1724 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_1024_THRES_LOW);
1725 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_1024_THRES_MEDIUM);
1726 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_1024_THRES_LEAVE);
1727 num_max_victims = CGROUP_ROOT_1024_NUM_VICTIMS;
1728 } else if (total_ramsize <= MEM_SIZE_2048) {
/* set thresholds for ram size 2048M */
1729 proactive_threshold = PROACTIVE_2048_THRES;
1730 proactive_leave = PROACTIVE_2048_LEAVE;
1731 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_2048_THRES_DEDUP);
1732 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_2048_THRES_SWAP);
1733 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_2048_THRES_LOW);
1734 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_2048_THRES_MEDIUM);
1735 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_2048_THRES_LEAVE);
1736 num_max_victims = CGROUP_ROOT_2048_NUM_VICTIMS;
/* set thresholds for ram size above 2G */
1738 proactive_threshold = PROACTIVE_3072_THRES;
1739 proactive_leave = PROACTIVE_3072_LEAVE;
1740 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_3072_THRES_DEDUP);
1741 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_3072_THRES_SWAP);
1742 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_3072_THRES_LOW);
1743 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_3072_THRES_MEDIUM);
1744 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_3072_THRES_LEAVE);
1745 num_max_victims = CGROUP_ROOT_3072_NUM_VICTIMS;
1750 static void lowmem_move_memcgroup(int pid, int next_oom_score_adj, struct proc_app_info *pai)
/*
 * Move a process to the memory cgroup matching its new oom_score_adj,
 * keeping the per-app bookkeeping (pai->memory.*) in sync. Only the app's
 * main pid goes through the full state update; others are just written
 * into the cgroup. Several branch/return lines are elided in this view.
 */
1752 int cur_oom_score_adj;
1754 struct memcg_info *mi;
1755 int next_memcg_idx = cgroup_get_type(next_oom_score_adj);
/* only the VIP..LOW band of cgroups is movable */
1757 if(next_memcg_idx < CGROUP_VIP || next_memcg_idx > CGROUP_LOW) {
1758 _E("cgroup type (%d) should not be called", next_memcg_idx);
1761 mi = get_memcg_info(next_memcg_idx);
1768 cgroup_write_pid_fullpath(mi->name, pid);
1773 if (pai->main_pid == pid) {
1774 cur_oom_score_adj = pai->memory.oom_score_adj;
1775 cur_memcg_idx = cgroup_get_type(cur_oom_score_adj);
1777 /* -1 means that this pid is not yet registered at the memory cgroup;
1778 * see the proc_create_app_info function for reference.
1780 if (cur_oom_score_adj != OOMADJ_APP_MAX + 10) {
1781 /* VIP processes should not be asked to move. */
1782 if (cur_memcg_idx <= CGROUP_VIP) {
1783 _I("[DEBUG] pid: %d, name: %s, cur_oom_score_adj: %d", pid, pai->appid, cur_oom_score_adj);
1784 _E("[DEBUG] current cgroup (%s) cannot be VIP or Root", convert_cgroup_type_to_str(cur_memcg_idx));
1789 _I("app (%s) memory cgroup move from %s to %s", pai->appid, convert_cgroup_type_to_str(cur_memcg_idx), convert_cgroup_type_to_str(next_memcg_idx));
1791 if (cur_oom_score_adj == next_oom_score_adj) {
1792 _D("next oom_score_adj (%d) is same with current one", next_oom_score_adj);
1796 proc_set_process_memory_state(pai, next_memcg_idx, mi, next_oom_score_adj);
1798 if (!lowmem_limit_move_cgroup(pai))
/* same target cgroup: bookkeeping updated, no cgroup write needed */
1801 if(cur_memcg_idx == next_memcg_idx)
1804 cgroup_write_pid_fullpath(mi->name, pid);
/* demotion to LOW triggers swapping of that cgroup */
1805 if (next_memcg_idx == CGROUP_LOW)
1806 lowmem_swap_memory(get_memcg_info(CGROUP_LOW)->name);
1810 if (pai->memory.use_mem_limit)
1813 cgroup_write_pid_fullpath(mi->name, pid);
1817 static int lowmem_activate_worker(void)
/*
 * Create the asynchronous reclaim ("LMK") worker: a request queue plus a
 * detached pthread running lowmem_reclaim_worker(). Idempotent — returns
 * immediately if the worker is already active.
 */
1819 int ret = RESOURCED_ERROR_NONE;
1821 if (LOWMEM_WORKER_IS_ACTIVE(&lmw)) {
1825 lmw.queue = g_async_queue_new_full(lowmem_request_destroy);
1827 _E("Failed to create request queue\n");
1828 return RESOURCED_ERROR_FAIL;
/* mark active before starting the thread so the worker sees the flag */
1830 LOWMEM_WORKER_ACTIVATE(&lmw);
1831 ret = pthread_create(&lmw.worker_thread, NULL,
1832 (void *)lowmem_reclaim_worker, (void *)&lmw);
1834 LOWMEM_WORKER_DEACTIVATE(&lmw);
1835 _E("Failed to create LMK thread: %d\n", ret);
/* detached: the worker thread is never joined */
1837 pthread_detach(lmw.worker_thread);
1838 ret = RESOURCED_ERROR_NONE;
1843 static void lowmem_deactivate_worker(void)
/*
 * Stop the reclaim worker: drain pending requests, push a final OOM_DROP
 * request so the thread wakes and exits, then drop our queue reference.
 */
1845 struct lowmem_control *ctl;
1847 if (!LOWMEM_WORKER_IS_ACTIVE(&lmw))
1850 LOWMEM_WORKER_DEACTIVATE(&lmw);
1851 lowmem_drain_queue(&lmw);
1853 ctl = LOWMEM_NEW_REQUEST();
1855 _E("Critical - g_slice alloc failed - Lowmem cannot be deactivated");
/* OOM_DROP tells the worker loop to terminate */
1858 ctl->flags = OOM_DROP;
1859 g_async_queue_push(lmw.queue, ctl);
1860 g_async_queue_unref(lmw.queue);
1863 static int lowmem_press_eventfd_read(int fd)
/*
 * Drain one 8-byte counter value from a memory-pressure eventfd; the
 * value itself is discarded, only the read's success matters.
 * NOTE(review): read() returns ssize_t, narrowed to int here — harmless
 * for an 8-byte read but worth confirming the caller only checks < 0.
 */
1865 uint64_t dummy_state;
1867 return read(fd, &dummy_state, sizeof(dummy_state));
1870 static void lowmem_press_root_cgroup_handler(void)
/*
 * Pressure handler for the root memcg: recompute the memory level from
 * currently-available memory and trigger the matching state action.
 * Caches the last seen value to skip redundant work.
 */
1872 static unsigned int prev_available;
1873 unsigned int available;
1876 available = proc_get_mem_available();
1877 if (prev_available == available)
1880 mem_state = check_mem_state(available);
1881 lowmem_trigger_memory_state_action(mem_state);
1883 prev_available = available;
1886 static void lowmem_press_cgroup_handler(enum cgroup_type type, struct memcg_info *mi)
/*
 * Pressure handler for a non-root memcg: if the cgroup's anonymous memory
 * usage has reached its OOM threshold, start a proactive per-cgroup kill.
 */
1888 unsigned int usage, threshold;
1891 ret = memcg_get_anon_usage(mi->name, &usage);
1893 _D("getting anonymous memory usage fails");
1897 threshold = mi->threshold[MEM_LEVEL_OOM];
1898 if (usage >= threshold)
1899 memory_cgroup_proactive_lmk_act(type, mi);
1901 _I("anon page %u MB < medium threshold %u MB", BYTE_TO_MBYTE(usage),
1902 BYTE_TO_MBYTE(threshold));
1905 static bool lowmem_press_eventfd_handler(int fd, void *data)
/*
 * fd-handler callback for all registered pressure eventfds: drain the fd,
 * find which cgroup it belongs to, and dispatch to the root or per-cgroup
 * pressure handler.
 */
1907 struct memcg_info *mi;
1908 enum cgroup_type type = CGROUP_ROOT;
1910 // FIXME: probably shouldn't get ignored
1911 if (lowmem_press_eventfd_read(fd) < 0)
1912 _E("Failed to read lowmem press event, %m\n");
1914 for (type = CGROUP_ROOT; type < CGROUP_END; type++) {
1915 if (!get_cgroup_tree(type) || !get_memcg_info(type))
1917 mi = get_memcg_info(type);
/* match the fd against the eventfd registered for this cgroup */
1918 if (fd == mi->evfd) {
1919 /* call low memory handler for this memcg */
1920 if (type == CGROUP_ROOT)
1921 lowmem_press_root_cgroup_handler();
1923 lowmem_press_cgroup_handler(type, mi);
1932 static int lowmem_press_register_eventfd(struct memcg_info *mi)
/*
 * Register a memory-pressure eventfd for the given memcg and hook it into
 * the main-loop fd handler. Skipped when the cgroup's OOM threshold is
 * still at its uninitialized value. Returns -errno on eventfd failure.
 */
1935 const char *name = mi->name;
1936 static fd_handler_h handler;
1938 if (mi->threshold[MEM_LEVEL_OOM] == LOWMEM_THRES_INIT)
1941 evfd = memcg_set_eventfd(name, MEMCG_EVENTFD_MEMORY_PRESSURE,
/* preserve errno across the error log call */
1945 int saved_errno = errno;
1946 _E("fail to register event press fd %s cgroup", name);
1947 return -saved_errno;
1952 _I("register event fd success for %s cgroup", name);
1953 add_fd_read_handler(evfd, lowmem_press_eventfd_handler, NULL, NULL, &handler);
1957 static int lowmem_press_setup_eventfd(void)
/*
 * Register pressure eventfds for every cgroup that participates in the
 * memcg hierarchy. Per-cgroup registration failures are not propagated.
 */
1961 for (i = CGROUP_ROOT; i < CGROUP_END; i++) {
1962 if (!get_use_hierarchy(i))
1965 lowmem_press_register_eventfd(get_memcg_info(i));
1967 return RESOURCED_ERROR_NONE;
1970 static void lowmem_force_reclaim_cb(struct lowmem_control *ctl)
/* Completion callback for forced reclaim: return to MEM_LEVEL_HIGH. */
1972 lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
1975 int lowmem_trigger_reclaim(int flags, int victims, enum cgroup_type type, int threshold)
/*
 * Public entry point to force a reclaim pass. Non-positive arguments fall
 * back to defaults (max victims, CGROUP_LOW, root leave threshold). The
 * request is forced into CRITICAL state and queued on the worker.
 */
1977 struct lowmem_control *ctl = LOWMEM_NEW_REQUEST();
1982 flags |= OOM_FORCE | OOM_IN_DEPTH | OOM_SINGLE_SHOT;
1983 victims = victims > 0 ? victims : MAX_MEMORY_CGROUP_VICTIMS;
1984 type = type > 0 ? type : CGROUP_LOW;
1985 threshold = threshold > 0 ? threshold : get_root_memcg_info()->threshold_leave;
/* force=1 so the state change happens even if level looks unchanged */
1987 lowmem_change_memory_state(MEM_LEVEL_CRITICAL, 1);
1988 LOWMEM_SET_REQUEST(ctl, flags,
1989 type, threshold, victims,
1990 lowmem_force_reclaim_cb);
1991 lowmem_queue_request(&lmw, ctl);
1996 void lowmem_trigger_swap_reclaim(enum cgroup_type type, int swap_size)
/*
 * Reclaim on behalf of the swap module: free enough memory to cover the
 * requested swap size (bytes) on top of the root leave threshold, using a
 * capped number of victims.
 */
2000 victims = num_max_victims > MAX_PROACTIVE_HIGH_VICTIMS
2001 ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
2003 size = get_root_memcg_info()->threshold_leave + BYTE_TO_MBYTE(swap_size);
2004 _I("reclaim from swap module, type : %d, size : %d, victims: %d", type, size, victims);
2005 lowmem_trigger_reclaim(0, victims, type, size);
2008 bool lowmem_fragmentated(void)
/*
 * Detect kernel memory fragmentation from /proc/buddyinfo: weigh the
 * "Normal" zone's order>=32K free pages (in 32K units) and compare against
 * the configured fragmentation_size. Return/early-exit lines are elided
 * in this view.
 */
2010 struct buddyinfo bi;
2013 ret = proc_get_buddyinfo("Normal", &bi);
2018 * The fragmentation_size is the minimum count of order-2 pages in "Normal" zone.
2019 * If total buddy pages is smaller than fragmentation_size,
2020 * resourced will detect kernel memory is fragmented.
2021 * Default value is zero in low memory device.
2023 if (bi.page[PAGE_32K] + (bi.page[PAGE_64K] << 1) + (bi.page[PAGE_128K] << 2) +
2024 (bi.page[PAGE_256K] << 3) < fragmentation_size) {
2025 _I("fragmentation detected, need to execute proactive oom killer");
2031 static void lowmem_proactive_oom_killer(int flags, char *appid)
/*
 * Make room before launching an app. Two strategies:
 *  - HEART history: estimate the app's average RSS and reclaim enough to
 *    keep availability above the OOM threshold after launch;
 *  - otherwise: reclaim based on fragmentation state and the static
 *    proactive_threshold / proactive_leave knobs (large-memory apps only).
 * Several early-return lines are elided in this view.
 */
2033 unsigned int before;
2036 before = proc_get_mem_available();
2038 /* If memory state is medium or normal, just return and kill in oom killer */
2039 if (before < get_root_memcg_info()->threshold[MEM_LEVEL_OOM] || before > proactive_leave)
2042 victims = num_max_victims > MAX_PROACTIVE_HIGH_VICTIMS
2043 ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
2045 #ifdef HEART_SUPPORT
2047 * This branch is used only when HEART module is compiled in and
2048 * it's MEMORY module must be enabled. Otherwise this is skipped.
2050 struct heart_memory_data *md = heart_memory_get_memdata(appid, DATA_LATEST);
2052 unsigned int rss, after, size;
2054 rss = KBYTE_TO_MBYTE(md->avg_rss);
/* predicted availability once the app's typical RSS is resident */
2058 after = before - rss;
2060 * after launching app, ensure that available memory is
2061 * above threshold_leave
2063 if (after >= get_root_memcg_info()->threshold[MEM_LEVEL_OOM])
2066 if (proactive_threshold - rss >= get_root_memcg_info()->threshold[MEM_LEVEL_OOM])
2067 size = proactive_threshold;
2069 size = rss + get_root_memcg_info()->threshold[MEM_LEVEL_OOM] + THRESHOLD_MARGIN;
2071 _D("history based proactive LMK : avg rss %u, available %u required = %u MB",
2073 lowmem_trigger_reclaim(0, victims, CGROUP_LOW, size);
2080 * When there is no history data for the launching app,
2081 * it is necessary to check current fragmentation state or application manifest file.
2082 * So, resourced feels proactive LMK is required, run oom killer based on dynamic
2085 if (lowmem_fragmentated())
2089 * run proactive oom killer only when available is larger than
2090 * dynamic process threshold
2092 if (!proactive_threshold || before >= proactive_threshold)
/* only large-memory apps warrant threshold-based proactive reclaim */
2095 if (!(flags & PROC_LARGEMEMORY))
2100 * free THRESHOLD_MARGIN more than real should be freed,
2101 * because launching app is consuming up the memory.
2103 _D("Run threshold based proactive LMK: memory level to reach: %u\n",
2104 proactive_leave + THRESHOLD_MARGIN);
2105 lowmem_trigger_reclaim(0, victims, CGROUP_LOW, proactive_leave + THRESHOLD_MARGIN);
2108 unsigned int lowmem_get_proactive_thres(void)
/* Accessor for the proactive-killer threshold (MB). */
2110 return proactive_threshold;
2113 static int lowmem_prelaunch_handler(void *data)
/*
 * RESOURCED_NOTIFIER_APP_PRELAUNCH hook: run the proactive OOM killer for
 * UI apps about to launch; service apps are exempt.
 */
2115 struct proc_status *ps = (struct proc_status *)data;
2116 struct proc_app_info *pai = ps->pai;
2118 if (!pai || CHECK_BIT(pai->flags, PROC_SERVICEAPP))
2119 return RESOURCED_ERROR_NONE;
2121 lowmem_proactive_oom_killer(ps->pai->flags, ps->pai->appid);
2122 return RESOURCED_ERROR_NONE;
2125 int lowmem_control_handler(void *data)
/*
 * RESOURCED_NOTIFIER_MEM_CONTROL hook: dispatch lowmem control commands;
 * currently only LOWMEM_MOVE_CGROUP (re-home a pid by oom_score_adj).
 */
2127 struct lowmem_control_data *lowmem_data;
2129 lowmem_data = (struct lowmem_control_data *)data;
2130 switch (lowmem_data->control_type) {
2131 case LOWMEM_MOVE_CGROUP:
2132 lowmem_move_memcgroup((pid_t)lowmem_data->pid,
2133 lowmem_data->oom_score_adj, lowmem_data->pai);
2138 return RESOURCED_ERROR_NONE;
2141 static int lowmem_bg_reclaim_handler(void *data)
/*
 * RESOURCED_NOTIFIER_LCD_OFF hook: when background reclaim is enabled and
 * swap is on, start swapping the CGROUP_MEDIUM (background) cgroup.
 * The bg_reclaim check line is elided in this view.
 */
2143 if (swap_get_state() != SWAP_ON)
2144 return RESOURCED_ERROR_NONE;
2147 return RESOURCED_ERROR_NONE;
2150 * Proactively reclaiming memory used by long-lived background processes
2151 * (such as widget instances) may be efficient on devices with limited
2152 * memory constraints. The pages used by such processes could be reclaimed
2153 * (if swap is enabled) earlier than they used to while minimizing the
2154 * impact on the user experience.
2156 resourced_notify(RESOURCED_NOTIFIER_SWAP_START, get_memcg_info(CGROUP_MEDIUM)->name);
2158 return RESOURCED_ERROR_NONE;
2161 static void load_configs(const char *path)
/*
 * Parse limiter.conf with each section callback.
 * NOTE(review): the entire body is currently commented out, so no
 * configuration is actually loaded and the built-in defaults from
 * setup_memcg_params() always apply — confirm this is intentional.
 */
2163 /* if (config_parse(path, set_memory_config, NULL))
2164 _E("(%s-mem) parse Fail", path);
2166 if (config_parse(path, load_popup_config, NULL))
2167 _E("(%s-popup) parse Fail", path);
2169 if (config_parse(path, load_bg_reclaim_config, NULL))
2170 _E("(%s-bg-reclaim) parse Fail", path);
2172 if (config_parse(path, load_mem_log_config, NULL))
2173 _E("(%s-mem-log) parse Fail", path);*/
2178 static void print_mem_configs(void)
/* Log the effective memory configuration after defaults + config parsing. */
2180 /* print info of Memory section */
2181 for (int mem_lvl = 0; mem_lvl < MEM_LEVEL_MAX; mem_lvl++)
2182 _I("set threshold for state '%s' to %u MB",
2183 convert_memstate_to_str(mem_lvl), get_root_memcg_info()->threshold[mem_lvl]);
2185 _I("set number of max victims as %d", num_max_victims);
2186 _I("set threshold leave to %u MB", get_root_memcg_info()->threshold_leave);
2187 _I("set proactive threshold to %u MB", proactive_threshold);
2188 _I("set proactive low memory killer leave to %u MB", proactive_leave);
2190 /* print info of POPUP section */
2191 _I("oom popup is %s", oom_popup_enable == true ? "enabled" : "disabled");
2193 /* print info of BackgroundReclaim section */
2194 _I("Background reclaim is %s", bg_reclaim == true ? "enabled" : "disabled");
2196 /* print info of Logging section */
2197 _I("memory logging is %s", memlog_enabled == 1 ? "enabled" : "disabled");
2198 _I("memory logging path is %s", memlog_path);
2199 _I("the max number of memory logging is %d", memlog_nr_max);
2200 _I("the batch threshold of memory log is %d", memlog_remove_batch_thres);
2201 _I("prefix of memps is %s", memlog_prefix[MEMLOG_MEMPS]);
2202 _I("prefix of memlimit memps is %s", memlog_prefix[MEMLOG_MEMPS_MEMLIMIT]);
2205 /* To Do: should we need lowmem_fd_start, lowmem_fd_stop ?? */
2206 static int lowmem_init(void)
/*
 * Module initialization: build the memcg hierarchy, pick RAM-size
 * defaults, load config, start the reclaim worker, register pressure
 * eventfds and hook the proc-lifecycle notifiers.
 */
2208 int ret = RESOURCED_ERROR_NONE;
2210 _D("resourced memory init start");
2213 ret = cgroup_make_full_subdir(MEMCG_PATH);
2214 ret_value_msg_if(ret < 0, ret, "memory cgroup init failed\n");
2215 memcg_params_init();
2217 setup_memcg_params();
2219 /* default configuration */
2220 load_configs(MEM_CONF_FILE);
2222 /* this function should be called after parsing configurations */
2223 memcg_write_params();
2224 print_mem_configs();
2226 /* make a worker thread called low memory killer */
2227 ret = lowmem_activate_worker();
2229 _E("oom thread create failed\n");
2233 /* register threshold and event fd */
2234 ret = lowmem_press_setup_eventfd();
2236 _E("eventfd setup failed");
2241 lowmem_limit_init();
2242 lowmem_system_init();
2244 register_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
2245 register_notifier(RESOURCED_NOTIFIER_MEM_CONTROL, lowmem_control_handler);
2246 register_notifier(RESOURCED_NOTIFIER_LCD_OFF, lowmem_bg_reclaim_handler);
2251 static int lowmem_exit(void)
/*
 * Module teardown: free a non-default event_level string (line elided),
 * stop the worker, shut down the limit/system submodules and unregister
 * the notifiers registered in lowmem_init().
 */
2253 if (strncmp(event_level, MEMCG_DEFAULT_EVENT_LEVEL, sizeof(MEMCG_DEFAULT_EVENT_LEVEL)))
2256 lowmem_deactivate_worker();
2257 lowmem_limit_exit();
2258 lowmem_system_exit();
2260 unregister_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
2261 unregister_notifier(RESOURCED_NOTIFIER_MEM_CONTROL, lowmem_control_handler);
2262 unregister_notifier(RESOURCED_NOTIFIER_LCD_OFF, lowmem_bg_reclaim_handler);
2264 return RESOURCED_ERROR_NONE;
2267 static int resourced_memory_init(void *data)
/* module_ops init hook: publish the ops table and run lowmem_init(). */
2269 lowmem_ops = &memory_modules_ops;
2270 return lowmem_init();
2273 static int resourced_memory_finalize(void *data)
/* module_ops exit hook: delegate to lowmem_exit(). */
2275 return lowmem_exit();
2278 void lowmem_change_memory_state(int state, int force)
/*
 * Apply a memory-state change. When not forced (branch lines elided),
 * the level is recomputed from current availability rather than taken
 * from the caller.
 */
2285 unsigned int available = proc_get_mem_available();
2286 mem_state = check_mem_state(available);
2289 lowmem_trigger_memory_state_action(mem_state);
/* Accessors for cached total RAM figures (bodies elided in this view). */
2292 unsigned long lowmem_get_ktotalram(void)
2297 unsigned long lowmem_get_totalram(void)
2302 void lowmem_restore_memcg(struct proc_app_info *pai)
/*
 * After a resourced restart, rediscover which memory cgroup the app's
 * main pid currently lives in (by matching the cgroup path hash) and
 * restore pai->memory bookkeeping, including any per-app mem limit.
 */
2306 struct cgroup *cgroup = NULL;
2307 struct memcg_info *mi = NULL;
2308 pid_t pid = pai->main_pid;
2310 ret = cgroup_pid_get_path("memory", pid, &cgpath);
/* walk cgroups from most to least specific, matching by hashname */
2314 for (index = CGROUP_END-1; index >= CGROUP_ROOT; index--) {
2315 cgroup = get_cgroup_tree(index);
2319 mi = cgroup->memcg_info;
2323 if (!strcmp(cgroup->hashname, ""))
2325 if (strstr(cgpath, cgroup->hashname))
2328 pai->memory.memcg_idx = index;
2329 pai->memory.memcg_info = mi;
/* an appid component in the path implies a per-app memory limit cgroup */
2330 if(strstr(cgpath, pai->appid))
2331 pai->memory.use_mem_limit = true;
/* Module registration: high priority so memory handling starts early. */
2336 static struct module_ops memory_modules_ops = {
2337 .priority = MODULE_PRIORITY_HIGH,
2339 .init = resourced_memory_init,
2340 .exit = resourced_memory_finalize,
2343 MODULE_REGISTER(&memory_modules_ops)