4 * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved.
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
20 * @file vmpressure-lowmem-handler.c
22 * @desc lowmem handler using memcgroup
24 * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
38 #include <sys/types.h>
41 #include <sys/sysinfo.h>
43 #include <sys/resource.h>
46 #include <eventsystem.h>
51 #include "lowmem-handler.h"
52 #include "proc-common.h"
55 #include "resourced.h"
58 #include "config-parser.h"
60 #include "swap-common.h"
62 #include "memory-cgroup.h"
63 #include "heart-common.h"
64 #include "proc-main.h"
65 #include "dbus-handler.h"
67 #include "fd-handler.h"
68 #include "resourced-helper-worker.h"
69 #include "safe-kill.h"
70 #include "dedup-common.h"
72 #define LOWMEM_THRES_INIT 0
/* NOTE(review): relative path — presumably missing a leading '/';
 * the exec argv elsewhere in this file hardcodes "/usr/bin/memps".
 * TODO confirm intended value. */
74 #define MEMPS_EXEC_PATH "usr/bin/memps"
75 #define MEM_CONF_FILE RD_CONFIG_FILE(limiter)
76 #define MEM_SECTION "Memory"
77 #define MEM_VIP_SECTION "VIP_PROCESS"
78 #define MEM_VIP_PREDEFINE "PREDEFINE"
79 #define MEM_POPUP_SECTION "POPUP"
80 #define MEM_POPUP_STRING "oom_popup"
81 #define MEM_BG_RECLAIM_SECTION "BackgroundReclaim"
82 #define MEM_BG_RECLAIM_STRING "AfterScreenDim"
83 #define MEM_LOGGING_SECTION "Logging"
86 #define MAX_VICTIMS_BETWEEN_CHECK 3
87 #define MAX_PROACTIVE_LOW_VICTIMS 2
88 #define MAX_PROACTIVE_HIGH_VICTIMS 4
89 #define FOREGROUND_VICTIMS 1
90 #define OOM_TIMER_INTERVAL 2
91 #define OOM_KILLER_PRIORITY -20
92 #define THRESHOLD_MARGIN 10 /* MB */
94 #define MEM_SIZE_64 64 /* MB */
95 #define MEM_SIZE_256 256 /* MB */
96 #define MEM_SIZE_448 448 /* MB */
97 #define MEM_SIZE_512 512 /* MB */
98 #define MEM_SIZE_768 768 /* MB */
99 #define MEM_SIZE_1024 1024 /* MB */
100 #define MEM_SIZE_2048 2048 /* MB */
102 /* thresholds for 64M RAM*/
103 #define PROACTIVE_64_THRES 10 /* MB */
104 #define PROACTIVE_64_LEAVE 30 /* MB */
105 #define CGROUP_ROOT_64_THRES_DEDUP 16 /* MB */
106 #define CGROUP_ROOT_64_THRES_SWAP 15 /* MB */
107 #define CGROUP_ROOT_64_THRES_LOW 8 /* MB */
108 #define CGROUP_ROOT_64_THRES_MEDIUM 5 /* MB */
109 #define CGROUP_ROOT_64_THRES_LEAVE 8 /* MB */
110 #define CGROUP_ROOT_64_NUM_VICTIMS 1
112 /* thresholds for 256M RAM */
113 #define PROACTIVE_256_THRES 50 /* MB */
114 #define PROACTIVE_256_LEAVE 80 /* MB */
115 #define CGROUP_ROOT_256_THRES_DEDUP 60 /* MB */
116 #define CGROUP_ROOT_256_THRES_SWAP 40 /* MB */
117 #define CGROUP_ROOT_256_THRES_LOW 20 /* MB */
118 #define CGROUP_ROOT_256_THRES_MEDIUM 10 /* MB */
119 #define CGROUP_ROOT_256_THRES_LEAVE 20 /* MB */
120 #define CGROUP_ROOT_256_NUM_VICTIMS 2
122 /* threshold for 448M RAM */
123 #define PROACTIVE_448_THRES 80 /* MB */
124 #define PROACTIVE_448_LEAVE 100 /* MB */
125 #define CGROUP_ROOT_448_THRES_DEDUP 120 /* MB */
126 #define CGROUP_ROOT_448_THRES_SWAP 100 /* MB */
127 #define CGROUP_ROOT_448_THRES_LOW 60 /* MB */
128 #define CGROUP_ROOT_448_THRES_MEDIUM 50 /* MB */
129 #define CGROUP_ROOT_448_THRES_LEAVE 70 /* MB */
130 #define CGROUP_ROOT_448_NUM_VICTIMS 5
132 /* threshold for 512M RAM */
133 #define PROACTIVE_512_THRES 100 /* MB */
134 #define PROACTIVE_512_LEAVE 80 /* MB */
135 #define CGROUP_ROOT_512_THRES_DEDUP 140 /* MB */
136 #define CGROUP_ROOT_512_THRES_SWAP 100 /* MB */
137 #define CGROUP_ROOT_512_THRES_LOW 70 /* MB */
138 #define CGROUP_ROOT_512_THRES_MEDIUM 60 /* MB */
139 #define CGROUP_ROOT_512_THRES_LEAVE 80 /* MB */
140 #define CGROUP_ROOT_512_NUM_VICTIMS 5
142 /* threshold for 768 RAM */
143 #define PROACTIVE_768_THRES 100 /* MB */
144 #define PROACTIVE_768_LEAVE 130 /* MB */
145 #define CGROUP_ROOT_768_THRES_DEDUP 180 /* MB */
146 #define CGROUP_ROOT_768_THRES_SWAP 150 /* MB */
147 #define CGROUP_ROOT_768_THRES_LOW 90 /* MB */
148 #define CGROUP_ROOT_768_THRES_MEDIUM 80 /* MB */
149 #define CGROUP_ROOT_768_THRES_LEAVE 100 /* MB */
150 #define CGROUP_ROOT_768_NUM_VICTIMS 5
152 /* threshold for more than 1024M RAM */
153 #define PROACTIVE_1024_THRES 230 /* MB */
154 #define PROACTIVE_1024_LEAVE 150 /* MB */
155 #define CGROUP_ROOT_1024_THRES_DEDUP 400 /* MB */
156 #define CGROUP_ROOT_1024_THRES_SWAP 300 /* MB */
157 #define CGROUP_ROOT_1024_THRES_LOW 120 /* MB */
158 #define CGROUP_ROOT_1024_THRES_MEDIUM 100 /* MB */
159 #define CGROUP_ROOT_1024_THRES_LEAVE 150 /* MB */
160 #define CGROUP_ROOT_1024_NUM_VICTIMS 5
162 /* threshold for more than 2048M RAM */
163 #define PROACTIVE_2048_THRES 200 /* MB */
164 #define PROACTIVE_2048_LEAVE 500 /* MB */
165 #define CGROUP_ROOT_2048_THRES_DEDUP 400 /* MB */
166 #define CGROUP_ROOT_2048_THRES_SWAP 300 /* MB */
167 #define CGROUP_ROOT_2048_THRES_LOW 200 /* MB */
168 #define CGROUP_ROOT_2048_THRES_MEDIUM 160 /* MB */
169 #define CGROUP_ROOT_2048_THRES_LEAVE 300 /* MB */
170 #define CGROUP_ROOT_2048_NUM_VICTIMS 10
172 /* threshold for more than 3072M RAM */
173 #define PROACTIVE_3072_THRES 300 /* MB */
174 #define PROACTIVE_3072_LEAVE 700 /* MB */
175 #define CGROUP_ROOT_3072_THRES_DEDUP 600 /* MB */
176 #define CGROUP_ROOT_3072_THRES_SWAP 500 /* MB */
177 #define CGROUP_ROOT_3072_THRES_LOW 400 /* MB */
178 #define CGROUP_ROOT_3072_THRES_MEDIUM 250 /* MB */
179 #define CGROUP_ROOT_3072_THRES_LEAVE 500 /* MB */
180 #define CGROUP_ROOT_3072_NUM_VICTIMS 10
182 static unsigned proactive_threshold;
183 static unsigned proactive_leave;
184 static unsigned lmk_start_threshold;
186 static char *event_level = MEMCG_DEFAULT_EVENT_LEVEL;
189 * Resourced Low Memory Killer
190 * NOTE: planned to be moved to a separate file.
192 /*-------------------------------------------------*/
193 #define OOM_TIMER_INTERVAL_SEC 2
194 #define LMW_LOOP_WAIT_TIMEOUT_MSEC OOM_TIMER_INTERVAL_SEC*(G_USEC_PER_SEC)
195 #define LMW_RETRY_WAIT_TIMEOUT_MSEC (G_USEC_PER_SEC)
197 struct lowmem_control {
199 * For each queued request the following properties
200 * are required with two exceptions:
201 * - status is being set by LMK
202 * - callback is optional
204 /* Processing flags*/
206 /* Indicator for OOM score of targeted processes */
207 enum cgroup_type type;
209 /* Desired size to be restored - level to be reached (MB)*/
211 /* Max number of processes to be considered */
213 /* Memory reclaim status */
216 * Optional - if set, will be triggered by LMK once the request
219 void (*callback) (struct lowmem_control *);
222 struct lowmem_worker {
223 pthread_t worker_thread;
229 static struct lowmem_worker lmw;
231 //static int memlog_enabled;
232 //static int memlog_nr_max = DEFAULT_MEMLOG_NR_MAX;
233 /* remove logfiles to reduce to this threshold.
234 * it is about five-sixths of the memlog_nr_max. */
235 //static int memlog_remove_batch_thres = (DEFAULT_MEMLOG_NR_MAX * 5) / 6;
236 //static char *memlog_path = DEFAULT_MEMLOG_PATH;
237 //static char *memlog_prefix[MEMLOG_MAX];
239 #define LOWMEM_WORKER_IS_ACTIVE(_lmw) g_atomic_int_get(&(_lmw)->active)
240 #define LOWMEM_WORKER_ACTIVATE(_lmw) g_atomic_int_set(&(_lmw)->active, 1)
241 #define LOWMEM_WORKER_DEACTIVATE(_lmw) g_atomic_int_set(&(_lmw)->active, 0)
243 #define LOWMEM_WORKER_IS_RUNNING(_lmw) g_atomic_int_get(&(_lmw)->running)
244 #define LOWMEM_WORKER_RUN(_lmw) g_atomic_int_set(&(_lmw)->running, 1)
245 #define LOWMEM_WORKER_IDLE(_lmw) g_atomic_int_set(&(_lmw)->running, 0)
247 #define LOWMEM_NEW_REQUEST() g_slice_new0(struct lowmem_control)
249 #define LOWMEM_DESTROY_REQUEST(_ctl) \
250 g_slice_free(typeof(*(_ctl)), _ctl); \
252 #define LOWMEM_SET_REQUEST(c, __flags, __type, __size, __count, __cb) \
254 (c)->flags = __flags; (c)->type = __type; \
255 (c)->size = __size; (c)->count = __count; \
256 (c)->callback = __cb; \
260 #define APP_ATTR_PATH "/proc/%d/attr/current"
262 static int get_privilege(pid_t pid, char *name, size_t len)
269 snprintf(path, sizeof(path), APP_ATTR_PATH, pid);
271 fp = fopen(path, "r");
275 attr_len = fread(attr, 1, sizeof(attr) - 1, fp);
280 attr[attr_len] = '\0';
282 snprintf(name, len, "%s", attr);
286 static int is_app(pid_t pid)
292 ret = get_privilege(pid, attr, sizeof(attr));
294 _E("Failed to get privilege of PID(%d).", pid);
298 len = strlen(attr) + 1;
300 if (!strncmp("System", attr, len))
303 if (!strncmp("User", attr, len))
306 if (!strncmp("System::Privileged", attr, len))
313 static void lowmem_queue_request(struct lowmem_worker *lmw,
314 struct lowmem_control *ctl)
316 if (LOWMEM_WORKER_IS_ACTIVE(lmw))
317 g_async_queue_push(lmw->queue, ctl);
321 static void lowmem_drain_queue(struct lowmem_worker *lmw)
323 struct lowmem_control *ctl;
325 g_async_queue_lock(lmw->queue);
326 while ((ctl = g_async_queue_try_pop_unlocked(lmw->queue))) {
329 LOWMEM_DESTROY_REQUEST(ctl);
331 g_async_queue_unlock(lmw->queue);
334 static void lowmem_request_destroy(gpointer data)
336 struct lowmem_control *ctl = (struct lowmem_control*) data;
340 LOWMEM_DESTROY_REQUEST(ctl);
343 /*-------------------------------------------------*/
345 /* low memory action function for cgroup */
346 static void memory_cgroup_proactive_lmk_act(enum cgroup_type type, struct memcg_info *mi);
347 /* low memory action function */
348 static void high_mem_act(void);
349 static void swap_activate_act(void);
350 static void swap_compact_act(void);
351 static void lmk_act(void);
354 static size_t cur_mem_state = MEM_LEVEL_HIGH;
355 static int num_max_victims = MAX_MEMORY_CGROUP_VICTIMS;
356 static int num_vict_between_check = MAX_VICTIMS_BETWEEN_CHECK;
358 static unsigned long totalram;
359 static unsigned long ktotalram;
361 static struct module_ops memory_modules_ops;
362 static const struct module_ops *lowmem_ops;
363 static bool oom_popup_enable;
364 static bool oom_popup;
365 static bool memcg_swap_status;
366 static bool bg_reclaim;
367 static int fragmentation_size;
369 static const char *convert_cgroup_type_to_str(int type)
371 static const char *type_table[] =
372 {"/", "VIP", "High", "Medium", "Lowest"};
373 if (type >= CGROUP_ROOT && type <= CGROUP_LOW)
374 return type_table[type];
379 static const char *convert_status_to_str(int status)
381 static const char *status_table[] =
382 {"none", "done", "drop", "cont", "retry", "next_type"};
383 if(status >= LOWMEM_RECLAIM_NONE && status <= LOWMEM_RECLAIM_NEXT_TYPE)
384 return status_table[status];
385 return "error status";
388 static const char *convert_memstate_to_str(int mem_state)
390 static const char *state_table[] = {"mem high", "mem medium",
391 "mem low", "mem critical", "mem oom",};
392 if (mem_state >= 0 && mem_state < MEM_LEVEL_MAX)
393 return state_table[mem_state];
397 static int lowmem_launch_oompopup(void)
399 GVariantBuilder *const gv_builder = g_variant_builder_new(G_VARIANT_TYPE("a{ss}"));
400 g_variant_builder_add(gv_builder, "{ss}", "_SYSPOPUP_CONTENT_", "lowmemory_oom");
402 GVariant *const params = g_variant_new("(a{ss})", gv_builder);
403 g_variant_builder_unref(gv_builder);
405 int ret = d_bus_call_method_sync_gvariant(SYSTEM_POPUP_BUS_NAME,
406 SYSTEM_POPUP_PATH_SYSTEM, SYSTEM_POPUP_IFACE_SYSTEM,
407 "PopupLaunch", params);
409 g_variant_unref(params);
414 static inline void get_total_memory(void)
421 totalram = si.totalram;
422 ktotalram = BYTE_TO_KBYTE(totalram);
426 static int lowmem_mem_usage_uss(pid_t pid, unsigned int *usage)
428 unsigned int uss, zram = 0;
434 * In lowmem we need to know memory size of processes to
435 * for terminating apps. To get most real value of usage
436 * we should use USS + ZRAM usage for selected process.
438 * Those values will contain the most approximated amount
439 * of memory that will be freed after process termination.
441 ret = proc_get_uss(pid, &uss);
442 if (ret != RESOURCED_ERROR_NONE)
445 if (swap_get_state() == SWAP_ON) {
446 ret = proc_get_zram_usage(pid, &zram);
447 /* If we don't get zram usage, it's not a problem */
448 if (ret != RESOURCED_ERROR_NONE)
452 return RESOURCED_ERROR_NONE;
455 unsigned int lowmem_get_task_mem_usage_rss(const struct task_info *tsk)
457 unsigned int size = 0, total_size = 0;
462 * If pids are allocated only when there are multiple processes with
463 * the same pgid e.g., browser and web process. Mostly, single process
466 if (tsk->pids == NULL) {
467 ret = proc_get_ram_usage(tsk->pid, &size);
469 /* If there is no proc entry for given pid the process
470 * should be abandoned during further processing
473 _D("failed to get rss memory usage of %d", tsk->pid);
478 for (index = 0; index < tsk->pids->len; index++) {
479 pid = g_array_index(tsk->pids, pid_t, index);
480 ret = proc_get_ram_usage(pid, &size);
481 if (ret != RESOURCED_ERROR_NONE)
489 /*static int memps_file_select(const struct dirent *entry)
491 return strstr(entry->d_name, "memps") ? 1 : 0;
/**
 * @brief Reverse strstr(): find the LAST occurrence of @token in @str.
 *
 * @param str    haystack (NUL-terminated)
 * @param token  needle (NUL-terminated)
 * @return pointer to the last occurrence inside @str, or NULL if absent
 *
 * NOTE(review): the backward-scan loop was lost in the garbled source
 * and is reconstructed from the visible fragments.
 */
static char *strrstr(const char *str, const char *token)
{
	int len = strlen(token);
	const char *p = str + strlen(str);

	/* Scan from the terminator back down to the first character. */
	while (str <= p) {
		if (p[0] == token[0] && strncmp(p, token, len) == 0)
			return (char *)p;
		p--;
	}

	return NULL;
}

/**
 * @brief scandir() comparator for memps log files.
 *
 * Orders entries by the numeric timestamp that follows the last '_'
 * in the file name, oldest first; entries without a parseable suffix
 * sort as timestamp 0.
 */
static int timesort(const struct dirent **a, const struct dirent **b)
{
	int time1 = 0, time2 = 0;
	char *ptr;

	ptr = strrstr((*a)->d_name, "_");
	if (ptr && *++ptr)
		time1 = atoi(ptr);
	ptr = strrstr((*b)->d_name, "_");
	if (ptr && *++ptr)
		time2 = atoi(ptr);

	return (time1 - time2);
}
523 static int clear_logs(void *data)
525 struct dirent **namelist;
529 char *dir = (char*)data;
533 return RESOURCED_ERROR_NONE;
536 return RESOURCED_ERROR_NONE;
539 if (len <= 0 || len >= sizeof fpath - 1) {
540 _E("Invalid parameter - Directory path is too short or too long");
541 return RESOURCED_ERROR_INVALID_PARAMETER;
544 n = scandir(dir, &namelist, memps_file_select, timesort);
546 _D("num of log files %d", n);
547 if (n <= memlog_nr_max) {
551 return RESOURCED_ERROR_NONE;
554 strncpy(fpath, dir, sizeof fpath - 1);
555 fpath[sizeof fpath - 1] = '\0';
559 len = sizeof fpath - len - 1;
560 for (i = 0; i < n; i++) {
561 if (i < n - memlog_remove_batch_thres) {
562 if (strlen(namelist[i]->d_name) > len - 1)
564 strncpy(fname, namelist[i]->d_name, len - 1);
565 fpath[sizeof fpath - 1] = '\0';
566 _D("remove log file %s", fpath);
569 _E("%s file cannot removed", fpath);
575 return RESOURCED_ERROR_NONE;
578 void make_memps_log(enum mem_log memlog, pid_t pid, char *victim_name)
582 char new_log[BUF_MAX];
583 static pid_t old_pid;
584 int oom_score_adj = 0, ret;
590 if (memlog < MEMLOG_MEMPS || memlog >= MEMLOG_MAX)
593 prefix = memlog_prefix[memlog];
602 if (localtime_r(&now, &cur_tm) == NULL) {
603 _E("Fail to get localtime");
607 snprintf(new_log, sizeof(new_log),
608 "%s/%s_%s_%d_%.4d%.2d%.2d%.2d%.2d%.2d", memlog_path, prefix, victim_name,
609 pid, (1900 + cur_tm.tm_year), 1 + cur_tm.tm_mon,
610 cur_tm.tm_mday, cur_tm.tm_hour, cur_tm.tm_min,
613 ret = proc_get_oom_score_adj(pid, &oom_score_adj);
614 if (ret || oom_score_adj > OOMADJ_BACKGRD_LOCKED) {
616 _cleanup_fclose_ FILE *f = NULL;
618 f = fopen(new_log, "w");
620 _E("fail to create memps log %s", new_log);
623 proc_print_meninfo(f);
627 const char *argv[4] = {"/usr/bin/memps", "-f", NULL, NULL};
630 exec_cmd(ARRAY_SIZE(argv), argv);
633 clear_logs(memlog_path);
636 static int lowmem_kill_victim(const struct task_info *tsk,
637 int flags, int memps_log, unsigned int *victim_size)
641 char appname[PATH_MAX];
643 struct proc_app_info *pai;
647 if (pid <= 0 || pid == getpid())
648 return RESOURCED_ERROR_FAIL;
650 ret = proc_get_cmdline(pid, appname, sizeof appname);
651 if (ret == RESOURCED_ERROR_FAIL)
652 return RESOURCED_ERROR_FAIL;
654 if (!strcmp("memps", appname) ||
655 !strcmp("crash-worker", appname) ||
656 !strcmp("system-syspopup", appname)) {
657 _E("%s(%d) was selected, skip it", appname, pid);
658 return RESOURCED_ERROR_FAIL;
662 make_memps_log(MEMLOG_MEMPS, pid, appname);*/
666 resourced_proc_status_change(PROC_CGROUP_SET_TERMINATE_REQUEST,
667 pid, NULL, NULL, PROC_TYPE_NONE);
669 if (tsk->oom_score_lru <= OOMADJ_BACKGRD_LOCKED) {
671 } else if (tsk->oom_score_lru > OOMADJ_BACKGRD_LOCKED && tsk->oom_score_lru < OOMADJ_BACKGRD_UNLOCKED) {
672 int app_flag = pai->flags;
673 sigterm = app_flag & PROC_SIGTERM;
676 if (pai->memory.oom_killed)
679 pai->memory.oom_killed = true;
683 safe_kill(pid, SIGTERM);
685 safe_kill(pid, SIGKILL);
687 _D("[LMK] we killed, force(%d), %d (%s) score = %d, size: rss = %u, sigterm = %d\n",
688 flags & OOM_FORCE, pid, appname, tsk->oom_score_adj,
690 *victim_size = tsk->size;
692 if (tsk->oom_score_lru > OOMADJ_FOREGRD_UNLOCKED)
693 return RESOURCED_ERROR_NONE;
695 if (oom_popup_enable && !oom_popup) {
696 lowmem_launch_oompopup();
700 make_memps_log(MEMLOG_MEMPS, pid, appname);*/
702 return RESOURCED_ERROR_NONE;
705 /* return LOWMEM_RECLAIM_CONT when killing should be continued */
706 static int lowmem_check_kill_continued(struct task_info *tsk, int flags)
708 unsigned int available;
711 * Processes with the priority higher than perceptible are killed
712 * only when the available memory is less than dynamic oom threshold.
714 if (tsk->oom_score_lru > OOMADJ_BACKGRD_PERCEPTIBLE)
715 return LOWMEM_RECLAIM_CONT;
717 if (flags & (OOM_FORCE|OOM_SINGLE_SHOT)) {
718 _I("[LMK] %d is dropped during force kill, flag=%d",
720 return LOWMEM_RECLAIM_DROP;
722 available = proc_get_mem_available();
723 if (available > lmk_start_threshold) {
724 _I("[LMK] available=%d MB, larger than %u MB, do not kill foreground",
725 available, lmk_start_threshold);
726 return LOWMEM_RECLAIM_RETRY;
728 return LOWMEM_RECLAIM_CONT;
731 static int compare_victims(const struct task_info *ta, const struct task_info *tb)
738 * followed by kernel badness point calculation using heuristic.
739 * oom_score_adj is normalized by its unit, which varies -1000 ~ 1000.
741 pa = ta->oom_score_lru * (ktotalram / 2000) + ta->size;
742 pb = tb->oom_score_lru * (ktotalram / 2000) + tb->size;
747 static void lowmem_free_task_info_array(GArray *array)
751 for (i = 0; i < array->len; i++) {
752 struct task_info *tsk;
754 tsk = &g_array_index(array, struct task_info, i);
756 g_array_free(tsk->pids, true);
759 g_array_free(array, true);
762 static inline int is_dynamic_process_killer(int flags)
764 return (flags & OOM_FORCE) && !(flags & OOM_NOMEMORY_CHECK);
767 static unsigned int is_memory_recovered(unsigned int *avail, unsigned int thres)
769 unsigned int available = proc_get_mem_available();
770 unsigned int should_be_freed = 0;
772 if (available < thres)
773 should_be_freed = thres - available;
775 * free THRESHOLD_MARGIN more than real should be freed,
776 * because launching app is consuming up the memory.
778 if (should_be_freed > 0)
779 should_be_freed += THRESHOLD_MARGIN;
783 return should_be_freed;
786 static int lowmem_get_pids_proc(GArray *pids)
789 struct dirent *dentry;
791 dp = opendir("/proc");
793 _E("fail to open /proc");
794 return RESOURCED_ERROR_FAIL;
796 while ((dentry = readdir(dp)) != NULL) {
797 struct task_info tsk;
798 pid_t pid = 0, pgid = 0;
801 if (!isdigit(dentry->d_name[0]))
804 pid = (pid_t)atoi(dentry->d_name);
806 /* skip invalid pids or kernel processes */
816 if (proc_get_oom_score_adj(pid, &oom) < 0) {
817 _D("pid(%d) was already terminated", pid);
821 /* VIP pids should be excluded from the LMK list */
822 if (cgroup_get_type(oom) == CGROUP_VIP)
826 * Check whether this array includes applications or not.
827 * If it doesn't require to get applications
828 * and pid has been already included in pai,
831 if (oom > OOMADJ_SU && oom <= OOMADJ_APP_MAX)
835 * Currently, for tasks in the memory cgroup,
836 * do not consider multiple tasks with one pgid.
840 tsk.oom_score_adj = oom;
841 tsk.oom_score_lru = oom;
843 tsk.size = lowmem_get_task_mem_usage_rss(&tsk);
846 g_array_append_val(pids, tsk);
850 return RESOURCED_ERROR_NONE;
854 * @brief Terminate up to max_victims processes after finding them from pai.
855 It depends on proc_app_info lists
856 and it also reference systemservice cgroup
857 because some processes in this group don't have proc_app_info.
859 * @max_victims: max number of processes to be terminated
860 * @start_oom: find victims from start oom adj score value
861 * @end_oom: find victims to end oom adj score value
862 * @should_be_freed: amount of memory to be reclaimed (in MB)
863 * @total_size[out]: total size of possibly reclaimed memory (required)
864 * @completed: final outcome (optional)
865 * @threshold: desired value of memory available
867 static int lowmem_kill_victims(int max_victims,
868 int start_oom, int end_oom, unsigned should_be_freed, int flags,
869 unsigned int *total_size, int *completed, int threshold)
872 GSList *proc_app_list = NULL;
873 int i, ret, victim = 0;
874 unsigned int victim_size = 0;
875 unsigned int total_victim_size = 0;
876 int status = LOWMEM_RECLAIM_NONE;
877 GArray *candidates = NULL;
878 GSList *iter, *iterchild;
879 struct proc_app_info *pai = NULL;
881 int should_be_freed_kb = MBYTE_TO_KBYTE(should_be_freed);
883 candidates = g_array_new(false, false, sizeof(struct task_info));
885 proc_app_list = proc_app_list_open();
886 gslist_for_each_item(iter, proc_app_list) {
890 pai = (struct proc_app_info *)iter->data;
894 oom_score_adj = pai->memory.oom_score_adj;
895 if (oom_score_adj > end_oom || oom_score_adj < start_oom)
898 if ((flags & OOM_REVISE) && pai->memory.oom_killed)
901 ti.pid = pai->main_pid;
902 ti.pgid = getpgid(ti.pid);
903 ti.oom_score_adj = oom_score_adj;
907 * Before oom_score_adj of favourite (oom_score = 270) applications is
908 * independent of lru_state, now we consider lru_state, while
909 * killing favourite process.
912 if (oom_score_adj == OOMADJ_FAVORITE && pai->lru_state >= PROC_BACKGROUND)
913 ti.oom_score_lru = OOMADJ_FAVORITE + OOMADJ_FAVORITE_APP_INCREASE * pai->lru_state;
915 ti.oom_score_lru = oom_score_adj;
918 ti.pids = g_array_new(false, false, sizeof(pid_t));
919 g_array_append_val(ti.pids, ti.pid);
920 gslist_for_each_item(iterchild, pai->childs) {
921 pid_t child = GPOINTER_TO_PID(iterchild->data);
922 g_array_append_val(ti.pids, child);
927 g_array_append_val(candidates, ti);
930 proc_app_list_close();
932 if (!candidates->len) {
933 status = LOWMEM_RECLAIM_NEXT_TYPE;
937 _D("[LMK] candidate ratio=%d/%d", candidates->len, total_count);
940 for (i = 0; i < candidates->len; i++) {
941 struct task_info *tsk;
943 tsk = &g_array_index(candidates, struct task_info, i);
944 tsk->size = lowmem_get_task_mem_usage_rss(tsk);
948 * In case of start_oom == OOMADJ_SU,
949 * we're going to try to kill some of processes in /proc
950 * to handle low memory situation.
951 * It can find malicious system process even though it has low oom score.
953 if (start_oom == OOMADJ_SU)
954 lowmem_get_pids_proc(candidates);
956 g_array_sort(candidates, (GCompareFunc)compare_victims);
958 for (i = 0; i < candidates->len; i++) {
959 struct task_info *tsk;
961 if (i >= max_victims) {
962 status = LOWMEM_RECLAIM_NEXT_TYPE;
967 * Available memory is checking only every
968 * num_vict_between_check process for reducing burden.
970 if (!(i % num_vict_between_check)) {
971 if (proc_get_mem_available() > threshold) {
972 status = LOWMEM_RECLAIM_DONE;
977 if (!(flags & OOM_NOMEMORY_CHECK) &&
978 total_victim_size >= should_be_freed_kb) {
979 _D("[LMK] victim=%d, max_victims=%d, total_size=%uKB",
980 victim, max_victims, total_victim_size);
981 status = LOWMEM_RECLAIM_DONE;
985 tsk = &g_array_index(candidates, struct task_info, i);
987 status = lowmem_check_kill_continued(tsk, flags);
988 if (status != LOWMEM_RECLAIM_CONT)
991 _I("[LMK] select victims from proc_app_list pid(%d) with oom_score_adj(%d)\n", tsk->pid, tsk->oom_score_adj);
993 ret = lowmem_kill_victim(tsk, flags, i, &victim_size);
994 if (ret != RESOURCED_ERROR_NONE)
997 total_victim_size += victim_size;
1001 lowmem_free_task_info_array(candidates);
1002 *total_size = total_victim_size;
1003 if(*completed != LOWMEM_RECLAIM_CONT)
1004 *completed = status;
1006 *completed = LOWMEM_RECLAIM_NEXT_TYPE;
1010 static int calculate_range_of_oom(enum cgroup_type type, int *min, int *max)
1012 if (type == CGROUP_VIP || type >= CGROUP_END || type <= CGROUP_TOP) {
1013 _E("cgroup type (%d) is out of scope", type);
1014 return RESOURCED_ERROR_FAIL;
1017 *max = cgroup_get_highest_oom_score_adj(type);
1018 *min = cgroup_get_lowest_oom_score_adj(type);
1020 return RESOURCED_ERROR_NONE;
1023 static void lowmem_handle_request(struct lowmem_control *ctl)
1025 int start_oom, end_oom;
1026 int count = 0, victim_cnt = 0;
1027 int max_victim_cnt = ctl->count;
1028 int status = LOWMEM_RECLAIM_NONE;
1029 unsigned int available = 0;
1030 unsigned int total_size = 0;
1031 unsigned int current_size = 0;
1032 unsigned int reclaim_size, shortfall = 0;
1033 enum cgroup_type cgroup_type = ctl->type;
1035 available = proc_get_mem_available();
1036 reclaim_size = ctl->size > available
1037 ? ctl->size - available : 0;
1039 if (!reclaim_size) {
1040 status = LOWMEM_RECLAIM_DONE;
1045 /* Prepare LMK to start doing it's job. Check preconditions. */
1046 if (calculate_range_of_oom(cgroup_type, &start_oom, &end_oom))
1049 lmk_start_threshold = get_root_memcg_info()->threshold[MEM_LEVEL_OOM];
1050 shortfall = is_memory_recovered(&available, ctl->size);
1052 if (!shortfall || !reclaim_size) {
1053 status = LOWMEM_RECLAIM_DONE;
1059 victim_cnt = lowmem_kill_victims(max_victim_cnt, start_oom, end_oom,
1060 reclaim_size, ctl->flags, ¤t_size, &status, ctl->size);
1063 current_size = KBYTE_TO_MBYTE(current_size);
1064 reclaim_size -= reclaim_size > current_size
1065 ? current_size : reclaim_size;
1066 total_size += current_size;
1067 count += victim_cnt;
1068 _I("[LMK] current: kill %d victims, reclaim_size=%uMB from %d to %d status=%s",
1069 victim_cnt, current_size,
1070 start_oom, end_oom, convert_status_to_str(status));
1073 if ((status == LOWMEM_RECLAIM_DONE) ||
1074 (status == LOWMEM_RECLAIM_DROP) ||
1075 (status == LOWMEM_RECLAIM_RETRY))
1079 * If it doesn't finish reclaiming memory in first operation,
1080 - if flags has OOM_IN_DEPTH,
1081 try to find victims again in the active cgroup.
1082 otherwise, just return because there is no more victims in the desired cgroup.
1083 - if flags has OOM_REVISE,
1084 it means that resourced can't find victims from proc_app_list.
1085 So, it should search victims or malicious process from /proc.
1086 But searching /proc leads to abnormal behaviour.
1087 (Make sluggish or kill same victims continuously)
1088 Thus, otherwise, just return in first operation and wait some period.
1090 if (cgroup_type == CGROUP_LOW) {
1091 cgroup_type = CGROUP_MEDIUM;
1093 } else if ((cgroup_type == CGROUP_MEDIUM) && (ctl->flags & OOM_IN_DEPTH)) {
1094 cgroup_type = CGROUP_HIGH;
1095 if(ctl->flags & OOM_FORCE)
1096 max_victim_cnt = FOREGROUND_VICTIMS;
1098 } else if ((cgroup_type == CGROUP_HIGH) && (ctl->flags & OOM_IN_DEPTH)) {
1099 status = LOWMEM_RECLAIM_RETRY;
1100 ctl->type = CGROUP_ROOT;
1102 else if (cgroup_type == CGROUP_ROOT) {
1103 status = LOWMEM_RECLAIM_RETRY;
1106 _I("[LMK] Done: killed %d processes reclaimed=%uMB remaining=%uMB shortfall=%uMB status=%s",
1107 count, total_size, reclaim_size, shortfall, convert_status_to_str(status));
1109 /* After we finish reclaiming it's worth to remove oldest memps logs */
1110 /* if (count && memlog_enabled)
1111 request_helper_worker(CLEAR_LOGS, memlog_path, clear_logs, NULL);*/
1112 ctl->status = status;
1115 static void *lowmem_reclaim_worker(void *arg)
1117 struct lowmem_worker *lmw = (struct lowmem_worker *)arg;
1119 setpriority(PRIO_PROCESS, 0, OOM_KILLER_PRIORITY);
1121 g_async_queue_ref(lmw->queue);
1125 struct lowmem_control *ctl;
1127 LOWMEM_WORKER_IDLE(lmw);
1128 /* Wait on any wake-up call */
1129 ctl = g_async_queue_pop(lmw->queue);
1131 if (ctl->flags & OOM_DROP)
1132 LOWMEM_DESTROY_REQUEST(ctl);
1134 if (!LOWMEM_WORKER_IS_ACTIVE(lmw) || !ctl)
1137 LOWMEM_WORKER_RUN(lmw);
1139 _D("[LMK] %d tries", ++try_count);
1140 lowmem_handle_request(ctl);
1142 * Case the process failed to reclaim requested amount of memory
1143 * or still under have memory pressure - try the timeout wait.
1144 * There is a chance this will get woken-up in a better reality.
1146 if (ctl->status == LOWMEM_RECLAIM_RETRY &&
1147 !(ctl->flags & OOM_SINGLE_SHOT)) {
1148 unsigned int available = proc_get_mem_available();
1150 if (available >= ctl->size) {
1151 _I("[LMK] Memory restored: requested=%uMB available=%uMB\n",
1152 ctl->size, available);
1153 ctl->status = LOWMEM_RECLAIM_DONE;
1156 LOWMEM_DESTROY_REQUEST(ctl);
1157 LOWMEM_WORKER_IDLE(lmw);
1161 if (LOWMEM_WORKER_IS_ACTIVE(lmw)) {
1162 g_usleep(LMW_RETRY_WAIT_TIMEOUT_MSEC);
1163 ctl->flags |= OOM_REVISE;
1169 * The ctl callback would check available size again.
1170 * And it is last point in reclaiming worker.
1171 * Resourced sent SIGKILL signal to victim processes
1172 * so it should wait for a some seconds until each processes returns memory.
1174 g_usleep(LMW_LOOP_WAIT_TIMEOUT_MSEC);
1178 /* The lmk becomes the owner of all queued requests .. */
1179 LOWMEM_DESTROY_REQUEST(ctl);
1180 LOWMEM_WORKER_IDLE(lmw);
1182 g_async_queue_unref(lmw->queue);
1186 static void change_lowmem_state(unsigned int mem_state)
1188 cur_mem_state = mem_state;
1189 lmk_start_threshold = get_root_memcg_info()->threshold[MEM_LEVEL_OOM];
1191 resourced_notify(RESOURCED_NOTIFIER_MEM_LEVEL_CHANGED,
1192 (void *)&cur_mem_state);
1195 /* only app can call this function
1196 * that is, service cannot call the function
1198 static void lowmem_swap_memory(char *path)
1200 unsigned int available;
1202 if (cur_mem_state == MEM_LEVEL_HIGH)
1205 if (swap_get_state() != SWAP_ON)
1208 available = proc_get_mem_available();
1209 if (cur_mem_state != MEM_LEVEL_LOW &&
1210 available <= get_root_memcg_info()->threshold[MEM_LEVEL_LOW])
1211 swap_activate_act();
1213 resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
1214 memcg_swap_status = true;
1217 void lowmem_trigger_swap(pid_t pid, char *path, bool move)
1221 int lowest_oom_score_adj;
1224 _E("[SWAP] Unknown memory cgroup path to swap");
1228 /* In this case, corresponding process will be moved to memory CGROUP_LOW.
1231 error = proc_get_oom_score_adj(pid, &oom_score_adj);
1233 _E("[SWAP] Cannot get oom_score_adj of pid (%d)", pid);
1237 lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(CGROUP_LOW);
1239 if (oom_score_adj < lowest_oom_score_adj) {
1240 oom_score_adj = lowest_oom_score_adj;
1241 /* End of this function, 'lowmem_swap_memory()' function will be called */
1242 proc_set_oom_score_adj(pid, oom_score_adj, find_app_info(pid));
1247 /* Corresponding process is already managed per app or service.
1248 * In addition, if some process is already located in the CGROUP_LOW, then just do swap
1250 resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
1253 static void memory_level_send_system_event(int lv)
1259 case MEM_LEVEL_HIGH:
1260 case MEM_LEVEL_MEDIUM:
1262 str = EVT_VAL_MEMORY_NORMAL;
1264 case MEM_LEVEL_CRITICAL:
1265 str = EVT_VAL_MEMORY_SOFT_WARNING;
1268 str = EVT_VAL_MEMORY_HARD_WARNING;
1271 _E("Invalid state");
1275 b = bundle_create();
1277 _E("Failed to create bundle");
1281 bundle_add_str(b, EVT_KEY_LOW_MEMORY, str);
1282 eventsystem_send_system_event(SYS_EVENT_LOW_MEMORY, b);
1286 static void high_mem_act(void)
1290 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
1292 _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
1293 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
1294 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
1295 VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
1296 memory_level_send_system_event(MEM_LEVEL_HIGH);
1299 change_lowmem_state(MEM_LEVEL_HIGH);
1301 if (swap_get_state() == SWAP_ON && memcg_swap_status) {
1302 resourced_notify(RESOURCED_NOTIFIER_SWAP_UNSET_LIMIT, get_memcg_info(CGROUP_LOW));
1303 memcg_swap_status = false;
1305 if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
1306 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
1307 (void *)CGROUP_FREEZER_ENABLED);
/* Actions for the LOW memory level: report "normal" status via vconf and
 * system event, move the internal state machine to MEM_LEVEL_LOW,
 * re-enable the freezer if paused, and activate swap if it is not on yet. */
1310 static void swap_activate_act(void)
1314 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
1316 _E("vconf get failed %s", VCONFKEY_SYSMAN_LOW_MEMORY);
1318 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
1319 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
1320 VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
1321 memory_level_send_system_event(MEM_LEVEL_LOW);
1323 change_lowmem_state(MEM_LEVEL_LOW);
1324 if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
1325 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
1326 (void *)CGROUP_FREEZER_ENABLED);
/* Lazily turn swap on the first time this level is reached. */
1328 if (swap_get_state() != SWAP_ON)
1329 resourced_notify(RESOURCED_NOTIFIER_SWAP_ACTIVATE, NULL);
/* Trigger a KSM (kernel samepage merging) deduplication scan.
 * A PARTIAL scan also transitions the state machine to MEM_LEVEL_MEDIUM
 * and refreshes vconf/system-event status; a FULL scan only requests the
 * scan itself. Skipped entirely unless dedup runs in ONE_SHOT mode. */
1332 static void dedup_act(enum ksm_scan_mode mode)
1337 if (dedup_get_state() != DEDUP_ONE_SHOT)
1340 if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
1341 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
1342 (void *)CGROUP_FREEZER_ENABLED)
1344 if (mode == KSM_SCAN_PARTIAL) {
1345 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
1347 _E("vconf get failed %s", VCONFKEY_SYSMAN_LOW_MEMORY);
1349 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
1350 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
1351 VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
1352 memory_level_send_system_event(MEM_LEVEL_MEDIUM);
1354 change_lowmem_state(MEM_LEVEL_MEDIUM);
1356 data = KSM_SCAN_PARTIAL;
1357 resourced_notify(RESOURCED_NOTIFIER_DEDUP_SCAN, &data);
1358 } else if (mode == KSM_SCAN_FULL) {
1359 data = KSM_SCAN_FULL;
1360 resourced_notify(RESOURCED_NOTIFIER_DEDUP_SCAN, &data);
/* CRITICAL level action: update the state machine, ask the swap module to
 * compact, and announce the soft-warning system event. */
1364 static void swap_compact_act(void)
1366 change_lowmem_state(MEM_LEVEL_CRITICAL);
1367 resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_CRITICAL);
1368 memory_level_send_system_event(MEM_LEVEL_CRITICAL);
/* Completion callback for the OOM-level reclaim request queued by lmk_act():
 * once reclaim is done, drop back to the HIGH (normal) memory state. */
1371 static void medium_cb(struct lowmem_control *ctl)
1373 if (ctl->status == LOWMEM_RECLAIM_DONE)
1375 lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
/* OOM-level (low memory killer) action: publish the hard-warning state,
 * pause the freezer, and — if available memory is still below the leave
 * threshold — queue an in-depth reclaim request for the worker thread.
 * Also asks the swap module to compact at OOM level. */
1378 static void lmk_act(void)
1380 unsigned int available;
1382 int status = VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL;
1385 * Don't trigger reclaim worker
1386 * if it is already running
1388 if (LOWMEM_WORKER_IS_RUNNING(&lmw))
1391 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
1393 _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
1395 memory_level_send_system_event(MEM_LEVEL_OOM);
1396 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING) {
/* Freezing background work reduces memory churn while the killer runs. */
1397 if (proc_get_freezer_status() == CGROUP_FREEZER_ENABLED)
1398 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
1399 (void *)CGROUP_FREEZER_PAUSED);
1400 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
1401 VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING);
1403 available = proc_get_mem_available();
1405 change_lowmem_state(MEM_LEVEL_OOM);
1407 if (available < get_root_memcg_info()->threshold_leave) {
1408 struct lowmem_control *ctl;
1410 ctl = LOWMEM_NEW_REQUEST();
/* Reclaim until threshold_leave is reached, up to num_max_victims kills;
 * medium_cb restores the HIGH state when done. */
1412 LOWMEM_SET_REQUEST(ctl, OOM_IN_DEPTH,
1413 CGROUP_LOW, get_root_memcg_info()->threshold_leave,
1414 num_max_victims, medium_cb);
1415 lowmem_queue_request(&lmw, ctl);
1419 resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_OOM);
1422 * Flush resourced memory such as other processes.
1423 * Resourced can use both many fast bins and sqlite3 cache memory.
/* Dispatch the per-level action for a newly computed memory state.
 * MEM_LEVEL_OOM is exempt from the "no change, no action" short-circuit so
 * that repeated OOM pressure can keep (re)arming the reclaim worker. */
1430 static void lowmem_trigger_memory_state_action(int mem_state)
1433 * Check if the state we want to set is different from current
1434 * But it should except this condition if mem_state is already medium.
1435 * Otherwise, reclaim worker couldn't run any more.
1437 if (mem_state != MEM_LEVEL_OOM && cur_mem_state == mem_state)
1440 switch (mem_state) {
1441 case MEM_LEVEL_HIGH:
1444 case MEM_LEVEL_MEDIUM:
1445 dedup_act(KSM_SCAN_PARTIAL);
1448 swap_activate_act();
1450 case MEM_LEVEL_CRITICAL:
1451 dedup_act(KSM_SCAN_FULL);
/* Debug helper: log pid and USS memory usage of every process in the given
 * memory cgroup.
 * NOTE(review): pids_array is dereferenced after cgroup_get_pids(); whether
 * a failure/NULL case is handled lies in lines not visible here — confirm. */
1462 static void lowmem_dump_cgroup_procs(struct memcg_info *mi)
1467 GArray *pids_array = NULL;
1469 cgroup_get_pids(mi->name, &pids_array);
1471 for (i = 0; i < pids_array->len; i++) {
1472 pid = g_array_index(pids_array, pid_t, i);
1473 lowmem_mem_usage_uss(pid, &size);
1474 _I("pid = %d, size = %u KB", pid, size);
1476 g_array_free(pids_array, true);
/* Per-cgroup proactive LMK: dump the cgroup's processes for diagnostics,
 * then queue a single-shot, in-depth kill request targeting the cgroup's
 * oomleave threshold. */
1479 static void memory_cgroup_proactive_lmk_act(enum cgroup_type type, struct memcg_info *mi)
1481 struct lowmem_control *ctl;
1483 /* To Do: only start to kill fg victim when no pending fg victim */
1484 lowmem_dump_cgroup_procs(mi);
1486 ctl = LOWMEM_NEW_REQUEST();
1488 LOWMEM_SET_REQUEST(ctl, OOM_SINGLE_SHOT | OOM_IN_DEPTH, type,
1489 mi->oomleave, num_max_victims, NULL);
1490 lowmem_queue_request(&lmw, ctl);
/* Map the currently available memory (MB) to a memory level by walking the
 * levels from most severe (MEM_LEVEL_MAX-1) down to HIGH. The OOM level
 * uses lmk_start_threshold instead of the per-level root-memcg threshold. */
1494 static unsigned int check_mem_state(unsigned int available)
1497 for (mem_state = MEM_LEVEL_MAX - 1; mem_state > MEM_LEVEL_HIGH; mem_state--) {
1498 if (mem_state != MEM_LEVEL_OOM && available <= get_root_memcg_info()->threshold[mem_state])
1500 else if (mem_state == MEM_LEVEL_OOM && available <= lmk_start_threshold)
1507 /*static int load_bg_reclaim_config(struct parse_result *result, void *user_data)
1510 return RESOURCED_ERROR_INVALID_PARAMETER;
1512 if (strncmp(result->section, MEM_BG_RECLAIM_SECTION, strlen(MEM_BG_RECLAIM_SECTION)+1))
1513 return RESOURCED_ERROR_NONE;
1515 if (!strncmp(result->name, MEM_BG_RECLAIM_STRING, strlen(MEM_BG_RECLAIM_STRING)+1)) {
1516 if (!strncmp(result->value, "yes", strlen("yes")+1))
1518 else if (!strncmp(result->value, "no", strlen("no")+1))
1523 return RESOURCED_ERROR_NONE;
/* Config parser callback for the POPUP section: reads the "oom_popup"
 * yes/no flag into oom_popup_enable. Lines from other sections are
 * accepted and ignored (returns RESOURCED_ERROR_NONE). */
1526 static int load_popup_config(struct parse_result *result, void *user_data)
1529 return RESOURCED_ERROR_INVALID_PARAMETER;
/* Not our section: nothing to do, but not an error either. */
1531 if (strncmp(result->section, MEM_POPUP_SECTION, strlen(MEM_POPUP_SECTION)+1))
1532 return RESOURCED_ERROR_NONE;
1534 if (!strncmp(result->name, MEM_POPUP_STRING, strlen(MEM_POPUP_STRING)+1)) {
1535 if (!strncmp(result->value, "yes", strlen("yes")+1))
1536 oom_popup_enable = true;
1537 else if (!strncmp(result->value, "no", strlen("no")+1))
1538 oom_popup_enable = false;
1542 return RESOURCED_ERROR_NONE;
/* Config parser callback for the Logging section: fills in the memory-log
 * switches (memlog_enabled), output path, file-count limits, and per-type
 * memps filename prefixes.
 * NOTE(review): strdup() results are stored unchecked; an allocation
 * failure would leave a NULL path/prefix — confirm callers tolerate that. */
1545 static int load_mem_log_config(struct parse_result *result, void *user_data)
1548 return RESOURCED_ERROR_INVALID_PARAMETER;
1550 if (strncmp(result->section, MEM_LOGGING_SECTION, strlen(MEM_LOGGING_SECTION)+1))
1551 return RESOURCED_ERROR_NONE;
1553 if (!strncmp(result->name, "Enable", strlen("Enable")+1)) {
1554 memlog_enabled = atoi(result->value);
1555 } else if (!strncmp(result->name, "LogPath", strlen("LogPath")+1)) {
1556 memlog_path = strdup(result->value);
1557 } else if (!strncmp(result->name, "MaxNumLogfile", strlen("MaxNumLogfile")+1)) {
1558 memlog_nr_max = atoi(result->value);
/* Start batch-removing old logs at 5/6 of the maximum file count. */
1559 memlog_remove_batch_thres = (memlog_nr_max * 5) / 6;
1560 } else if (!strncmp(result->name, "PrefixMemps", strlen("PrefixMemps")+1)) {
1561 memlog_prefix[MEMLOG_MEMPS] = strdup(result->value);
1562 } else if (!strncmp(result->name, "PrefixMempsMemLimit", strlen("PrefixMempsMemLimit")+1)) {
1563 memlog_prefix[MEMLOG_MEMPS_MEMLIMIT] = strdup(result->value);
1566 return RESOURCED_ERROR_NONE;
/* Config parser callback for the Memory section: translates each key/value
 * pair into memcg thresholds (absolute MB or percent-of-RAM ratios), cgroup
 * limits, victim counts, proactive-LMK parameters, eventfd level, and
 * swappiness values.
 * NOTE(review): the ThresholdRatio* branches parse the ratio with atoi(),
 * truncating fractional percentages; atof() would preserve them — confirm
 * whether integer-only ratios are intended. */
1569 static int set_memory_config(struct parse_result *result, void *user_data)
1572 return RESOURCED_ERROR_NONE;
1574 if (strncmp(result->section, MEM_SECTION, strlen(MEM_SECTION)+1))
1575 return RESOURCED_ERROR_NONE;
/* Absolute thresholds, in MB. */
1577 if (!strncmp(result->name, "ThresholdDedup", strlen("ThresholdDedup")+1)) {
1578 int value = atoi(result->value);
1579 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, value);
1580 } else if (!strncmp(result->name, "ThresholdSwap", strlen("ThresholdSwap")+1)) {
1581 int value = atoi(result->value);
1582 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, value);
1583 } else if (!strncmp(result->name, "ThresholdLow", strlen("ThresholdLow")+1)) {
1584 int value = atoi(result->value);
1585 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, value);
1586 } else if (!strncmp(result->name, "ThresholdMedium", strlen("ThresholdMedium")+1)) {
1587 int value = atoi(result->value);
1588 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, value);
1589 } else if (!strncmp(result->name, "ThresholdLeave", strlen("ThresholdLeave")+1)) {
1590 int value = atoi(result->value);
1591 memcg_set_leave_threshold(CGROUP_ROOT, value);
/* Ratio thresholds: percentage of total RAM, converted to MB. */
1592 } else if (!strncmp(result->name, "ThresholdRatioDedup", strlen("ThresholdRatioDedup")+1)) {
1593 double ratio = atoi(result->value);
1594 int value = (double)totalram * ratio / 100.0;
1595 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, BYTE_TO_MBYTE(value));
1596 } else if (!strncmp(result->name, "ThresholdRatioSwap", strlen("ThresholdRatioSwap")+1)) {
1597 double ratio = atoi(result->value);
1598 int value = (double)totalram * ratio / 100.0;
1599 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, BYTE_TO_MBYTE(value));
1600 } else if (!strncmp(result->name, "ThresholdRatioLow", strlen("ThresholdRatioLow")+1)) {
1601 double ratio = atoi(result->value);
1602 int value = (double)totalram * ratio / 100.0;
1603 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, BYTE_TO_MBYTE(value));
1604 } else if (!strncmp(result->name, "ThresholdRatioMedium", strlen("ThresholdRatioMedium")+1)) {
1605 double ratio = atoi(result->value);
1606 int value = (double)totalram * ratio / 100.0;
1607 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, BYTE_TO_MBYTE(value));
1608 } else if (!strncmp(result->name, "ThresholdRatioLeave", strlen("ThresholdRatioLeave")+1)) {
1609 double ratio = atoi(result->value);
1610 int value = (double)totalram * ratio / 100.0;
1611 memcg_set_leave_threshold(CGROUP_ROOT, BYTE_TO_MBYTE(value));
/* Per-cgroup memory limits as a fraction of total RAM. */
1612 } else if (!strncmp(result->name, "ForegroundRatio", strlen("ForegroundRatio")+1)) {
1613 float ratio = atof(result->value);
1614 memcg_info_set_limit(get_memcg_info(CGROUP_HIGH), ratio, totalram);
1615 } else if (!strncmp(result->name, "BackgroundRatio", strlen("BackgroundRatio")+1)) {
1616 float ratio = atof(result->value);
1617 memcg_info_set_limit(get_memcg_info(CGROUP_MEDIUM), ratio, totalram);
1618 } else if (!strncmp(result->name, "LowRatio", strlen("LowRatio")+1)) {
1619 float ratio = atof(result->value);
1620 memcg_info_set_limit(get_memcg_info(CGROUP_LOW), ratio, totalram);
1621 } else if (!strncmp(result->name, "NumMaxVictims", strlen("NumMaxVictims")+1)) {
1622 int value = atoi(result->value);
1623 num_max_victims = value;
/* Derive the between-check victim count from the configured maximum. */
1624 num_vict_between_check = value > MAX_MEMORY_CGROUP_VICTIMS/2
1625 ? 3 : value > MAX_MEMORY_CGROUP_VICTIMS/4
1627 } else if (!strncmp(result->name, "ProactiveThreshold", strlen("ProactiveThreshold")+1)) {
1628 int value = atoi(result->value);
1629 proactive_threshold = value;
1630 } else if (!strncmp(result->name, "ProactiveLeave", strlen("ProactiveLeave")+1)) {
1631 int value = atoi(result->value);
1632 proactive_leave = value;
1633 } else if (!strncmp(result->name, "EventLevel", strlen("EventLevel")+1)) {
1634 if (strncmp(event_level, result->value, strlen(event_level)))
1635 event_level = strdup(result->value);
1637 return RESOURCED_ERROR_OUT_OF_MEMORY;
/* Swappiness: default plus per-cgroup overrides. */
1638 } else if (!strncmp(result->name, "SWAPPINESS", strlen("SWAPPINESS")+1)) {
1639 int value = atoi(result->value);
1640 memcg_set_default_swappiness(value);
1641 memcg_info_set_swappiness(get_memcg_info(CGROUP_ROOT), value);
1642 } else if (!strncmp(result->name, "FOREGROUND_SWAPPINESS", strlen("FOREGROUND_SWAPPINESS")+1)) {
1643 int value = atoi(result->value);
1644 memcg_info_set_swappiness(get_memcg_info(CGROUP_HIGH), value);
1645 } else if (!strncmp(result->name, "BACKGROUND_SWAPPINESS", strlen("BACKGROUND_SWAPPINESS")+1)) {
1646 int value = atoi(result->value);
1647 memcg_info_set_swappiness(get_memcg_info(CGROUP_MEDIUM), value);
1648 } else if (!strncmp(result->name, "LOW_SWAPPINESS", strlen("LOW_SWAPPINESS")+1)) {
1649 int value = atoi(result->value);
1650 memcg_info_set_swappiness(get_memcg_info(CGROUP_LOW), value);
1651 } else if (!strncmp(result->name, "NumFragSize", strlen("NumFragSize")+1)) {
1652 fragmentation_size = atoi(result->value);
1655 return RESOURCED_ERROR_NONE;
1658 /* setup memcg parameters depending on total ram size. */
/* Chooses built-in defaults for proactive-LMK thresholds, root memcg
 * thresholds (dedup/swap/low/medium/leave) and the max victim count based
 * on which size bucket total RAM falls into (64M .. 3072M). A matching
 * config file entry (load_configs/set_memory_config) can override these. */
1659 static void setup_memcg_params(void)
1661 unsigned long long total_ramsize;
1664 total_ramsize = BYTE_TO_MBYTE(totalram);
1666 _D("Total: %llu MB", total_ramsize);
1667 if (total_ramsize <= MEM_SIZE_64) {
1668 /* set thresholds for ram size 64M */
1669 proactive_threshold = PROACTIVE_64_THRES;
1670 proactive_leave = PROACTIVE_64_LEAVE;
1671 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_64_THRES_DEDUP);
1672 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_64_THRES_SWAP);
1673 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_64_THRES_LOW);
1674 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_64_THRES_MEDIUM);
1675 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_64_THRES_LEAVE);
1676 num_max_victims = CGROUP_ROOT_64_NUM_VICTIMS;
1677 } else if (total_ramsize <= MEM_SIZE_256) {
1678 /* set thresholds for ram size 256M */
1679 proactive_threshold = PROACTIVE_256_THRES;
1680 proactive_leave = PROACTIVE_256_LEAVE;
1681 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_256_THRES_DEDUP);
1682 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_256_THRES_SWAP);
1683 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_256_THRES_LOW);
1684 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_256_THRES_MEDIUM);
1685 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_256_THRES_LEAVE);
1686 num_max_victims = CGROUP_ROOT_256_NUM_VICTIMS;
1687 } else if (total_ramsize <= MEM_SIZE_448) {
1688 /* set thresholds for ram size 448M */
1689 proactive_threshold = PROACTIVE_448_THRES;
1690 proactive_leave = PROACTIVE_448_LEAVE;
1691 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_448_THRES_DEDUP);
1692 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_448_THRES_SWAP);
1693 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_448_THRES_LOW);
1694 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_448_THRES_MEDIUM);
1695 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_448_THRES_LEAVE);
1696 num_max_victims = CGROUP_ROOT_448_NUM_VICTIMS;
1697 } else if (total_ramsize <= MEM_SIZE_512) {
1698 /* set thresholds for ram size 512M */
1699 proactive_threshold = PROACTIVE_512_THRES;
1700 proactive_leave = PROACTIVE_512_LEAVE;
1701 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_512_THRES_DEDUP);
1702 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_512_THRES_SWAP);
1703 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_512_THRES_LOW);
1704 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_512_THRES_MEDIUM);
1705 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_512_THRES_LEAVE);
1706 num_max_victims = CGROUP_ROOT_512_NUM_VICTIMS;
1707 } else if (total_ramsize <= MEM_SIZE_768) {
1708 /* set thresholds for ram size 768M */
1709 proactive_threshold = PROACTIVE_768_THRES;
1710 proactive_leave = PROACTIVE_768_LEAVE;
1711 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_768_THRES_DEDUP);
1712 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_768_THRES_SWAP);
1713 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_768_THRES_LOW);
1714 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_768_THRES_MEDIUM);
1715 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_768_THRES_LEAVE);
1716 num_max_victims = CGROUP_ROOT_768_NUM_VICTIMS;
1717 } else if (total_ramsize <= MEM_SIZE_1024) {
1718 /* set thresholds for ram size up to 1G */
1719 proactive_threshold = PROACTIVE_1024_THRES;
1720 proactive_leave = PROACTIVE_1024_LEAVE;
1721 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_1024_THRES_DEDUP);
1722 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_1024_THRES_SWAP);
1723 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_1024_THRES_LOW);
1724 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_1024_THRES_MEDIUM);
1725 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_1024_THRES_LEAVE);
1726 num_max_victims = CGROUP_ROOT_1024_NUM_VICTIMS;
1727 } else if (total_ramsize <= MEM_SIZE_2048) {
/* thresholds for ram size up to 2G */
1728 proactive_threshold = PROACTIVE_2048_THRES;
1729 proactive_leave = PROACTIVE_2048_LEAVE;
1730 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_2048_THRES_DEDUP);
1731 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_2048_THRES_SWAP);
1732 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_2048_THRES_LOW);
1733 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_2048_THRES_MEDIUM);
1734 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_2048_THRES_LEAVE);
1735 num_max_victims = CGROUP_ROOT_2048_NUM_VICTIMS;
/* fallback bucket: anything above 2G uses the 3072M defaults */
1737 proactive_threshold = PROACTIVE_3072_THRES;
1738 proactive_leave = PROACTIVE_3072_LEAVE;
1739 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_3072_THRES_DEDUP);
1740 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_3072_THRES_SWAP);
1741 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_3072_THRES_LOW);
1742 memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_3072_THRES_MEDIUM);
1743 memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_3072_THRES_LEAVE);
1744 num_max_victims = CGROUP_ROOT_3072_NUM_VICTIMS;
/* Move a pid to the memory cgroup implied by its next oom_score_adj.
 * For an app's main pid the per-app bookkeeping (proc_app_info) is updated
 * as well; moving to CGROUP_LOW additionally kicks off swapping for that
 * cgroup. Only CGROUP_VIP..CGROUP_LOW are valid targets. */
1748 static void lowmem_move_memcgroup(int pid, int next_oom_score_adj, struct proc_app_info *pai)
1750 int cur_oom_score_adj;
1752 struct memcg_info *mi;
1753 int next_memcg_idx = cgroup_get_type(next_oom_score_adj);
1755 if(next_memcg_idx < CGROUP_VIP || next_memcg_idx > CGROUP_LOW) {
1756 _E("cgroup type (%d) should not be called", next_memcg_idx);
1759 mi = get_memcg_info(next_memcg_idx);
1766 cgroup_write_pid_fullpath(mi->name, pid);
/* Only the app's main pid carries the per-app memory state. */
1771 if (pai->main_pid == pid) {
1772 cur_oom_score_adj = pai->memory.oom_score_adj;
1773 cur_memcg_idx = cgroup_get_type(cur_oom_score_adj);
1775 /* -1 means that this pid is not yet registered at the memory cgroup;
1776 * see the proc_create_app_info function.
1778 if (cur_oom_score_adj != OOMADJ_APP_MAX + 10) {
1779 /* VIP processes should not be asked to move. */
1780 if (cur_memcg_idx <= CGROUP_VIP) {
1781 _E("[DEBUG] current cgroup (%s) cannot be VIP or Root", convert_cgroup_type_to_str(cur_memcg_idx));
1786 _I("app (%s) memory cgroup move from %s to %s", pai->appid, convert_cgroup_type_to_str(cur_memcg_idx), convert_cgroup_type_to_str(next_memcg_idx));
/* Same score: update bookkeeping only; no cgroup write needed. */
1788 if (cur_oom_score_adj == next_oom_score_adj) {
1789 _D("next oom_score_adj (%d) is same with current one", next_oom_score_adj);
1793 proc_set_process_memory_state(pai, next_memcg_idx, mi, next_oom_score_adj);
1795 if (!lowmem_limit_move_cgroup(pai))
1798 if(cur_memcg_idx == next_memcg_idx)
1801 cgroup_write_pid_fullpath(mi->name, pid);
/* Entering the LOW cgroup: proactively swap its memory out. */
1802 if (next_memcg_idx == CGROUP_LOW)
1803 lowmem_swap_memory(get_memcg_info(CGROUP_LOW)->name);
1807 if (pai->memory.use_mem_limit)
1810 cgroup_write_pid_fullpath(mi->name, pid);
/* Create the LMK reclaim worker: an async request queue plus a detached
 * pthread running lowmem_reclaim_worker(). Idempotent — returns early if
 * the worker is already active. Returns RESOURCED_ERROR_NONE on success. */
1814 static int lowmem_activate_worker(void)
1816 int ret = RESOURCED_ERROR_NONE;
1818 if (LOWMEM_WORKER_IS_ACTIVE(&lmw)) {
/* Queue owns its requests; lowmem_request_destroy frees leftovers. */
1822 lmw.queue = g_async_queue_new_full(lowmem_request_destroy);
1824 _E("Failed to create request queue\n");
1825 return RESOURCED_ERROR_FAIL;
/* Mark active before spawning so the thread sees a consistent state;
 * rolled back below if pthread_create fails. */
1827 LOWMEM_WORKER_ACTIVATE(&lmw);
1828 ret = pthread_create(&lmw.worker_thread, NULL,
1829 (void *)lowmem_reclaim_worker, (void *)&lmw);
1831 LOWMEM_WORKER_DEACTIVATE(&lmw);
1832 _E("Failed to create LMK thread: %d\n", ret);
/* Detached: the worker cleans itself up; nobody joins it. */
1834 pthread_detach(lmw.worker_thread);
1835 ret = RESOURCED_ERROR_NONE;
/* Stop the reclaim worker: mark it inactive, drain pending requests, then
 * push a sentinel OOM_DROP request so the worker thread wakes up and exits,
 * and drop our reference on the queue. */
1840 static void lowmem_deactivate_worker(void)
1842 struct lowmem_control *ctl;
1844 if (!LOWMEM_WORKER_IS_ACTIVE(&lmw))
1847 LOWMEM_WORKER_DEACTIVATE(&lmw);
1848 lowmem_drain_queue(&lmw);
1850 ctl = LOWMEM_NEW_REQUEST();
1852 _E("Critical - g_slice alloc failed - Lowmem cannot be deactivated");
/* OOM_DROP tells the worker to discard the request and terminate. */
1855 ctl->flags = OOM_DROP;
1856 g_async_queue_push(lmw.queue, ctl);
1857 g_async_queue_unref(lmw.queue);
/* Drain one 8-byte counter value from a memory-pressure eventfd; the value
 * itself is irrelevant, reading just rearms the notification.
 * Returns read()'s result (<0 on error). */
1860 static int lowmem_press_eventfd_read(int fd)
1862 uint64_t dummy_state;
1864 return read(fd, &dummy_state, sizeof(dummy_state));
/* Pressure handler for the root memcg: recompute the memory level from the
 * currently available memory and dispatch the matching action. Caches the
 * previous availability to skip redundant work when nothing changed. */
1867 static void lowmem_press_root_cgroup_handler(void)
1869 static unsigned int prev_available;
1870 unsigned int available;
1873 available = proc_get_mem_available();
1874 if (prev_available == available)
1877 mem_state = check_mem_state(available);
1878 lowmem_trigger_memory_state_action(mem_state);
1880 prev_available = available;
/* Pressure handler for a non-root memcg: if the cgroup's anonymous memory
 * usage has reached its OOM-level threshold, start a proactive LMK pass on
 * that cgroup; otherwise just log the headroom. */
1883 static void lowmem_press_cgroup_handler(enum cgroup_type type, struct memcg_info *mi)
1885 unsigned int usage, threshold;
1888 ret = memcg_get_anon_usage(mi->name, &usage);
1890 _D("getting anonymous memory usage fails");
1894 threshold = mi->threshold[MEM_LEVEL_OOM];
1895 if (usage >= threshold)
1896 memory_cgroup_proactive_lmk_act(type, mi);
1898 _I("anon page %u MB < medium threshold %u MB", BYTE_TO_MBYTE(usage),
1899 BYTE_TO_MBYTE(threshold));
/* fd-handler callback for memory-pressure eventfds: identify which cgroup
 * the fd belongs to and invoke the root or per-cgroup pressure handler.
 * Returns a bool so it can stay registered with the fd-handler loop. */
1902 static bool lowmem_press_eventfd_handler(int fd, void *data)
1904 struct memcg_info *mi;
1905 enum cgroup_type type = CGROUP_ROOT;
1907 // FIXME: probably shouldn't get ignored
1908 if (lowmem_press_eventfd_read(fd) < 0)
1909 _E("Failed to read lowmem press event, %m\n");
/* Linear search: match the fd against each registered memcg's eventfd. */
1911 for (type = CGROUP_ROOT; type < CGROUP_END; type++) {
1912 if (!get_cgroup_tree(type) || !get_memcg_info(type))
1914 mi = get_memcg_info(type);
1915 if (fd == mi->evfd) {
1916 /* call low memory handler for this memcg */
1917 if (type == CGROUP_ROOT)
1918 lowmem_press_root_cgroup_handler();
1920 lowmem_press_cgroup_handler(type, mi);
/* Register a memory-pressure eventfd for one memcg and hook it into the
 * main loop via add_fd_read_handler(). Skips cgroups whose OOM threshold
 * is still at its uninitialized value. Returns -errno on failure. */
1929 static int lowmem_press_register_eventfd(struct memcg_info *mi)
1932 const char *name = mi->name;
1933 static fd_handler_h handler;
1935 if (mi->threshold[MEM_LEVEL_OOM] == LOWMEM_THRES_INIT)
1938 evfd = memcg_set_eventfd(name, MEMCG_EVENTFD_MEMORY_PRESSURE,
/* Preserve errno before the log call can clobber it. */
1942 int saved_errno = errno;
1943 _E("fail to register event press fd %s cgroup", name);
1944 return -saved_errno;
1949 _I("register event fd success for %s cgroup", name);
1950 add_fd_read_handler(evfd, lowmem_press_eventfd_handler, NULL, NULL, &handler);
/* Register pressure eventfds for every cgroup type that uses its own
 * hierarchy. Always reports success; individual registration failures are
 * handled inside lowmem_press_register_eventfd(). */
1954 static int lowmem_press_setup_eventfd(void)
1958 for (i = CGROUP_ROOT; i < CGROUP_END; i++) {
1959 if (!get_use_hierarchy(i))
1962 lowmem_press_register_eventfd(get_memcg_info(i));
1964 return RESOURCED_ERROR_NONE;
/* Completion callback for forced reclaim: return to the HIGH state. */
1967 static void lowmem_force_reclaim_cb(struct lowmem_control *ctl)
1969 lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
/* Public entry point to force a reclaim pass. Non-positive arguments fall
 * back to defaults (max victims, CGROUP_LOW, root leave threshold). The
 * request is queued for the worker with FORCE|IN_DEPTH|SINGLE_SHOT flags
 * after forcing the CRITICAL state. */
1972 int lowmem_trigger_reclaim(int flags, int victims, enum cgroup_type type, int threshold)
1974 struct lowmem_control *ctl = LOWMEM_NEW_REQUEST();
1979 flags |= OOM_FORCE | OOM_IN_DEPTH | OOM_SINGLE_SHOT;
1980 victims = victims > 0 ? victims : MAX_MEMORY_CGROUP_VICTIMS;
1981 type = type > 0 ? type : CGROUP_LOW;
1982 threshold = threshold > 0 ? threshold : get_root_memcg_info()->threshold_leave;
/* force=1: run the CRITICAL-state action even if already in that state. */
1984 lowmem_change_memory_state(MEM_LEVEL_CRITICAL, 1);
1985 LOWMEM_SET_REQUEST(ctl, flags,
1986 type, threshold, victims,
1987 lowmem_force_reclaim_cb);
1988 lowmem_queue_request(&lmw, ctl);
/* Reclaim on behalf of the swap module: free enough memory to cover the
 * requested swap size (bytes) on top of the leave threshold, with the
 * victim count capped at MAX_PROACTIVE_HIGH_VICTIMS. */
1993 void lowmem_trigger_swap_reclaim(enum cgroup_type type, int swap_size)
1997 victims = num_max_victims > MAX_PROACTIVE_HIGH_VICTIMS
1998 ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
2000 size = get_root_memcg_info()->threshold_leave + BYTE_TO_MBYTE(swap_size);
2001 _I("reclaim from swap module, type : %d, size : %d, victims: %d", type, size, victims);
2002 lowmem_trigger_reclaim(0, victims, type, size);
/* Detect kernel memory fragmentation from /proc/buddyinfo ("Normal" zone):
 * weight the 32K..256K order buckets into an order-2-page count and compare
 * against the configured fragmentation_size floor. Returns true when the
 * zone looks too fragmented and proactive reclaim is advisable. */
2005 bool lowmem_fragmentated(void)
2007 struct buddyinfo bi;
2010 ret = proc_get_buddyinfo("Normal", &bi);
2015 * The fragmentation_size is the minimum count of order-2 pages in "Normal" zone.
2016 * If total buddy pages is smaller than fragmentation_size,
2017 * resourced will detect kernel memory is fragmented.
2018 * Default value is zero in low memory device.
/* Shifts convert each bucket to its equivalent number of order-2 pages. */
2020 if (bi.page[PAGE_32K] + (bi.page[PAGE_64K] << 1) + (bi.page[PAGE_128K] << 2) +
2021 (bi.page[PAGE_256K] << 3) < fragmentation_size) {
2022 _I("fragmentation detected, need to execute proactive oom killer");
/* Proactive LMK invoked before launching an app: decide, based on current
 * availability, HEART memory history (if compiled in), fragmentation, and
 * the app's PROC_LARGEMEMORY flag, whether to reclaim memory ahead of the
 * launch — and if so, how much. */
2028 static void lowmem_proactive_oom_killer(int flags, char *appid)
2030 unsigned int before;
2033 before = proc_get_mem_available();
2035 /* If memory state is medium or normal, just return and kill in oom killer */
2036 if (before < get_root_memcg_info()->threshold[MEM_LEVEL_OOM] || before > proactive_leave)
2039 victims = num_max_victims > MAX_PROACTIVE_HIGH_VICTIMS
2040 ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
2042 #ifdef HEART_SUPPORT
2044 * This branch is used only when HEART module is compiled in and
2045 * its MEMORY module must be enabled. Otherwise this is skipped.
2047 struct heart_memory_data *md = heart_memory_get_memdata(appid, DATA_LATEST);
2049 unsigned int rss, after, size;
2051 rss = KBYTE_TO_MBYTE(md->avg_rss);
/* Predict post-launch availability from the app's historical RSS. */
2055 after = before - rss;
2057 * after launching app, ensure that available memory is
2058 * above threshold_leave
2060 if (after >= get_root_memcg_info()->threshold[MEM_LEVEL_OOM])
2063 if (proactive_threshold - rss >= get_root_memcg_info()->threshold[MEM_LEVEL_OOM])
2064 size = proactive_threshold;
2066 size = rss + get_root_memcg_info()->threshold[MEM_LEVEL_OOM] + THRESHOLD_MARGIN;
2068 _D("history based proactive LMK : avg rss %u, available %u required = %u MB",
2070 lowmem_trigger_reclaim(0, victims, CGROUP_LOW, size);
2077 * When there is no history data for the launching app,
2078 * it is necessary to check current fragmentation state or application manifest file.
2079 * So, resourced feels proactive LMK is required, run oom killer based on dynamic
2082 if (lowmem_fragmentated())
2086 * run proactive oom killer only when available is larger than
2087 * dynamic process threshold
2089 if (!proactive_threshold || before >= proactive_threshold)
/* Non-large-memory apps don't justify a proactive kill pass. */
2092 if (!(flags & PROC_LARGEMEMORY))
2097 * free THRESHOLD_MARGIN more than real should be freed,
2098 * because launching app is consuming up the memory.
2100 _D("Run threshold based proactive LMK: memory level to reach: %u\n",
2101 proactive_leave + THRESHOLD_MARGIN);
2102 lowmem_trigger_reclaim(0, victims, CGROUP_LOW, proactive_leave + THRESHOLD_MARGIN);
/* Accessor for the proactive-LMK start threshold (MB). */
2105 unsigned int lowmem_get_proactive_thres(void)
2107 return proactive_threshold;
/* APP_PRELAUNCH notifier: run the proactive OOM killer for the app that is
 * about to launch. Service apps (and missing app info) are skipped. */
2110 static int lowmem_prelaunch_handler(void *data)
2112 struct proc_status *ps = (struct proc_status *)data;
2113 struct proc_app_info *pai = ps->pai;
2115 if (!pai || CHECK_BIT(pai->flags, PROC_SERVICEAPP))
2116 return RESOURCED_ERROR_NONE;
2118 lowmem_proactive_oom_killer(ps->pai->flags, ps->pai->appid);
2119 return RESOURCED_ERROR_NONE;
/* MEM_CONTROL notifier: dispatch external lowmem control commands; the only
 * visible command moves a pid to the cgroup implied by its oom_score_adj. */
2122 int lowmem_control_handler(void *data)
2124 struct lowmem_control_data *lowmem_data;
2126 lowmem_data = (struct lowmem_control_data *)data;
2127 switch (lowmem_data->control_type) {
2128 case LOWMEM_MOVE_CGROUP:
2129 lowmem_move_memcgroup((pid_t)lowmem_data->pid,
2130 lowmem_data->oom_score_adj, lowmem_data->pai);
2135 return RESOURCED_ERROR_NONE;
/* LCD_OFF notifier: when the screen dims and swap is on, ask the swap
 * module to reclaim the background (CGROUP_MEDIUM) cgroup. */
2138 static int lowmem_bg_reclaim_handler(void *data)
2140 if (swap_get_state() != SWAP_ON)
2141 return RESOURCED_ERROR_NONE;
2144 return RESOURCED_ERROR_NONE;
2147 * Proactively reclaiming memory used by long-lived background processes
2148 * (such as widget instances) may be efficient on devices with limited
2149 * memory constraints. The pages used by such processes could be reclaimed
2150 * (if swap is enabled) earlier than they used to while minimizing the
2151 * impact on the user experience.
2153 resourced_notify(RESOURCED_NOTIFIER_SWAP_START, get_memcg_info(CGROUP_MEDIUM)->name);
2155 return RESOURCED_ERROR_NONE;
/* Convert a percentage of total RAM into an absolute threshold size. */
2158 static int calculate_threshold_size(double ratio)
2160 int size = (double)totalram * ratio / 100.0;
/* Apply the parsed limiter configuration (path currently used by the
 * config machinery behind get_memcg_conf): per-cgroup limits, per-level
 * thresholds (absolute or percent), the OOM popup flag, and the per-app
 * type/status memory limits and actions. */
2164 static void load_configs(const char *path)
2166 struct memcg_conf *memcg_conf = get_memcg_conf();
2168 /* set MemoryGroupLimit section */
2169 for (int cgroup = CGROUP_VIP; cgroup < CGROUP_END; cgroup++) {
2170 if (memcg_conf->cgroup_limit[cgroup] > 0.0)
2171 memcg_info_set_limit(get_memcg_info(cgroup),
2172 memcg_conf->cgroup_limit[cgroup]/100.0, totalram);
2175 /* set MemoryLevelThreshold section */
2176 for (int lvl = MEM_LEVEL_MEDIUM; lvl < MEM_LEVEL_MAX; lvl++) {
/* percent flag set: interpret the value as % of RAM, else as absolute. */
2177 if (memcg_conf->threshold[lvl].percent &&
2178 memcg_conf->threshold[lvl].threshold > 0)
2179 memcg_set_threshold(CGROUP_ROOT, lvl,
2180 calculate_threshold_size(memcg_conf->threshold[lvl].threshold));
2181 else if (memcg_conf->threshold[lvl].threshold > 0)
2182 memcg_set_threshold(CGROUP_ROOT, lvl,
2183 memcg_conf->threshold[lvl].threshold);
2185 oom_popup_enable = memcg_conf->oom_popup;
2187 /* set MemoryAppTypeLimit and MemoryAppStatusLimit section */
2188 lowmem_memory_init(memcg_conf->service.memory, memcg_conf->widget.memory,
2189 memcg_conf->guiapp.memory, memcg_conf->background.memory);
2190 lowmem_action_init(memcg_conf->service.action, memcg_conf->widget.action,
2191 memcg_conf->guiapp.action, memcg_conf->background.action);
/* Log the effective memory configuration after defaults and config files
 * have been applied: cgroup limits, level thresholds, victim counts, and
 * the popup/background-reclaim switches. Diagnostic only. */
2196 static void print_mem_configs(void)
2198 /* print info of Memory section */
2199 for (int cgroup = CGROUP_VIP; cgroup < CGROUP_END; cgroup++) {
2200 _I("[DEBUG] set memory for cgroup '%s' to %u bytes",
2201 convert_cgroup_type_to_str(cgroup), get_memcg_info(cgroup)->limit);
2204 for (int mem_lvl = 0; mem_lvl < MEM_LEVEL_MAX; mem_lvl++)
2205 _I("[DEBUG] set threshold for memory level '%s' to %u MB",
2206 convert_memstate_to_str(mem_lvl), get_root_memcg_info()->threshold[mem_lvl]);
2208 _I("[DEBUG] set number of max victims as %d", num_max_victims);
2209 _I("[DEBUG] set threshold leave to %u MB", get_root_memcg_info()->threshold_leave);
2210 _I("[DEBUG] set proactive threshold to %u MB", proactive_threshold);
2211 _I("[DEBUG] set proactive low memory killer leave to %u MB", proactive_leave);
2213 /* print info of POPUP section */
2214 _I("[DEBUG] oom popup is %s", oom_popup_enable == true ? "enabled" : "disabled");
2216 /* print info of BackgroundReclaim section */
2217 _I("[DEBUG] Background reclaim is %s", bg_reclaim == true ? "enabled" : "disabled");
2219 /* print info of Logging section */
2220 /* _I("memory logging is %s", memlog_enabled == 1 ? "enabled" : "disabled");
2221 _I("memory logging path is %s", memlog_path);
2222 _I("the max number of memory logging is %d", memlog_nr_max);
2223 _I("the batch threshold of memory log is %d", memlog_remove_batch_thres);
2224 _I("prefix of memps is %s", memlog_prefix[MEMLOG_MEMPS]);
2225 _I("prefix of memlimit memps is %s", memlog_prefix[MEMLOG_MEMPS_MEMLIMIT]);*/
2228 #include "file-helper.h"
2230 /* To Do: should we need lowmem_fd_start, lowmem_fd_stop ?? */
/* Module init: build the memory cgroup hierarchy, compute RAM-dependent
 * defaults, load the limiter config, start the reclaim worker, register
 * pressure eventfds, initialize the limit/system submodules, and subscribe
 * to the prelaunch / mem-control / LCD-off notifiers. */
2231 static int lowmem_init(void)
2233 int ret = RESOURCED_ERROR_NONE;
2235 _D("[DEBUG] resourced memory init start");
2238 ret = cgroup_make_full_subdir(MEMCG_PATH);
2239 ret_value_msg_if(ret < 0, ret, "memory cgroup init failed\n");
2240 memcg_params_init();
2242 setup_memcg_params();
2244 /* default configuration */
2245 load_configs(MEM_CONF_FILE);
2247 /* this function should be called after parsing configurations */
2248 memcg_write_limiter_params();
2249 print_mem_configs();
2251 /* make a worker thread called low memory killer */
2252 ret = lowmem_activate_worker();
2254 _E("[DEBUG] oom thread create failed\n");
2258 /* register threshold and event fd */
2259 ret = lowmem_press_setup_eventfd();
2261 _E("[DEBUG] eventfd setup failed");
2266 lowmem_limit_init();
2267 lowmem_system_init();
2269 register_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
2270 register_notifier(RESOURCED_NOTIFIER_MEM_CONTROL, lowmem_control_handler);
2271 register_notifier(RESOURCED_NOTIFIER_LCD_OFF, lowmem_bg_reclaim_handler);
/* Module teardown: free the event_level string if it was overridden from
 * the default, stop the reclaim worker, shut down the limit/system
 * submodules, and unregister the notifiers added in lowmem_init(). */
2276 static int lowmem_exit(void)
2278 if (strncmp(event_level, MEMCG_DEFAULT_EVENT_LEVEL, sizeof(MEMCG_DEFAULT_EVENT_LEVEL)))
2281 lowmem_deactivate_worker();
2282 lowmem_limit_exit();
2283 lowmem_system_exit();
2285 unregister_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
2286 unregister_notifier(RESOURCED_NOTIFIER_MEM_CONTROL, lowmem_control_handler);
2287 unregister_notifier(RESOURCED_NOTIFIER_LCD_OFF, lowmem_bg_reclaim_handler);
2289 return RESOURCED_ERROR_NONE;
/* module_ops init hook: publish this module's ops and run lowmem_init(). */
2292 static int resourced_memory_init(void *data)
2294 lowmem_ops = &memory_modules_ops;
2295 return lowmem_init();
/* module_ops exit hook: delegate to lowmem_exit(). */
2298 static int resourced_memory_finalize(void *data)
2300 return lowmem_exit();
/* Re-evaluate (or force, per the 'force' flag handled in elided lines) the
 * memory state and run the matching per-level action. */
2303 void lowmem_change_memory_state(int state, int force)
2310 unsigned int available = proc_get_mem_available();
2311 mem_state = check_mem_state(available);
2314 lowmem_trigger_memory_state_action(mem_state);
/* Accessor: total RAM in KB (body elided from this listing). */
2317 unsigned long lowmem_get_ktotalram(void)
/* Accessor: total RAM in bytes (body elided from this listing). */
2322 unsigned long lowmem_get_totalram(void)
/* Re-derive an app's memcg bookkeeping from the kernel's view: read the
 * pid's current memory-cgroup path and match it against the known cgroup
 * hash names, walking from the most specific (CGROUP_END-1) down to root.
 * If the path also contains the appid, the app has a per-app limit cgroup. */
2327 void lowmem_restore_memcg(struct proc_app_info *pai)
2331 struct cgroup *cgroup = NULL;
2332 struct memcg_info *mi = NULL;
2333 pid_t pid = pai->main_pid;
2335 ret = cgroup_pid_get_path("memory", pid, &cgpath);
2339 for (index = CGROUP_END-1; index >= CGROUP_ROOT; index--) {
2340 cgroup = get_cgroup_tree(index);
2344 mi = cgroup->memcg_info;
/* Empty hashname (e.g. root) cannot be matched by substring search. */
2348 if (!strcmp(cgroup->hashname, ""))
2350 if (strstr(cgpath, cgroup->hashname))
2353 pai->memory.memcg_idx = index;
2354 pai->memory.memcg_info = mi;
2355 if(strstr(cgpath, pai->appid))
2356 pai->memory.use_mem_limit = true;
/* Module descriptor: high priority so memory handling starts before most
 * other resourced modules; registered with the module framework below. */
2361 static struct module_ops memory_modules_ops = {
2362 .priority = MODULE_PRIORITY_HIGH,
2364 .init = resourced_memory_init,
2365 .exit = resourced_memory_finalize,
2368 MODULE_REGISTER(&memory_modules_ops)