4 * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved.
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
20 * @file vmpressure-lowmem-handler.c
22 * @desc lowmem handler using memcgroup
24 * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
36 #include <sys/types.h>
39 #include <sys/sysinfo.h>
40 #include <sys/resource.h>
43 #include <eventsystem.h>
49 #include "lowmem-dbus.h"
50 #include "lowmem-system.h"
51 #include "lowmem-limit.h"
52 #include "proc-common.h"
55 #include "resourced.h"
58 #include "config-parser.h"
60 #include "swap-common.h"
62 #include "memory-cgroup.h"
63 #include "heart-common.h"
64 #include "proc-main.h"
65 #include "dbus-handler.h"
67 #include "fd-handler.h"
68 #include "resourced-helper-worker.h"
69 #include "dedup-common.h"
72 #define MAX_PROACTIVE_HIGH_VICTIMS 4
73 #define FOREGROUND_VICTIMS 1
74 #define OOM_KILLER_PRIORITY -20
75 #define THRESHOLD_MARGIN 10 /* MB */
77 #define MEM_SIZE_64 64 /* MB */
78 #define MEM_SIZE_256 256 /* MB */
79 #define MEM_SIZE_448 448 /* MB */
80 #define MEM_SIZE_512 512 /* MB */
81 #define MEM_SIZE_768 768 /* MB */
82 #define MEM_SIZE_1024 1024 /* MB */
83 #define MEM_SIZE_2048 2048 /* MB */
85 /* thresholds for 64M RAM*/
86 #define PROACTIVE_64_THRES 10 /* MB */
87 #define PROACTIVE_64_LEAVE 30 /* MB */
88 #define CGROUP_ROOT_64_THRES_DEDUP 16 /* MB */
89 #define CGROUP_ROOT_64_THRES_SWAP 15 /* MB */
90 #define CGROUP_ROOT_64_THRES_LOW 8 /* MB */
91 #define CGROUP_ROOT_64_THRES_MEDIUM 5 /* MB */
92 #define CGROUP_ROOT_64_THRES_LEAVE 8 /* MB */
93 #define CGROUP_ROOT_64_NUM_VICTIMS 1
95 /* thresholds for 256M RAM */
96 #define PROACTIVE_256_THRES 50 /* MB */
97 #define PROACTIVE_256_LEAVE 80 /* MB */
98 #define CGROUP_ROOT_256_THRES_DEDUP 60 /* MB */
99 #define CGROUP_ROOT_256_THRES_SWAP 40 /* MB */
100 #define CGROUP_ROOT_256_THRES_LOW 20 /* MB */
101 #define CGROUP_ROOT_256_THRES_MEDIUM 10 /* MB */
102 #define CGROUP_ROOT_256_THRES_LEAVE 20 /* MB */
103 #define CGROUP_ROOT_256_NUM_VICTIMS 2
105 /* threshold for 448M RAM */
106 #define PROACTIVE_448_THRES 80 /* MB */
107 #define PROACTIVE_448_LEAVE 100 /* MB */
108 #define CGROUP_ROOT_448_THRES_DEDUP 120 /* MB */
109 #define CGROUP_ROOT_448_THRES_SWAP 100 /* MB */
110 #define CGROUP_ROOT_448_THRES_LOW 60 /* MB */
111 #define CGROUP_ROOT_448_THRES_MEDIUM 50 /* MB */
112 #define CGROUP_ROOT_448_THRES_LEAVE 70 /* MB */
113 #define CGROUP_ROOT_448_NUM_VICTIMS 5
115 /* threshold for 512M RAM */
116 #define PROACTIVE_512_THRES 80 /* MB */
117 #define PROACTIVE_512_LEAVE 100 /* MB */
118 #define CGROUP_ROOT_512_THRES_DEDUP 140 /* MB */
119 #define CGROUP_ROOT_512_THRES_SWAP 100 /* MB */
120 #define CGROUP_ROOT_512_THRES_LOW 70 /* MB */
121 #define CGROUP_ROOT_512_THRES_MEDIUM 60 /* MB */
122 #define CGROUP_ROOT_512_THRES_LEAVE 80 /* MB */
123 #define CGROUP_ROOT_512_NUM_VICTIMS 5
125 /* threshold for 768 RAM */
126 #define PROACTIVE_768_THRES 100 /* MB */
127 #define PROACTIVE_768_LEAVE 130 /* MB */
128 #define CGROUP_ROOT_768_THRES_DEDUP 180 /* MB */
129 #define CGROUP_ROOT_768_THRES_SWAP 150 /* MB */
130 #define CGROUP_ROOT_768_THRES_LOW 90 /* MB */
131 #define CGROUP_ROOT_768_THRES_MEDIUM 80 /* MB */
132 #define CGROUP_ROOT_768_THRES_LEAVE 100 /* MB */
133 #define CGROUP_ROOT_768_NUM_VICTIMS 5
135 /* threshold for more than 1024M RAM */
136 #define PROACTIVE_1024_THRES 150 /* MB */
137 #define PROACTIVE_1024_LEAVE 230 /* MB */
138 #define CGROUP_ROOT_1024_THRES_DEDUP 400 /* MB */
139 #define CGROUP_ROOT_1024_THRES_SWAP 300 /* MB */
140 #define CGROUP_ROOT_1024_THRES_LOW 120 /* MB */
141 #define CGROUP_ROOT_1024_THRES_MEDIUM 100 /* MB */
142 #define CGROUP_ROOT_1024_THRES_LEAVE 150 /* MB */
143 #define CGROUP_ROOT_1024_NUM_VICTIMS 5
145 /* threshold for more than 2048M RAM */
146 #define PROACTIVE_2048_THRES 200 /* MB */
147 #define PROACTIVE_2048_LEAVE 500 /* MB */
148 #define CGROUP_ROOT_2048_THRES_DEDUP 400 /* MB */
149 #define CGROUP_ROOT_2048_THRES_SWAP 300 /* MB */
150 #define CGROUP_ROOT_2048_THRES_LOW 200 /* MB */
151 #define CGROUP_ROOT_2048_THRES_MEDIUM 160 /* MB */
152 #define CGROUP_ROOT_2048_THRES_LEAVE 300 /* MB */
153 #define CGROUP_ROOT_2048_NUM_VICTIMS 10
155 /* threshold for more than 3072M RAM */
156 #define PROACTIVE_3072_THRES 300 /* MB */
157 #define PROACTIVE_3072_LEAVE 700 /* MB */
158 #define CGROUP_ROOT_3072_THRES_DEDUP 600 /* MB */
159 #define CGROUP_ROOT_3072_THRES_SWAP 500 /* MB */
160 #define CGROUP_ROOT_3072_THRES_LOW 400 /* MB */
161 #define CGROUP_ROOT_3072_THRES_MEDIUM 250 /* MB */
162 #define CGROUP_ROOT_3072_THRES_LEAVE 500 /* MB */
163 #define CGROUP_ROOT_3072_NUM_VICTIMS 10
165 static unsigned proactive_threshold_mb;
166 static unsigned proactive_leave_mb;
167 static unsigned lmk_start_threshold_mb;
170 * Resourced Low Memory Killer
171 * NOTE: planned to be moved to a separate file.
173 /*-------------------------------------------------*/
174 #define OOM_TIMER_INTERVAL_SEC 2
175 #define LMW_LOOP_WAIT_TIMEOUT_MSEC OOM_TIMER_INTERVAL_SEC*(G_USEC_PER_SEC)
176 #define LMW_RETRY_WAIT_TIMEOUT_MSEC (G_USEC_PER_SEC)
/*
 * Core LMK worker types and state.
 * NOTE(review): sampled extraction — several lines are missing here
 * (the `flags`, `count` and `status` fields of struct lowmem_control,
 * and the queue/active/running members of struct lowmem_worker are
 * referenced elsewhere but their declarations were dropped).
 * Verify against the upstream file before editing.
 */
178 struct lowmem_control {
180 	 * For each queued request the following properties
181 	 * are required with two exceptions:
182 	 * - status is being set by LMK
183 	 * - callback is optional
185 	/* Processing flags*/
187 	/* Indictator for OOM score of targeted processes */
188 	enum oom_score score;
190 	/* Desired size to be restored - level to be reached (MB)*/
191 	unsigned int size_mb;
192 	/* Max number of processes to be considered */
194 	/* Memory reclaim status */
197 	 * Optional - if set, will be triggered by LMK once the request
200 	void (*callback) (struct lowmem_control *);
/* Worker thread handle plus (missing here) its request queue and
 * atomic active/running flags manipulated by the macros below. */
203 struct lowmem_worker {
204 	pthread_t worker_thread;
210 static struct lowmem_worker lmw;
212 /* Arrays for storing kill candidates and apps/procs */
213 static GArray *lowmem_kill_candidates = NULL;
214 static GArray *lowmem_task_info_app_array = NULL;
215 static GArray *lowmem_task_info_proc_array = NULL;
/* Dead memlog configuration kept commented out upstream. */
217 //static int memlog_enabled;
218 //static int memlog_nr_max = DEFAULT_MEMLOG_NR_MAX;
219 /* remove logfiles to reduce to this threshold.
220  * it is about five-sixths of the memlog_nr_max. */
221 //static int memlog_remove_batch_thres = (DEFAULT_MEMLOG_NR_MAX * 5) / 6;
222 //static char *memlog_path = DEFAULT_MEMLOG_PATH;
223 //static char *memlog_prefix[MEMLOG_MAX];
/* Atomic accessors for the worker's lifecycle flags. */
225 #define LOWMEM_WORKER_IS_ACTIVE(_lmw)	g_atomic_int_get(&(_lmw)->active)
226 #define LOWMEM_WORKER_ACTIVATE(_lmw)	g_atomic_int_set(&(_lmw)->active, 1)
227 #define LOWMEM_WORKER_DEACTIVATE(_lmw)	g_atomic_int_set(&(_lmw)->active, 0)
229 #define LOWMEM_WORKER_IS_RUNNING(_lmw)	g_atomic_int_get(&(_lmw)->running)
230 #define LOWMEM_WORKER_RUN(_lmw)	g_atomic_int_set(&(_lmw)->running, 1)
231 #define LOWMEM_WORKER_IDLE(_lmw)	g_atomic_int_set(&(_lmw)->running, 0)
/* Requests are allocated from the GLib slice allocator. */
233 #define LOWMEM_NEW_REQUEST() g_slice_new0(struct lowmem_control)
235 #define LOWMEM_DESTROY_REQUEST(_ctl)		\
236 	g_slice_free(typeof(*(_ctl)), _ctl);	\
238 #define LOWMEM_SET_REQUEST(c, __flags, __score, __size, __count, __cb)	\
240 	(c)->flags	= __flags; (c)->score	= __score;	\
241 	(c)->size_mb= __size; (c)->count	= __count;	\
242 	(c)->callback	= __cb;	\
245 static void lowmem_queue_request(struct lowmem_worker *lmw,
246 struct lowmem_control *ctl)
248 if (LOWMEM_WORKER_IS_ACTIVE(lmw))
249 g_async_queue_push(lmw->queue, ctl);
/*
 * Discard every request still pending on the worker queue.
 * NOTE(review): lines are missing between the pop and the destroy —
 * upstream presumably invokes ctl->callback before destruction; confirm.
 */
253 static void lowmem_drain_queue(struct lowmem_worker *lmw)
255 	struct lowmem_control *ctl;
257 	g_async_queue_lock(lmw->queue);
258 	while ((ctl = g_async_queue_try_pop_unlocked(lmw->queue))) {
261 		LOWMEM_DESTROY_REQUEST(ctl);
263 	g_async_queue_unlock(lmw->queue);
/* GDestroyNotify used when the queue itself is torn down. */
266 static void lowmem_request_destroy(gpointer data)
268 	struct lowmem_control *ctl = (struct lowmem_control*) data;
272 	LOWMEM_DESTROY_REQUEST(ctl);
275 /*-------------------------------------------------*/
277 /* low memory action function for cgroup */
278 /* low memory action function */
279 static void lmk_act(void);
/*
 * Pluggable per-memory-level policy: a governor decides whether to act,
 * an action performs the reclaim. Slots are registered by other modules.
 */
281 struct lowmem_controller_ops {
282 	int (*governor)(void *data);
283 	int (*action)(void *data);
285 static struct lowmem_controller_ops lowmem_actions[MEM_LEVEL_MAX] = { NULL };
/* Register a governor for MEDIUM/LOW/CRITICAL levels only.
 * NOTE(review): switch scaffolding (case MEM_LEVEL_LOW, break/default)
 * appears to have been dropped by the extraction — verify upstream. */
286 void lowmem_initialize_controller_ops_governor(int mem_state, int (*governor)(void *data))
290 	case MEM_LEVEL_MEDIUM:
292 	case MEM_LEVEL_CRITICAL:
293 		lowmem_actions[mem_state].governor = governor;
/* Register an action for the same set of levels. */
299 void lowmem_initialize_controller_ops_action(int mem_state, int (*action)(void *data))
303 	case MEM_LEVEL_MEDIUM:
304 	case MEM_LEVEL_CRITICAL:
305 		lowmem_actions[mem_state].action = action;
/* Current pressure level and kill limits; totals cached from sysinfo. */
312 static size_t cur_mem_state = MEM_LEVEL_HIGH;
313 static int num_max_victims = MAX_MEMORY_CGROUP_VICTIMS;
315 static unsigned long long totalram_bytes;
316 static unsigned long totalram_kb;
/* OOM popup throttling: enabled by config, shown once per episode. */
318 static bool oom_popup_enable;
319 static bool oom_popup;
/* Whether memcg-based swap handling is currently enabled. */
static bool memcg_swap_status;

/* Query the cached memcg swap flag. */
bool lowmem_get_memcg_swap_status()
{
	return memcg_swap_status;
}

/* Update the cached memcg swap flag. */
void lowmem_set_memcg_swap_status(bool enabled)
{
	memcg_swap_status = enabled;
}
329 static int fragmentation_size;
/*
 * Map a MEMCG_* cgroup index to its display name for logging.
 * NOTE(review): the type_table initializer and the out-of-range return
 * were dropped by the extraction — restore from upstream before editing.
 */
331 const char *lowmem_convert_cgroup_type_to_str(int type)
333 	static const char *type_table[] =
335 	if (type >= MEMCG_ROOT && type <= MEMCG_THROTTLING)
336 		return type_table[type];
341 static const char *convert_status_to_str(int status)
343 static const char *status_table[] =
344 {"none", "done", "drop", "cont", "retry", "next_type"};
345 if(status >= LOWMEM_RECLAIM_NONE && status <= LOWMEM_RECLAIM_NEXT_TYPE)
346 return status_table[status];
347 return "error status";
350 static const char *convert_memstate_to_str(int mem_state)
352 static const char *state_table[] = {"mem high", "mem medium",
353 "mem low", "mem critical", "mem oom",};
354 if (mem_state >= 0 && mem_state < MEM_LEVEL_MAX)
355 return state_table[mem_state];
359 static int lowmem_launch_oompopup(void)
361 GVariantBuilder *const gv_builder = g_variant_builder_new(G_VARIANT_TYPE("a{ss}"));
362 g_variant_builder_add(gv_builder, "{ss}", "_SYSPOPUP_CONTENT_", "lowmemory_oom");
364 GVariant *const params = g_variant_new("(a{ss})", gv_builder);
365 g_variant_builder_unref(gv_builder);
367 int ret = d_bus_call_method_sync_gvariant(SYSTEM_POPUP_BUS_NAME,
368 SYSTEM_POPUP_PATH_SYSTEM, SYSTEM_POPUP_IFACE_SYSTEM,
369 "PopupLaunch", params);
371 g_variant_unref(params);
/*
 * Cache total system RAM (bytes and KB) from the kernel and publish it
 * via register_totalram_bytes().
 * NOTE(review): the sysinfo(2) call and its `struct sysinfo si`
 * declaration were dropped by the extraction; `si` below comes from the
 * missing lines. Presumably runs once — confirm upstream guard.
 */
376 static inline void get_total_memory(void)
383 		totalram_bytes = (unsigned long long)si.totalram * si.mem_unit;
384 		totalram_kb = BYTE_TO_KBYTE(totalram_bytes);
386 		register_totalram_bytes(totalram_bytes);
389 		_E("Failed to get total ramsize from the kernel");
/*
 * Sum the RSS (in KB) of a task: either the single pid, or every pid in
 * tsk->pids when the task groups multiple processes (e.g. browser +
 * web process sharing a pgid).
 * NOTE(review): declarations of ret/index/pid and some early-return
 * lines were dropped by the extraction.
 */
393 unsigned int lowmem_get_task_mem_usage_rss(const struct task_info *tsk)
395 	unsigned int size_kb = 0, total_size_kb = 0;
400 	 * If pids are allocated only when there are multiple processes with
401 	 * the same pgid e.g., browser and web process. Mostly, single process
404 	if (tsk->pids == NULL) {
405 		ret = proc_get_ram_usage(tsk->pid, &size_kb);
407 		/* If there is no proc entry for given pid the process
408 		 * should be abandoned during further processing
411 			_D("failed to get rss memory usage of %d", tsk->pid);
/* Multi-pid task: accumulate usage over every member pid. */
416 	for (index = 0; index < tsk->pids->len; index++) {
417 		pid = g_array_index(tsk->pids, pid_t, index);
418 		ret = proc_get_ram_usage(pid, &size_kb);
419 		if (ret != RESOURCED_ERROR_NONE)
421 		total_size_kb += size_kb;
424 	return total_size_kb;
/*
 * Free a task_info GArray including each element's per-task pid array.
 * NOTE(review): the NULL-array guard and the `if (tsk->pids)` check
 * around the inner free appear to have been dropped by the extraction.
 */
427 static void lowmem_free_task_info_array(GArray *array)
434 	for (i = 0; i < array->len; i++) {
435 		struct task_info *tsk;
437 		tsk = &g_array_index(array, struct task_info, i);
439 			g_array_free(tsk->pids, true);
442 	g_array_free(array, true);
445 static inline int is_dynamic_process_killer(int flags)
447 return (flags & OOM_FORCE) && !(flags & OOM_NOMEMORY_CHECK);
450 static unsigned int is_memory_recovered(unsigned int *avail, unsigned int thres)
452 unsigned int available = proc_get_mem_available();
453 unsigned int should_be_freed_mb = 0;
455 if (available < thres)
456 should_be_freed_mb = thres - available;
458 * free THRESHOLD_MARGIN more than real should be freed,
459 * because launching app is consuming up the memory.
461 if (should_be_freed_mb > 0)
462 should_be_freed_mb += THRESHOLD_MARGIN;
466 return should_be_freed_mb;
469 static void lowmem_oom_popup_once(void)
471 if (oom_popup_enable && !oom_popup) {
472 lowmem_launch_oompopup();
/*
 * Read the Smack label of @pid from its /proc attr file into @name
 * (at most @len bytes).
 * NOTE(review): declarations (fp, path, attr_len), the fopen failure
 * path, fclose, and the return statements were dropped by the
 * extraction — verify error handling (esp. that fp is closed) upstream.
 */
477 static int get_privilege(pid_t pid, char *name, size_t len)
480 	char attr[MAX_NAME_LENGTH];
484 	snprintf(path, sizeof(path), PROC_APP_ATTR_PATH, pid);
486 	fp = fopen(path, "r");
490 	attr_len = fread(attr, 1, sizeof(attr) - 1, fp);
495 	attr[attr_len] = '\0';
497 	snprintf(name, len, "%s", attr);
/*
 * Classify @pid by its privilege label.
 * NOTE(review): the return values for the System/User/Privileged
 * branches are in lines dropped by the extraction; presumably
 * distinguishes apps from system processes — confirm upstream.
 */
501 static int is_app(pid_t pid)
503 	char attr[MAX_NAME_LENGTH];
507 	ret = get_privilege(pid, attr, sizeof(attr));
509 		_E("Failed to get privilege of PID=%d, ret=%d.", pid, ret);
513 	len = strlen(attr) + 1;
515 	if (!strncmp("System", attr, len))
518 	if (!strncmp("User", attr, len))
521 	if (!strncmp("System::Privileged", attr, len))
/*
 * Build (and cache in lowmem_task_info_app_array) the list of candidate
 * application tasks whose oom_score_adj falls in [start_oom, end_oom].
 * On OOM_REVISE passes, apps already killed in a previous attempt are
 * skipped. Returns the array with an extra reference so callers can
 * free it via lowmem_free_task_info_array().
 * NOTE(review): several `continue;`/brace lines were dropped by the
 * extraction; read with upstream at hand.
 */
527 static GArray *lowmem_get_task_info_app(int killer_flags, int start_oom, int end_oom)
530 	GSList *proc_app_list = proc_app_list_open();
532 	if (!lowmem_task_info_app_array)
533 		lowmem_task_info_app_array = g_array_new(false, false, sizeof(struct task_info));
535 	gslist_for_each_item(iter, proc_app_list) {
536 		struct proc_app_info *pai = (struct proc_app_info *)(iter->data);
537 		struct task_info task;
542 		if (pai->memory.oom_score_adj > end_oom
543 				|| pai->memory.oom_score_adj < start_oom)
546 		if ((killer_flags & OOM_REVISE) && pai->memory.oom_killed) {
548 			 * If it is not the first attempt to kill this app and
549 			 * the app is already killed
554 		task.pid = pai->main_pid;
/* Multi-process app: collect the main pid plus all child pids. */
556 			task.pids = g_array_new(false, false, sizeof(pid_t));
557 			g_array_append_val(task.pids, task.pid);
558 			for (GSList *iter_child = pai->childs; iter_child != NULL; iter_child = g_slist_next(iter_child)) {
559 				pid_t child = GPOINTER_TO_PID(iter_child->data);
560 				g_array_append_val(task.pids, child);
565 		task.pgid = getpgid(task.pid);
566 		task.oom_score_adj = pai->memory.oom_score_adj;
567 		task.size = lowmem_get_task_mem_usage_rss(&task); /* KB */
568 		task.proc_app_info_oom_killed = &(pai->memory.oom_killed);
569 		task.proc_app_info_flags = pai->flags;
572 		 * Before oom_score_adj of favourite (oom_score = 270)
573 		 * applications is independent of lru_state, now we consider
574 		 * lru_state, while killing favourite process.
576 		if (task.oom_score_adj == OOMADJ_FAVORITE
577 				&& pai->lru_state >= PROC_BACKGROUND) {
579 				OOMADJ_FAVORITE + OOMADJ_FAVORITE_APP_INCREASE
582 			task.oom_score_lru = pai->memory.oom_score_adj;
585 		g_array_append_val(lowmem_task_info_app_array, task);
588 	proc_app_list_close();
/* Extra ref: the cached array outlives the caller's free. */
590 	g_array_ref(lowmem_task_info_app_array);
591 	return lowmem_task_info_app_array;
/*
 * Scan /proc for non-app tasks (used when hunting misbehaving system
 * processes) and cache them in lowmem_task_info_proc_array. Tasks whose
 * oom_score_adj marks them as applications are skipped — those come
 * from lowmem_get_task_info_app(). Returns the array with an extra ref.
 * NOTE(review): declarations (dp, pid), some continue/closedir lines
 * and task.pid/pgid assignments were dropped by the extraction.
 */
594 static GArray *lowmem_get_task_info_proc()
597 	struct dirent *dentry = NULL;
599 	dp = opendir("/proc");
601 		_E("fail to open /proc");
605 	if (!lowmem_task_info_proc_array)
606 		lowmem_task_info_proc_array = g_array_new(false, false, sizeof(struct task_info));
607 	while ((dentry = readdir(dp)) != NULL) {
608 		struct task_info task;
610 		int oom_score_adj = 0;
612 		if (!isdigit(dentry->d_name[0]))
615 		pid = (pid_t)atoi(dentry->d_name);
617 			continue; /* skip invalid pids or kernel processes */
626 		if (proc_get_oom_score_adj(pid, &oom_score_adj) < 0) {
627 			_D("pid(%d) was already terminated", pid);
632 		 * Check whether this array includes applications or not.
633 		 * If it doesn't require to get applications
634 		 * and pid has been already included in pai,
637 		if (oom_score_adj > OOMADJ_SU && oom_score_adj <= OOMADJ_APP_MAX)
641 		 * Currently, for tasks in the memory cgroup,
642 		 * do not consider multiple tasks with one pgid.
647 		task.oom_score_adj = oom_score_adj;
648 		task.oom_score_lru = oom_score_adj;
649 		task.size = lowmem_get_task_mem_usage_rss(&task);
651 		 * This task is not an app, so field variables below are not
652 		 * used in this task. If not app, oom_killed is NULL.
654 		task.proc_app_info_oom_killed = NULL;
655 		task.proc_app_info_flags = -1;
657 		g_array_append_val(lowmem_task_info_proc_array, task);
662 	g_array_ref(lowmem_task_info_proc_array);
663 	return lowmem_task_info_proc_array;
666 struct lowmem_governor_ops {
667 int(*get_kill_candidates)(GArray *, GArray *, GArray *, unsigned long);
670 static struct lowmem_governor_ops governor_ops = { NULL };
671 void lowmem_initialize_governor_ops(int(*get_kill_candidates)(GArray *,
672 GArray *, GArray *, unsigned long))
674 governor_ops.get_kill_candidates = get_kill_candidates;
/*
 * Controller hook: actually kills the selected candidates. Registered by
 * the controller module; invoked from lowmem_kill_victims().
 * NOTE(review): the tail of the registration function's parameter list
 * and its opening brace were dropped by the extraction — the final
 * parameter is presumably the `void(*)(void)` popup callback matching
 * the function-pointer type above; confirm upstream.
 */
677 static int(*lowmem_controller_kill_candidates)(GArray *, unsigned, unsigned int,
678 					int, int, int *, unsigned int *,
679 					unsigned, void(*)(void));
680 void lowmem_initialize_kill_candidates(int(*kill_candidates)(GArray *, unsigned,
681 					unsigned int, int, int, int *,
682 					unsigned int *, unsigned,
685 	lowmem_controller_kill_candidates = kill_candidates;
689  * @brief Terminate up to max_victims processes after finding them from pai.
690 	It depends on proc_app_info lists
691 	and it also reference systemservice cgroup
692 	because some processes in this group don't have proc_app_info.
694  * @max_victims: max number of processes to be terminated
695  * @start_oom: find victims from start oom adj score value
696  * @end_oom: find victims to end oom adj score value
697  * @should_be_freed: amount of memory to be reclaimed (in MB)
698  * @total_size[out]: total size of possibly reclaimed memory (required)
699  * @completed: final outcome (optional)
700  * @threshold: desired value of memory available
/*
 * NOTE(review): intricate, and the extraction dropped lines (victim_cnt
 * declaration, goto/cleanup labels, the totalram argument to the
 * governor call, the return statement). Do not restructure without the
 * complete upstream source.
 */
702 static int lowmem_kill_victims(int max_victims,
703 	int start_oom, int end_oom, unsigned should_be_freed, int flags,
704 	unsigned int *total_size, int *completed, unsigned int threshold)
706 	unsigned int total_victim_size = 0;
707 	int candidates_cnt = 0;
709 	int status = LOWMEM_RECLAIM_NONE;
710 	GArray *task_info_app_array = NULL;
711 	GArray *task_info_proc_array = NULL;
713 	task_info_app_array = lowmem_get_task_info_app(flags, start_oom, end_oom);
715 	 * If start_oom == OOMADJ_SU, processes in /proc will be
716 	 * the lowmem_kill_candidates to handle low memory situation.
717 	 * Malicious system process can be found even though it has
/* /proc is only scanned when hunting system-level (OOMADJ_SU) victims. */
720 	task_info_proc_array = (start_oom == OOMADJ_SU)
721 		? lowmem_get_task_info_proc()
724 	/* Get the victim candidates from lowmem governor */
725 	if (!lowmem_kill_candidates)
726 		lowmem_kill_candidates = g_array_new(false, false, sizeof(struct task_info *));
728 	assert(governor_ops.get_kill_candidates != NULL);
729 	candidates_cnt = governor_ops.get_kill_candidates(
730 			lowmem_kill_candidates,
732 			task_info_proc_array,
735 	_D("[LMK] candidates_cnt=%d", candidates_cnt);
736 	if (candidates_cnt <= 0) {
737 		status = LOWMEM_RECLAIM_NEXT_TYPE;
/* Delegate the actual killing to the registered controller. */
741 	assert(lowmem_controller_kill_candidates != NULL);
742 	victim_cnt = lowmem_controller_kill_candidates(lowmem_kill_candidates,
743 			should_be_freed, threshold,
745 			&status, &total_victim_size,
746 			lmk_start_threshold_mb,
747 			lowmem_oom_popup_once);
749 	if (lowmem_kill_candidates) {
750 		/* Prevents the GArray to be really freed */
751 		g_array_ref(lowmem_kill_candidates);
752 		g_array_free(lowmem_kill_candidates, true);
754 	lowmem_free_task_info_array(task_info_app_array);
755 	lowmem_free_task_info_array(task_info_proc_array);
756 	*total_size = total_victim_size;
757 	if(*completed != LOWMEM_RECLAIM_CONT)
760 		*completed = LOWMEM_RECLAIM_NEXT_TYPE;
764 static int calculate_range_of_oom(enum oom_score score, int *min, int *max)
766 if (score > OOM_SCORE_MAX || score < OOM_SCORE_HIGH) {
767 _E("[LMK] oom score (%d) is out of scope", score);
768 return RESOURCED_ERROR_FAIL;
771 *max = cgroup_get_highest_oom_score_adj(score);
772 *min = cgroup_get_lowest_oom_score_adj(score);
774 return RESOURCED_ERROR_NONE;
/*
 * Execute one queued reclaim request: repeatedly pick an oom band,
 * kill victims via lowmem_kill_victims(), and escalate LOW -> MEDIUM ->
 * HIGH -> MAX until enough memory is restored or the attempt is
 * abandoned. Writes the final LOWMEM_RECLAIM_* outcome to ctl->status.
 * NOTE(review): the retry loop's braces and several early-exit lines
 * were dropped by the extraction — the control flow below is not
 * self-contained; keep the upstream file at hand before editing.
 */
777 static void lowmem_handle_request(struct lowmem_control *ctl)
779 	int start_oom, end_oom;
780 	int count = 0, victim_cnt = 0;
781 	int max_victim_cnt = ctl->count;
782 	int status = LOWMEM_RECLAIM_NONE;
783 	unsigned int available_mb = 0;
784 	unsigned int total_size_mb = 0;
785 	unsigned int current_size = 0;
786 	unsigned int reclaim_size_mb, shortfall_mb = 0;
787 	enum oom_score oom_score = ctl->score;
789 	available_mb = proc_get_mem_available();
790 	reclaim_size_mb = ctl->size_mb > available_mb /* MB */
791 		? ctl->size_mb - available_mb : 0;
793 	if (!reclaim_size_mb) {
794 		status = LOWMEM_RECLAIM_DONE;
799 	/* Prepare LMK to start doing it's job. Check preconditions. */
800 	if (calculate_range_of_oom(oom_score, &start_oom, &end_oom))
803 	lmk_start_threshold_mb = get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM];
804 	shortfall_mb = is_memory_recovered(&available_mb, ctl->size_mb);
806 	if (!shortfall_mb || !reclaim_size_mb) {
807 		status = LOWMEM_RECLAIM_DONE;
813 	victim_cnt = lowmem_kill_victims(max_victim_cnt, start_oom, end_oom,
814 			reclaim_size_mb, ctl->flags, &current_size, &status, ctl->size_mb);
/* current_size comes back in KB; convert before bookkeeping in MB. */
817 	current_size = KBYTE_TO_MBYTE(current_size);
818 	reclaim_size_mb -= reclaim_size_mb > current_size
819 		? current_size : reclaim_size_mb;
820 	total_size_mb += current_size;
822 	_I("[LMK] current: kill %d victims,  reclaim_size=%uMB from %d to %d status=%s",
823 			victim_cnt, current_size,
824 			start_oom, end_oom, convert_status_to_str(status));
827 	if ((status == LOWMEM_RECLAIM_DONE) ||
828 	    (status == LOWMEM_RECLAIM_DROP) ||
829 	    (status == LOWMEM_RECLAIM_RETRY))
833 	 * If it doesn't finish reclaiming memory in first operation,
834 		- if flags has OOM_IN_DEPTH,
835 		   try to find victims again in the active cgroup.
836 		   otherwise, just return because there is no more victims in the desired cgroup.
837 		- if flags has OOM_REVISE,
838 		   it means that resourced can't find victims from proc_app_list.
839 		   So, it should search victims or malicious process from /proc.
840 		   But searching /proc leads to abnormal behaviour.
841 		   (Make sluggish or kill same victims continuously)
842 		   Thus, otherwise, just return in first operation and wait some period.
/* Escalation ladder: LOW -> MEDIUM -> HIGH -> MAX (then retry/abort). */
844 	if (oom_score == OOM_SCORE_LOW) {
845 		oom_score = OOM_SCORE_MEDIUM;
847 	} else if ((oom_score == OOM_SCORE_MEDIUM) && (ctl->flags & OOM_IN_DEPTH)) {
848 		oom_score = OOM_SCORE_HIGH;
849 		if(ctl->flags & OOM_FORCE)
850 			max_victim_cnt = FOREGROUND_VICTIMS;
852 	} else if ((oom_score == OOM_SCORE_HIGH) && (ctl->flags & OOM_IN_DEPTH)) {
853 		status = LOWMEM_RECLAIM_RETRY;
854 		ctl->score = OOM_SCORE_MAX;
856 	else if (oom_score == OOM_SCORE_MAX) {
857 		status = LOWMEM_RECLAIM_RETRY;
860 	_I("[LMK] Done: killed %d processes reclaimed=%uMB remaining=%uMB shortfall=%uMB status=%s",
861 		count, total_size_mb, reclaim_size_mb, shortfall_mb, convert_status_to_str(status));
863 	/* After we finish reclaiming it's worth to remove oldest memps logs */
864 	ctl->status = status;
/*
 * LMK worker thread main loop: blocks on the request queue, services
 * each lowmem_control request via lowmem_handle_request(), retries with
 * OOM_REVISE while memory pressure persists, and tears down the shared
 * arrays on exit.
 * NOTE(review): loop braces, the try_count declaration, callback
 * invocation and the final return were dropped by the extraction.
 */
867 static void *lowmem_reclaim_worker(void *arg)
869 	struct lowmem_worker *lmw = (struct lowmem_worker *)arg;
/* Run the killer at high scheduling priority so reclaim is not starved. */
871 	setpriority(PRIO_PROCESS, 0, OOM_KILLER_PRIORITY);
873 	g_async_queue_ref(lmw->queue);
877 		struct lowmem_control *ctl;
879 		LOWMEM_WORKER_IDLE(lmw);
880 		/* Wait on any wake-up call */
881 		ctl = g_async_queue_pop(lmw->queue);
884 			_W("[LMK] ctl structure is NULL");
888 		if ((ctl->flags & OOM_DROP) || !LOWMEM_WORKER_IS_ACTIVE(lmw)) {
889 			LOWMEM_DESTROY_REQUEST(ctl);
893 		LOWMEM_WORKER_RUN(lmw);
895 		_D("[LMK] %d tries", ++try_count);
896 		lowmem_handle_request(ctl);
898 		 * Case the process failed to reclaim requested amount of memory
899 		 * or still under have memory pressure - try the timeout wait.
900 		 * There is a chance this will get woken-up in a better reality.
902 		if (ctl->status == LOWMEM_RECLAIM_RETRY &&
903 		    !(ctl->flags & OOM_SINGLE_SHOT)) {
904 			unsigned int available_mb = proc_get_mem_available();
906 			if (available_mb >= ctl->size_mb) {
907 				_I("[LMK] Memory restored: requested=%uMB available=%uMB\n",
908 					ctl->size_mb, available_mb);
909 				ctl->status = LOWMEM_RECLAIM_DONE;
912 				LOWMEM_DESTROY_REQUEST(ctl);
913 				LOWMEM_WORKER_IDLE(lmw);
917 			if (LOWMEM_WORKER_IS_ACTIVE(lmw)) {
918 				g_usleep(LMW_RETRY_WAIT_TIMEOUT_MSEC);
919 				ctl->flags |= OOM_REVISE;
925 		 * The ctl callback would check available size again.
926 		 * And it is last point in reclaiming worker.
927 		 * Resourced sent SIGKILL signal to victim processes
928 		 * so it should wait for a some seconds until each processes returns memory.
930 		g_usleep(LMW_LOOP_WAIT_TIMEOUT_MSEC);
934 		/* The lmk becomes the owner of all queued requests .. */
935 		LOWMEM_DESTROY_REQUEST(ctl);
936 		LOWMEM_WORKER_IDLE(lmw);
938 	g_async_queue_unref(lmw->queue);
940 	/* Free GArrays to save kill candidates and apps/procs */
941 	if (lowmem_kill_candidates)
942 		g_array_free(lowmem_kill_candidates, true);
943 	lowmem_free_task_info_array(lowmem_task_info_app_array);
944 	lowmem_free_task_info_array(lowmem_task_info_proc_array);
949 unsigned int lowmem_get_lowmem_state()
951 return cur_mem_state;
953 void lowmem_change_lowmem_state(unsigned int mem_state)
955 cur_mem_state = mem_state;
956 lmk_start_threshold_mb = get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM];
958 resourced_notify(RESOURCED_NOTIFIER_MEM_LEVEL_CHANGED,
959 (void *)&cur_mem_state);
/*
 * Move/mark @pid for swapping under the memcg at @path. If the process's
 * oom_score_adj is below the LOW band it is first raised so that setting
 * the score triggers lowmem_swap_memory(); otherwise swap is started
 * directly via the SWAP_START notifier.
 * NOTE(review): declarations (error, oom_score_adj), the NULL-path
 * guard's return, and the `if (move)` branching were dropped by the
 * extraction.
 */
962 void lowmem_trigger_swap(pid_t pid, char *path, bool move)
966 	int lowest_oom_score_adj;
969 		_E("[SWAP] Unknown memory cgroup path to swap");
973 	/* In this case, corresponding process will be moved to memory MEMCG_THROTTLING.
976 	error = proc_get_oom_score_adj(pid, &oom_score_adj);
978 		_E("[SWAP] Cannot get oom_score_adj of pid (%d)", pid);
982 	lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(OOM_SCORE_LOW);
984 	if (oom_score_adj < lowest_oom_score_adj) {
985 		oom_score_adj = lowest_oom_score_adj;
986 		/* End of this funciton, 'lowmem_swap_memory()' funciton will be called */
987 		proc_set_oom_score_adj(pid, oom_score_adj, find_app_info(pid));
992 	/* Correponding process is already managed per app or service.
993 	 * In addition, if some process is already located in the MEMCG_THROTTLING, then just do swap
995 	resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
/*
 * Publish the current memory level as a SYS_EVENT_LOW_MEMORY system
 * event (normal / soft warning / hard warning).
 * NOTE(review): the switch scaffolding, the MEM_LEVEL_LOW and
 * MEM_LEVEL_OOM case labels, and bundle_free() were dropped by the
 * extraction — verify the bundle is released upstream.
 */
998 void lowmem_memory_level_send_system_event(int lv)
1004 	case MEM_LEVEL_HIGH:
1005 	case MEM_LEVEL_MEDIUM:
1007 		str = EVT_VAL_MEMORY_NORMAL;
1009 	case MEM_LEVEL_CRITICAL:
1010 		str = EVT_VAL_MEMORY_SOFT_WARNING;
1013 		str = EVT_VAL_MEMORY_HARD_WARNING;
1016 		_E("Invalid state");
1020 	b = bundle_create();
1022 		_E("Failed to create bundle");
1026 	bundle_add_str(b, EVT_KEY_LOW_MEMORY, str);
1027 	eventsystem_send_system_event(SYS_EVENT_LOW_MEMORY, b);
/*
 * Completion callback for OOM-level reclaim requests: on success the
 * level is restored to MEM_LEVEL_HIGH.
 * NOTE(review): a line between the condition and the state change was
 * dropped (presumably re-arming the oom_popup latch) — confirm upstream.
 */
1031 static void medium_cb(struct lowmem_control *ctl)
1033 	if (ctl->status == LOWMEM_RECLAIM_DONE)
1035 	lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
/*
 * OOM-level low memory action: update vconf/system-event state, pause
 * the freezer, and queue an OOM_IN_DEPTH reclaim request to the worker
 * when available memory is below the root memcg leave threshold.
 * NOTE(review): several lines (ret declaration, braces, the swap
 * compaction guard, malloc_trim-style flush at the end) were dropped by
 * the extraction.
 */
1038 static void lmk_act(void)
1040 	unsigned int available_mb;
1042 	int status = VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL;
1045 	 * Don't trigger reclaim worker
1046 	 * if it is already running
1048 	if (LOWMEM_WORKER_IS_RUNNING(&lmw))
1051 	ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
1053 		_D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
1055 	lowmem_memory_level_send_system_event(MEM_LEVEL_OOM);
1056 	if (status != VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING) {
/* First entry into hard warning: pause frozen apps before killing. */
1057 		if (proc_get_freezer_status() == CGROUP_FREEZER_ENABLED)
1058 			resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
1059 				(void *)CGROUP_FREEZER_PAUSED);
1060 		vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
1061 			      VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING);
1063 	available_mb = proc_get_mem_available();
1065 	lowmem_change_lowmem_state(MEM_LEVEL_OOM);
1067 	if (available_mb < get_root_memcg_info()->threshold_leave_mb) {
1068 		struct lowmem_control *ctl;
1070 		ctl = LOWMEM_NEW_REQUEST();
1072 			LOWMEM_SET_REQUEST(ctl, OOM_IN_DEPTH,
1073 				OOM_SCORE_LOW, get_root_memcg_info()->threshold_leave_mb,
1074 				num_max_victims, medium_cb);
1075 			lowmem_queue_request(&lmw, ctl);
1079 	resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_OOM);
1082 	 * Flush resourced memory such as other processes.
1083 	 * Resourced can use both many fast bins and sqlite3 cache memery.
/*
 * Dispatch the registered governor/action pair for @mem_state. A repeat
 * of the current state is ignored except for MEM_LEVEL_OOM, which must
 * always re-run so the reclaim worker can be re-triggered.
 * NOTE(review): switch scaffolding (MEM_LEVEL_LOW/OOM cases, breaks,
 * default) was dropped by the extraction.
 */
1090 void lowmem_trigger_memory_state_action(int mem_state)
1093 	 * Check if the state we want to set is different from current
1094 	 * But it should except this condition if mem_state is already medium.
1095 	 * Otherwise, recalim worker couldn't run any more.
1097 	if (mem_state != MEM_LEVEL_OOM && cur_mem_state == mem_state)
1100 	switch (mem_state) {
1101 	case MEM_LEVEL_HIGH:
1102 	case MEM_LEVEL_MEDIUM:
1104 	case MEM_LEVEL_CRITICAL:
/* Governor gates the action: a negative governor verdict skips it. */
1105 		assert(lowmem_actions[mem_state].governor != NULL);
1106 		assert(lowmem_actions[mem_state].action != NULL);
1107 		if (lowmem_actions[mem_state].governor(NULL) < 0)
1109 		lowmem_actions[mem_state].action(NULL);
/*
 * Classify available memory (MB) into a MEM_LEVEL_* state by walking
 * thresholds from most to least severe. OOM uses the dedicated LMK
 * start threshold instead of the per-level table.
 * NOTE(review): the mem_state declaration, loop breaks and the final
 * return were dropped by the extraction.
 */
1119 unsigned int lowmem_check_mem_state(unsigned int available_mb)
1122 	for (mem_state = MEM_LEVEL_MAX - 1; mem_state > MEM_LEVEL_HIGH; mem_state--) {
1123 		if (mem_state != MEM_LEVEL_OOM &&
1124 				available_mb <= get_root_memcg_info()->threshold_mb[mem_state])
1126 		else if (mem_state == MEM_LEVEL_OOM && available_mb <= lmk_start_threshold_mb)
1133 /* setup memcg parameters depending on total ram size. */
1134 static void setup_memcg_params(void)
1136 unsigned long total_ramsize_mb;
1139 total_ramsize_mb = BYTE_TO_MBYTE(totalram_bytes);
1141 _D("Total: %lu MB", total_ramsize_mb);
1142 if (total_ramsize_mb <= MEM_SIZE_64) {
1143 /* set thresholds for ram size 64M */
1144 proactive_threshold_mb = PROACTIVE_64_THRES;
1145 proactive_leave_mb = PROACTIVE_64_LEAVE;
1146 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_64_THRES_DEDUP);
1147 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_64_THRES_SWAP);
1148 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_64_THRES_LOW);
1149 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_64_THRES_MEDIUM);
1150 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_64_THRES_LEAVE);
1151 num_max_victims = CGROUP_ROOT_64_NUM_VICTIMS;
1152 } else if (total_ramsize_mb <= MEM_SIZE_256) {
1153 /* set thresholds for ram size 256M */
1154 proactive_threshold_mb = PROACTIVE_256_THRES;
1155 proactive_leave_mb = PROACTIVE_256_LEAVE;
1156 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_256_THRES_DEDUP);
1157 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_256_THRES_SWAP);
1158 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_256_THRES_LOW);
1159 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_256_THRES_MEDIUM);
1160 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_256_THRES_LEAVE);
1161 num_max_victims = CGROUP_ROOT_256_NUM_VICTIMS;
1162 } else if (total_ramsize_mb <= MEM_SIZE_448) {
1163 /* set thresholds for ram size 448M */
1164 proactive_threshold_mb = PROACTIVE_448_THRES;
1165 proactive_leave_mb = PROACTIVE_448_LEAVE;
1166 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_448_THRES_DEDUP);
1167 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_448_THRES_SWAP);
1168 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_448_THRES_LOW);
1169 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_448_THRES_MEDIUM);
1170 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_448_THRES_LEAVE);
1171 num_max_victims = CGROUP_ROOT_448_NUM_VICTIMS;
1172 } else if (total_ramsize_mb <= MEM_SIZE_512) {
1173 /* set thresholds for ram size 512M */
1174 proactive_threshold_mb = PROACTIVE_512_THRES;
1175 proactive_leave_mb = PROACTIVE_512_LEAVE;
1176 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_512_THRES_DEDUP);
1177 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_512_THRES_SWAP);
1178 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_512_THRES_LOW);
1179 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_512_THRES_MEDIUM);
1180 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_512_THRES_LEAVE);
1181 num_max_victims = CGROUP_ROOT_512_NUM_VICTIMS;
1182 } else if (total_ramsize_mb <= MEM_SIZE_768) {
1183 /* set thresholds for ram size 768M */
1184 proactive_threshold_mb = PROACTIVE_768_THRES;
1185 proactive_leave_mb = PROACTIVE_768_LEAVE;
1186 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_768_THRES_DEDUP);
1187 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_768_THRES_SWAP);
1188 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_768_THRES_LOW);
1189 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_768_THRES_MEDIUM);
1190 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_768_THRES_LEAVE);
1191 num_max_victims = CGROUP_ROOT_768_NUM_VICTIMS;
1192 } else if (total_ramsize_mb <= MEM_SIZE_1024) {
1193 /* set thresholds for ram size more than 1G */
1194 proactive_threshold_mb = PROACTIVE_1024_THRES;
1195 proactive_leave_mb = PROACTIVE_1024_LEAVE;
1196 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_1024_THRES_DEDUP);
1197 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_1024_THRES_SWAP);
1198 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_1024_THRES_LOW);
1199 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_1024_THRES_MEDIUM);
1200 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_1024_THRES_LEAVE);
1201 num_max_victims = CGROUP_ROOT_1024_NUM_VICTIMS;
1202 } else if (total_ramsize_mb <= MEM_SIZE_2048) {
1203 proactive_threshold_mb = PROACTIVE_2048_THRES;
1204 proactive_leave_mb = PROACTIVE_2048_LEAVE;
1205 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_2048_THRES_DEDUP);
1206 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_2048_THRES_SWAP);
1207 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_2048_THRES_LOW);
1208 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_2048_THRES_MEDIUM);
1209 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_2048_THRES_LEAVE);
1210 num_max_victims = CGROUP_ROOT_2048_NUM_VICTIMS;
1212 proactive_threshold_mb = PROACTIVE_3072_THRES;
1213 proactive_leave_mb = PROACTIVE_3072_LEAVE;
1214 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_3072_THRES_DEDUP);
1215 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_3072_THRES_SWAP);
1216 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_3072_THRES_LOW);
1217 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_3072_THRES_MEDIUM);
1218 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_3072_THRES_LEAVE);
1219 num_max_victims = CGROUP_ROOT_3072_NUM_VICTIMS;
/*
 * Create the request queue and spawn the low-memory-killer (LMK) worker
 * thread that services reclaim requests pushed onto lmw.queue.
 * Returns RESOURCED_ERROR_NONE on success, RESOURCED_ERROR_FAIL otherwise.
 * NOTE(review): several structural lines (braces, early-return body, NULL
 * check on the queue) were lost in extraction; comments describe only what
 * the visible code shows.
 */
1223 static int lowmem_activate_worker(void)
1225 int ret = RESOURCED_ERROR_NONE;
/* Worker already active: presumably an early return — body truncated here. */
1227 if (LOWMEM_WORKER_IS_ACTIVE(&lmw)) {
/* Queue owns queued requests; lowmem_request_destroy frees leftovers. */
1231 lmw.queue = g_async_queue_new_full(lowmem_request_destroy);
1233 _E("Failed to create request queue\n");
1234 return RESOURCED_ERROR_FAIL;
/* Mark active before pthread_create so the worker sees a consistent flag. */
1236 LOWMEM_WORKER_ACTIVATE(&lmw);
1237 ret = pthread_create(&lmw.worker_thread, NULL,
1238 (void *)lowmem_reclaim_worker, (void *)&lmw);
/* pthread_create failed: roll back the active flag and report. */
1240 LOWMEM_WORKER_DEACTIVATE(&lmw);
1241 _E("Failed to create LMK thread: %d\n", ret);
/* Detached: never joined; shutdown is signalled via an OOM_DROP request
 * (see lowmem_deactivate_worker). */
1243 pthread_detach(lmw.worker_thread);
1244 ret = RESOURCED_ERROR_NONE;
/*
 * Stop the LMK worker thread: drain pending requests, then push a final
 * OOM_DROP request which presumably tells the worker loop to exit, and
 * drop our reference on the queue.
 * NOTE(review): NULL-check branch after LOWMEM_NEW_REQUEST() was lost in
 * extraction — verify against the full source.
 */
1249 static void lowmem_deactivate_worker(void)
1251 struct lowmem_control *ctl;
/* Nothing to do if the worker was never started. */
1253 if (!LOWMEM_WORKER_IS_ACTIVE(&lmw))
1256 LOWMEM_WORKER_DEACTIVATE(&lmw);
1257 lowmem_drain_queue(&lmw);
1259 ctl = LOWMEM_NEW_REQUEST();
1261 _E("Critical - g_slice alloc failed - Lowmem cannot be deactivated");
/* OOM_DROP acts as the shutdown sentinel for the worker thread. */
1264 ctl->flags = OOM_DROP;
1265 g_async_queue_push(lmw.queue, ctl);
/* Release this side's queue reference; the worker holds/releases its own. */
1266 g_async_queue_unref(lmw.queue);
/*
 * Completion callback for forced reclaim: once reclaim finishes, restore
 * the memory-state machine to MEM_LEVEL_HIGH (non-forced transition).
 */
1269 static void lowmem_force_reclaim_cb(struct lowmem_control *ctl)
1271 lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
/*
 * Queue a one-shot forced reclaim request to the LMK worker.
 * Non-positive @victims/@score/@threshold_mb fall back to defaults
 * (MAX_MEMORY_CGROUP_VICTIMS / OOM_SCORE_LOW / root memcg leave threshold).
 * Forces the state machine to MEM_LEVEL_CRITICAL first so reclaim runs;
 * lowmem_force_reclaim_cb restores MEM_LEVEL_HIGH when done.
 * NOTE(review): allocation NULL-check and return statements were lost in
 * extraction.
 */
1274 int lowmem_trigger_reclaim(int flags, int victims, enum oom_score score, int threshold_mb)
1276 struct lowmem_control *ctl = LOWMEM_NEW_REQUEST();
1281 flags |= OOM_FORCE | OOM_IN_DEPTH | OOM_SINGLE_SHOT;
1282 victims = victims > 0 ? victims : MAX_MEMORY_CGROUP_VICTIMS;
1283 score = score > 0 ? score : OOM_SCORE_LOW;
1284 threshold_mb = threshold_mb > 0 ? threshold_mb : get_root_memcg_info()->threshold_leave_mb;
1286 lowmem_change_memory_state(MEM_LEVEL_CRITICAL, 1);
1287 LOWMEM_SET_REQUEST(ctl, flags,
1288 score, threshold_mb, victims,
1289 lowmem_force_reclaim_cb);
1290 lowmem_queue_request(&lmw, ctl);
/*
 * Reclaim enough memory to swap out @swap_size_bytes: target size is the
 * root memcg leave threshold plus the swap size (converted to MB).
 * Victim count is capped at MAX_PROACTIVE_HIGH_VICTIMS.
 */
1295 void lowmem_trigger_swap_reclaim(enum oom_score score, unsigned long long swap_size_bytes)
1297 int size_mb, victims;
1299 victims = num_max_victims > MAX_PROACTIVE_HIGH_VICTIMS
1300 ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
1302 size_mb = get_root_memcg_info()->threshold_leave_mb + BYTE_TO_MBYTE(swap_size_bytes);
1303 lowmem_trigger_reclaim(0, victims, score, size_mb);
/*
 * Detect buddy-allocator fragmentation in the "Normal" zone.
 * Sums free pages of order >= PAGE_32K, weighted to a common unit
 * (order-2 / 32K equivalents), and compares against fragmentation_size.
 * Returns true when the weighted sum is below that floor.
 * NOTE(review): error-handling for proc_get_buddyinfo() and the return
 * statements were lost in extraction.
 */
1306 bool lowmem_fragmentated(void)
1308 struct buddyinfo bi;
1311 ret = proc_get_buddyinfo("Normal", &bi);
1316 * The fragmentation_size is the minimum count of order-2 pages in "Normal" zone.
1317 * If total buddy pages is smaller than fragmentation_size,
1318 * resourced will detect kernel memory is fragmented.
1319 * Default value is zero in low memory device.
/* Shifts normalize each order to 32K-page units: 64K=x2, 128K=x4, 256K=x8. */
1321 if (bi.page[PAGE_32K] + (bi.page[PAGE_64K] << 1) + (bi.page[PAGE_128K] << 2) +
1322 (bi.page[PAGE_256K] << 3) < fragmentation_size) {
1323 _I("fragmentation detected, need to execute proactive oom killer");
/*
 * Proactively run the low-memory killer before launching @appid so the
 * launch itself does not drive the system into the OOM range.
 * Strategy:
 *  - skip when available memory is already below the OOM threshold (the
 *    regular oom killer will handle it) or above proactive_leave_mb;
 *  - with HEART support, size the reclaim target from the app's average
 *    RSS history; otherwise fall back to fragmentation / static-threshold
 *    heuristics.
 * FIX: the HEART_SUPPORT branch used `->threshold[MEM_LEVEL_OOM]`, but the
 * memcg info field is `threshold_mb[]` everywhere else in this file (see
 * the non-HEART check above and load_configs/print_mem_configs) — renamed
 * to `threshold_mb` so the branch compiles and compares like units (MB).
 * NOTE(review): braces/returns and some lines were lost in extraction.
 */
1329 static void lowmem_proactive_oom_killer(int flags, char *appid)
1331 unsigned int before_mb;
1334 before_mb = proc_get_mem_available();
1336 /* If memory state is medium or normal, just return and kill in oom killer */
1337 if (before_mb < get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM] ||
1338 before_mb > proactive_leave_mb)
1341 victims = num_max_victims > MAX_PROACTIVE_HIGH_VICTIMS
1342 ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
1344 #ifdef HEART_SUPPORT
1346 * This branch is used only when HEART module is compiled in and
1347 * it's MEMORY module must be enabled. Otherwise this is skipped.
1349 struct heart_memory_data *md = heart_memory_get_memdata(appid, DATA_LATEST);
1351 unsigned int rss_mb, after_mb, size_mb;
1353 rss_mb = KBYTE_TO_MBYTE(md->avg_rss);
/* Estimate post-launch availability from the app's historical RSS. */
1357 after_mb = before_mb - rss_mb;
1359 * after launching app, ensure that available memory is
1360 * above threshold_leave
1362 if (after_mb >= get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM])
1365 if (proactive_threshold_mb - rss_mb >= get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM])
1366 size_mb = proactive_threshold_mb;
/* THRESHOLD_MARGIN: reclaim a little extra since the launch keeps consuming. */
1368 size_mb = rss_mb + get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM] + THRESHOLD_MARGIN;
1370 _D("history based proactive LMK : avg rss %u, available %u required = %u MB",
1371 rss_mb, before_mb, size_mb);
1372 lowmem_trigger_reclaim(0, victims, OOM_SCORE_LOW, size_mb);
1379 * When there is no history data for the launching app,
1380 * it is necessary to check current fragmentation state or application manifest file.
1381 * So, resourced feels proactive LMK is required, run oom killer based on dynamic
/* Fragmented buddy allocator also justifies proactive reclaim. */
1384 if (lowmem_fragmentated())
1388 * run proactive oom killer only when available is larger than
1389 * dynamic process threshold
1391 if (!proactive_threshold_mb || before_mb >= proactive_threshold_mb)
/* Only PROC_LARGEMEMORY apps proceed past this point. */
1394 if (!(flags & PROC_LARGEMEMORY))
1399 * free THRESHOLD_MARGIN more than real should be freed,
1400 * because launching app is consuming up the memory.
1402 _D("Run threshold based proactive LMK: memory level to reach: %u MB\n",
1403 proactive_leave_mb + THRESHOLD_MARGIN);
1404 lowmem_trigger_reclaim(0, victims, OOM_SCORE_LOW, proactive_leave_mb + THRESHOLD_MARGIN);
/* Accessor: current proactive LMK trigger threshold in MB. */
1407 unsigned int lowmem_get_proactive_thres(void)
1409 return proactive_threshold_mb;
/*
 * RESOURCED_NOTIFIER_APP_PRELAUNCH handler: run the proactive OOM killer
 * before a GUI app launches. Service apps (PROC_SERVICEAPP) and events
 * without app info are ignored. Always returns RESOURCED_ERROR_NONE.
 */
1412 static int lowmem_prelaunch_handler(void *data)
1414 struct proc_status *ps = (struct proc_status *)data;
1415 struct proc_app_info *pai = ps->pai;
1417 if (!pai || CHECK_BIT(pai->flags, PROC_SERVICEAPP))
1418 return RESOURCED_ERROR_NONE;
1420 lowmem_proactive_oom_killer(ps->pai->flags, ps->pai->appid);
1421 return RESOURCED_ERROR_NONE;
/*
 * Convert a percentage of total RAM into a threshold in MB.
 * @ratio is a percent value (e.g. 5.0 for 5% of total RAM).
 */
1424 static inline int calculate_threshold_size(double ratio)
1426 unsigned long long size_bytes = (double)totalram_bytes * ratio / 100.0;
1427 return BYTE_TO_MBYTE(size_bytes);
/*
 * Apply parsed memory-config values: per-cgroup limits, per-level
 * thresholds (percent-of-RAM or absolute MB), OOM popup flag, and
 * per-app-type memory limits/actions.
 * NOTE(review): the percent and absolute branches duplicate the same
 * MEM_LEVEL_OOM leave/proactive derivation — candidate for a helper once
 * the full file is in view.
 */
1430 static void load_configs(void)
1432 struct memcg_conf *memcg_conf = get_memcg_conf();
1434 /* set MemoryGroupLimit section */
1435 for (int cgroup = MEMCG_THROTTLING; cgroup < MEMCG_END; cgroup++) {
1436 if (memcg_conf->cgroup_limit[cgroup] > 0.0)
1437 memcg_info_set_limit(get_memcg_info(cgroup),
1438 memcg_conf->cgroup_limit[cgroup]/100.0, totalram_bytes);
1441 /* set MemoryLevelThreshold section */
1442 for (int lvl = MEM_LEVEL_MEDIUM; lvl < MEM_LEVEL_MAX; lvl++) {
/* threshold given as percent of total RAM */
1443 if (memcg_conf->threshold[lvl].percent &&
1444 memcg_conf->threshold[lvl].threshold > 0) {
1445 memcg_set_threshold(MEMCG_ROOT, lvl,
1446 calculate_threshold_size(memcg_conf->threshold[lvl].threshold));
/* Derive leave/proactive thresholds from the OOM level (x1.5 factors). */
1448 if (lvl == MEM_LEVEL_OOM) {
1449 memcg_set_leave_threshold(MEMCG_ROOT,
1450 get_memcg_info(MEMCG_ROOT)->threshold_mb[lvl] * 1.5);
1451 proactive_threshold_mb = get_memcg_info(MEMCG_ROOT)->threshold_leave_mb;
1452 proactive_leave_mb = proactive_threshold_mb * 1.5;
/* threshold given as absolute MB */
1455 else if (memcg_conf->threshold[lvl].threshold > 0) {
1456 memcg_set_threshold(MEMCG_ROOT, lvl,
1457 memcg_conf->threshold[lvl].threshold);
1459 if (lvl == MEM_LEVEL_OOM) {
1460 memcg_set_leave_threshold(MEMCG_ROOT,
1461 get_memcg_info(MEMCG_ROOT)->threshold_mb[lvl] * 1.5);
1462 proactive_threshold_mb = get_memcg_info(MEMCG_ROOT)->threshold_leave_mb;
1463 proactive_leave_mb = proactive_threshold_mb * 1.5;
1468 oom_popup_enable = memcg_conf->oom_popup;
1470 /* set MemoryAppTypeLimit and MemoryAppStatusLimit section */
1471 lowmem_memory_init(memcg_conf->service.memory_bytes, memcg_conf->widget.memory_bytes,
1472 memcg_conf->guiapp.memory_bytes, memcg_conf->background.memory_bytes);
1473 lowmem_action_init(memcg_conf->service.action, memcg_conf->widget.action,
1474 memcg_conf->guiapp.action, memcg_conf->background.action);
/*
 * Log the effective memory configuration: per-cgroup limits, per-level
 * thresholds, LMK victim/threshold settings, and the OOM popup flag.
 * Informational only — no state is modified.
 */
1479 static void print_mem_configs(void)
1481 /* print info of Memory section */
1482 for (int cgroup = MEMCG_THROTTLING; cgroup < MEMCG_END; cgroup++) {
1483 _I("[MEMORY-CGROUP] set memory for cgroup '%s' to %llu bytes",
1484 lowmem_convert_cgroup_type_to_str(cgroup), get_memcg_info(cgroup)->limit_bytes);
1487 for (int cgroup = MEMCG_ROOT; cgroup < MEMCG_END; cgroup++) {
1488 for (int mem_lvl = 0; mem_lvl < MEM_LEVEL_MAX; mem_lvl++) {
1489 _I("[MEMORY-LEVEL] set threshold of %s for memory level '%s' to %u MB", lowmem_convert_cgroup_type_to_str(cgroup),
1490 convert_memstate_to_str(mem_lvl), get_memcg_info(cgroup)->threshold_mb[mem_lvl]);
1494 _I("[LMK] set number of max victims as %d", num_max_victims);
1495 _I("[LMK] set threshold leave to %u MB", get_root_memcg_info()->threshold_leave_mb);
1496 _I("[LMK] set proactive threshold to %u MB", proactive_threshold_mb);
1497 _I("[LMK] set proactive low memory killer leave to %u MB", proactive_leave_mb);
1499 /* print info of POPUP section */
1500 _I("[POPUP] oom popup is %s", oom_popup_enable == true ? "enabled" : "disabled");
1503 /* TODO: decide whether lowmem_fd_start/lowmem_fd_stop are needed here. */
/*
 * Module init: create the memory cgroup hierarchy, set parameters and
 * thresholds, start the LMK worker thread, initialize the limit and
 * system submodules, and register the prelaunch notifier.
 * NOTE(review): error-path lines after lowmem_activate_worker() and the
 * final return were lost in extraction.
 */
1504 static int lowmem_init(void)
1506 int ret = RESOURCED_ERROR_NONE;
1508 _D("resourced memory init start");
1511 ret = memcg_make_full_subdir(MEMCG_PATH);
1512 ret_value_msg_if(ret < 0, ret, "memory cgroup init failed\n");
1513 memcg_params_init();
1515 setup_memcg_params();
1517 /* default configuration */
1520 /* this function should be called after parsing configurations */
1521 memcg_write_limiter_params();
1522 print_mem_configs();
1524 /* make a worker thread called low memory killer */
1525 ret = lowmem_activate_worker();
1527 _E("[LMK] oom thread create failed\n");
1532 lowmem_limit_init();
1533 lowmem_system_init();
1535 register_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
/*
 * Module teardown: stop the LMK worker, shut down the limit and system
 * submodules, and unregister the prelaunch notifier.
 * Mirrors lowmem_init() in reverse order.
 */
1540 static int lowmem_exit(void)
1542 lowmem_deactivate_worker();
1543 lowmem_limit_exit();
1544 lowmem_system_exit();
1546 unregister_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
1548 return RESOURCED_ERROR_NONE;
/* module_ops .init adapter; @data is unused. */
1551 static int resourced_memory_init(void *data)
1553 return lowmem_init();
/* module_ops .exit adapter; @data is unused. */
1556 static int resourced_memory_finalize(void *data)
1558 return lowmem_exit();
/*
 * Drive the memory-state machine. From the visible code: re-evaluate the
 * level from current available memory, then trigger that level's action.
 * NOTE(review): the @state/@force handling (lines between the signature
 * and the available-memory read) was lost in extraction — presumably
 * @force uses @state directly instead of the computed level; confirm
 * against the full source.
 */
1561 void lowmem_change_memory_state(int state, int force)
1568 unsigned int available_mb = proc_get_mem_available();
1569 mem_state = lowmem_check_mem_state(available_mb);
1572 lowmem_trigger_memory_state_action(mem_state);
/* Accessor for total RAM in KB units; body lost in extraction —
 * presumably returns a cached ktotalram value. TODO confirm. */
1575 unsigned long lowmem_get_ktotalram(void)
/* Accessor: total RAM in bytes, cached at init. */
1580 unsigned long long lowmem_get_totalram(void)
1582 return totalram_bytes;
/*
 * Rebuild @pai's cached memcg bookkeeping from the kernel's view: read
 * the main pid's current memory-cgroup path and match it against the
 * known cgroup tree, walking from the most specific (MEMCG_END-1) down
 * to MEMCG_ROOT. If the path contains the app id, the app is under a
 * per-app limit cgroup.
 * NOTE(review): declarations of ret/index/cgpath, error handling, and
 * loop-control lines (continue/break) were lost in extraction.
 */
1585 void lowmem_restore_memcg(struct proc_app_info *pai)
1589 struct cgroup *cgroup = NULL;
1590 struct memcg_info *mi = NULL;
1591 pid_t pid = pai->main_pid;
1593 ret = cgroup_pid_get_path("memory", pid, &cgpath);
/* Walk candidates from most specific cgroup toward the root. */
1597 for (index = MEMCG_END-1; index >= MEMCG_ROOT; index--) {
1598 cgroup = get_cgroup_tree(index);
1602 mi = cgroup->memcg_info;
/* Skip entries without a hashed directory name. */
1606 if (!strcmp(cgroup->hashname, ""))
1608 if (strstr(cgpath, cgroup->hashname))
1611 pai->memory.memcg_idx = index;
1612 pai->memory.memcg_info = mi;
/* Path containing the app id means a dedicated per-app limit cgroup. */
1613 if(strstr(cgpath, pai->appid))
1614 pai->memory.use_mem_limit = true;
/*
 * Module descriptor: registers the memory/LMK module with resourced.
 * MODULE_PRIORITY_EARLY — memory management must come up before modules
 * that depend on cgroup thresholds being in place.
 */
1619 static struct module_ops memory_modules_ops = {
1620 .priority = MODULE_PRIORITY_EARLY,
1622 .init = resourced_memory_init,
1623 .exit = resourced_memory_finalize,
1626 MODULE_REGISTER(&memory_modules_ops)