4 * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved.
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
20 * @file vmpressure-lowmem-handler.c
22 * @desc lowmem handler using memcgroup
24 * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
36 #include <sys/types.h>
39 #include <sys/sysinfo.h>
40 #include <sys/resource.h>
43 #include <eventsystem.h>
49 #include "lowmem-dbus.h"
50 #include "lowmem-system.h"
51 #include "lowmem-limit.h"
52 #include "proc-common.h"
55 #include "resourced.h"
58 #include "config-parser.h"
60 #include "swap-common.h"
62 #include "memory-cgroup.h"
63 #include "heart-common.h"
64 #include "proc-main.h"
65 #include "dbus-handler.h"
67 #include "fd-handler.h"
68 #include "resourced-helper-worker.h"
69 #include "dedup-common.h"
72 #define MAX_PROACTIVE_HIGH_VICTIMS 4
73 #define FOREGROUND_VICTIMS 1
74 #define OOM_KILLER_PRIORITY -20
75 #define THRESHOLD_MARGIN 10 /* MB */
77 #define MEM_SIZE_64 64 /* MB */
78 #define MEM_SIZE_256 256 /* MB */
79 #define MEM_SIZE_448 448 /* MB */
80 #define MEM_SIZE_512 512 /* MB */
81 #define MEM_SIZE_768 768 /* MB */
82 #define MEM_SIZE_1024 1024 /* MB */
83 #define MEM_SIZE_2048 2048 /* MB */
85 /* thresholds for 64M RAM*/
86 #define PROACTIVE_64_THRES 10 /* MB */
87 #define PROACTIVE_64_LEAVE 30 /* MB */
88 #define CGROUP_ROOT_64_THRES_DEDUP 16 /* MB */
89 #define CGROUP_ROOT_64_THRES_SWAP 15 /* MB */
90 #define CGROUP_ROOT_64_THRES_LOW 8 /* MB */
91 #define CGROUP_ROOT_64_THRES_MEDIUM 5 /* MB */
92 #define CGROUP_ROOT_64_THRES_LEAVE 8 /* MB */
93 #define CGROUP_ROOT_64_NUM_VICTIMS 1
95 /* thresholds for 256M RAM */
96 #define PROACTIVE_256_THRES 50 /* MB */
97 #define PROACTIVE_256_LEAVE 80 /* MB */
98 #define CGROUP_ROOT_256_THRES_DEDUP 60 /* MB */
99 #define CGROUP_ROOT_256_THRES_SWAP 40 /* MB */
100 #define CGROUP_ROOT_256_THRES_LOW 20 /* MB */
101 #define CGROUP_ROOT_256_THRES_MEDIUM 10 /* MB */
102 #define CGROUP_ROOT_256_THRES_LEAVE 20 /* MB */
103 #define CGROUP_ROOT_256_NUM_VICTIMS 2
105 /* threshold for 448M RAM */
106 #define PROACTIVE_448_THRES 80 /* MB */
107 #define PROACTIVE_448_LEAVE 100 /* MB */
108 #define CGROUP_ROOT_448_THRES_DEDUP 120 /* MB */
109 #define CGROUP_ROOT_448_THRES_SWAP 100 /* MB */
110 #define CGROUP_ROOT_448_THRES_LOW 60 /* MB */
111 #define CGROUP_ROOT_448_THRES_MEDIUM 50 /* MB */
112 #define CGROUP_ROOT_448_THRES_LEAVE 70 /* MB */
113 #define CGROUP_ROOT_448_NUM_VICTIMS 5
115 /* threshold for 512M RAM */
116 #define PROACTIVE_512_THRES 80 /* MB */
117 #define PROACTIVE_512_LEAVE 100 /* MB */
118 #define CGROUP_ROOT_512_THRES_DEDUP 140 /* MB */
119 #define CGROUP_ROOT_512_THRES_SWAP 100 /* MB */
120 #define CGROUP_ROOT_512_THRES_LOW 70 /* MB */
121 #define CGROUP_ROOT_512_THRES_MEDIUM 60 /* MB */
122 #define CGROUP_ROOT_512_THRES_LEAVE 80 /* MB */
123 #define CGROUP_ROOT_512_NUM_VICTIMS 5
125 /* threshold for 768 RAM */
126 #define PROACTIVE_768_THRES 100 /* MB */
127 #define PROACTIVE_768_LEAVE 130 /* MB */
128 #define CGROUP_ROOT_768_THRES_DEDUP 180 /* MB */
129 #define CGROUP_ROOT_768_THRES_SWAP 150 /* MB */
130 #define CGROUP_ROOT_768_THRES_LOW 90 /* MB */
131 #define CGROUP_ROOT_768_THRES_MEDIUM 80 /* MB */
132 #define CGROUP_ROOT_768_THRES_LEAVE 100 /* MB */
133 #define CGROUP_ROOT_768_NUM_VICTIMS 5
135 /* threshold for more than 1024M RAM */
136 #define PROACTIVE_1024_THRES 150 /* MB */
137 #define PROACTIVE_1024_LEAVE 230 /* MB */
138 #define CGROUP_ROOT_1024_THRES_DEDUP 400 /* MB */
139 #define CGROUP_ROOT_1024_THRES_SWAP 300 /* MB */
140 #define CGROUP_ROOT_1024_THRES_LOW 120 /* MB */
141 #define CGROUP_ROOT_1024_THRES_MEDIUM 100 /* MB */
142 #define CGROUP_ROOT_1024_THRES_LEAVE 150 /* MB */
143 #define CGROUP_ROOT_1024_NUM_VICTIMS 5
145 /* threshold for more than 2048M RAM */
146 #define PROACTIVE_2048_THRES 200 /* MB */
147 #define PROACTIVE_2048_LEAVE 500 /* MB */
148 #define CGROUP_ROOT_2048_THRES_DEDUP 400 /* MB */
149 #define CGROUP_ROOT_2048_THRES_SWAP 300 /* MB */
150 #define CGROUP_ROOT_2048_THRES_LOW 200 /* MB */
151 #define CGROUP_ROOT_2048_THRES_MEDIUM 160 /* MB */
152 #define CGROUP_ROOT_2048_THRES_LEAVE 300 /* MB */
153 #define CGROUP_ROOT_2048_NUM_VICTIMS 10
155 /* threshold for more than 3072M RAM */
156 #define PROACTIVE_3072_THRES 300 /* MB */
157 #define PROACTIVE_3072_LEAVE 700 /* MB */
158 #define CGROUP_ROOT_3072_THRES_DEDUP 600 /* MB */
159 #define CGROUP_ROOT_3072_THRES_SWAP 500 /* MB */
160 #define CGROUP_ROOT_3072_THRES_LOW 400 /* MB */
161 #define CGROUP_ROOT_3072_THRES_MEDIUM 250 /* MB */
162 #define CGROUP_ROOT_3072_THRES_LEAVE 500 /* MB */
163 #define CGROUP_ROOT_3072_NUM_VICTIMS 10
165 static unsigned proactive_threshold_mb;
166 static unsigned proactive_leave_mb;
167 static unsigned lmk_start_threshold_mb;
170 * Resourced Low Memory Killer
171 * NOTE: planned to be moved to a separate file.
173 /*-------------------------------------------------*/
174 #define OOM_TIMER_INTERVAL_SEC 2
/* NOTE(review): despite the _MSEC suffix, both values below expand to
 * MICROseconds (G_USEC_PER_SEC based) and are passed to g_usleep().
 * Also, this expansion is unparenthesized (macro-hygiene hazard if
 * ever used inside a larger expression). */
175 #define LMW_LOOP_WAIT_TIMEOUT_MSEC OOM_TIMER_INTERVAL_SEC*(G_USEC_PER_SEC)
176 #define LMW_RETRY_WAIT_TIMEOUT_MSEC (G_USEC_PER_SEC)
/* One reclaim request handed to the LMK worker thread via its queue. */
178 struct lowmem_control {
180 * For each queued request the following properties
181 * are required with two exceptions:
182 * - status is being set by LMK
183 * - callback is optional
185 /* Processing flags (OOM_* bits, e.g. OOM_IN_DEPTH/OOM_REVISE) */
187 /* Indicator for OOM score band of targeted processes */
188 enum oom_score score;
190 /* Desired size to be restored - level to be reached (MB)*/
191 unsigned int size_mb;
192 /* Max number of processes to be considered */
194 /* Memory reclaim status (LOWMEM_RECLAIM_*) */
197 * Optional - if set, will be triggered by LMK once the request
200 void (*callback) (struct lowmem_control *);
/* State of the reclaim worker thread: the thread itself plus (in the
 * elided fields) its request queue and active/running atomic flags. */
203 struct lowmem_worker {
204 pthread_t worker_thread;
210 static struct lowmem_worker lmw;
212 /* Arrays for storing kill candidates and apps/procs;
 * lazily created by the task-info getters below and kept alive across
 * reclaim rounds via g_array_ref()/g_array_free() pairing. */
213 static GArray *lowmem_kill_candidates = NULL;
214 static GArray *lowmem_task_info_app_array = NULL;
215 static GArray *lowmem_task_info_proc_array = NULL;
217 //static int memlog_enabled;
218 //static int memlog_nr_max = DEFAULT_MEMLOG_NR_MAX;
219 /* remove logfiles to reduce to this threshold.
220 * it is about five-sixths of the memlog_nr_max. */
221 //static int memlog_remove_batch_thres = (DEFAULT_MEMLOG_NR_MAX * 5) / 6;
222 //static char *memlog_path = DEFAULT_MEMLOG_PATH;
223 //static char *memlog_prefix[MEMLOG_MAX];
/* Atomic accessors for the worker's 'active' (accepting requests) flag. */
225 #define LOWMEM_WORKER_IS_ACTIVE(_lmw) g_atomic_int_get(&(_lmw)->active)
226 #define LOWMEM_WORKER_ACTIVATE(_lmw) g_atomic_int_set(&(_lmw)->active, 1)
227 #define LOWMEM_WORKER_DEACTIVATE(_lmw) g_atomic_int_set(&(_lmw)->active, 0)
/* Atomic accessors for the worker's 'running' (currently reclaiming) flag. */
229 #define LOWMEM_WORKER_IS_RUNNING(_lmw) g_atomic_int_get(&(_lmw)->running)
230 #define LOWMEM_WORKER_RUN(_lmw) g_atomic_int_set(&(_lmw)->running, 1)
231 #define LOWMEM_WORKER_IDLE(_lmw) g_atomic_int_set(&(_lmw)->running, 0)
/* Request lifecycle: allocate zeroed / free via GSlice; SET fills all
 * caller-supplied fields in one shot. */
233 #define LOWMEM_NEW_REQUEST() g_slice_new0(struct lowmem_control)
235 #define LOWMEM_DESTROY_REQUEST(_ctl) \
236 g_slice_free(typeof(*(_ctl)), _ctl); \
238 #define LOWMEM_SET_REQUEST(c, __flags, __score, __size, __count, __cb) \
240 (c)->flags = __flags; (c)->score = __score; \
241 (c)->size_mb= __size; (c)->count = __count; \
242 (c)->callback = __cb; \
245 static void lowmem_queue_request(struct lowmem_worker *lmw,
246 struct lowmem_control *ctl)
/* Hand one reclaim request to the worker thread. If the worker is not
 * active the request is silently not queued — NOTE(review): confirm the
 * caller still owns (and frees) ctl in that case. */
248 if (LOWMEM_WORKER_IS_ACTIVE(lmw))
249 g_async_queue_push(lmw->queue, ctl);
/* Discard every request still pending on the worker queue, destroying
 * each one while holding the queue lock. */
253 static void lowmem_drain_queue(struct lowmem_worker *lmw)
255 struct lowmem_control *ctl;
257 g_async_queue_lock(lmw->queue);
258 while ((ctl = g_async_queue_try_pop_unlocked(lmw->queue))) {
261 LOWMEM_DESTROY_REQUEST(ctl);
263 g_async_queue_unlock(lmw->queue);
/* GDestroyNotify-style helper: free one queued lowmem_control request
 * (used when tearing the queue down). */
266 static void lowmem_request_destroy(gpointer data)
268 struct lowmem_control *ctl = (struct lowmem_control*) data;
272 LOWMEM_DESTROY_REQUEST(ctl);
275 /*-------------------------------------------------*/
277 /* low memory action function for cgroup */
278 /* low memory action function */
279 static void swap_compact_act(void);
280 static void lmk_act(void);
/* Per-memory-level policy hooks: 'governor' decides whether to act
 * (negative return vetoes), 'action' performs the reclaim/compaction. */
282 struct lowmem_controller_ops {
283 int (*governor)(void *data);
284 int (*action)(void *data);
/* One governor/action pair per MEM_LEVEL_*; filled in by the two
 * registration functions below. */
286 static struct lowmem_controller_ops lowmem_actions[MEM_LEVEL_MAX] = { NULL };
/* Register the governor callback for one memory-pressure level.
 * Only levels accepted by the (partially elided) switch are stored;
 * MEDIUM and CRITICAL are visibly among them. */
287 void lowmem_initialize_controller_ops_governor(int mem_state, int (*governor)(void *data))
291 case MEM_LEVEL_MEDIUM:
293 case MEM_LEVEL_CRITICAL:
294 lowmem_actions[mem_state].governor = governor;
/* Register the action callback for one memory-pressure level; mirrors
 * lowmem_initialize_controller_ops_governor() above. */
300 void lowmem_initialize_controller_ops_action(int mem_state, int (*action)(void *data))
304 case MEM_LEVEL_MEDIUM:
305 case MEM_LEVEL_CRITICAL:
306 lowmem_actions[mem_state].action = action;
313 static size_t cur_mem_state = MEM_LEVEL_HIGH;
314 static int num_max_victims = MAX_MEMORY_CGROUP_VICTIMS;
316 static unsigned long long totalram_bytes;
317 static unsigned long totalram_kb;
319 static bool oom_popup_enable;
320 static bool oom_popup;
321 static bool memcg_swap_status;
/* Return the cached memcg swap status flag. */
322 bool lowmem_get_memcg_swap_status()
324 return memcg_swap_status;
/* Update the cached memcg swap status flag. */
326 void lowmem_set_memcg_swap_status(bool status)
328 memcg_swap_status = status;
330 static int fragmentation_size;
/* Map a MEMCG_* cgroup type to a human-readable name; the fallback
 * return for out-of-range values is outside this view. */
332 const char *lowmem_convert_cgroup_type_to_str(int type)
334 static const char *type_table[] =
336 if (type >= MEMCG_ROOT && type <= MEMCG_THROTTLING)
337 return type_table[type];
/* Map a LOWMEM_RECLAIM_* status to a short string for log messages. */
342 static const char *convert_status_to_str(int status)
344 static const char *status_table[] =
345 {"none", "done", "drop", "cont", "retry", "next_type"};
346 if(status >= LOWMEM_RECLAIM_NONE && status <= LOWMEM_RECLAIM_NEXT_TYPE)
347 return status_table[status];
348 return "error status";
/* Map a MEM_LEVEL_* state to a short string for log messages; the
 * fallback return for out-of-range values is outside this view. */
351 static const char *convert_memstate_to_str(int mem_state)
353 static const char *state_table[] = {"mem high", "mem medium",
354 "mem low", "mem critical", "mem oom",};
355 if (mem_state >= 0 && mem_state < MEM_LEVEL_MAX)
356 return state_table[mem_state];
/* Ask the system popup service (via D-Bus "PopupLaunch") to display the
 * low-memory OOM popup; the a{ss} argument carries the popup content id.
 * Returns the synchronous D-Bus call result. */
360 static int lowmem_launch_oompopup(void)
362 GVariantBuilder *const gv_builder = g_variant_builder_new(G_VARIANT_TYPE("a{ss}"));
363 g_variant_builder_add(gv_builder, "{ss}", "_SYSPOPUP_CONTENT_", "lowmemory_oom");
365 GVariant *const params = g_variant_new("(a{ss})", gv_builder);
366 g_variant_builder_unref(gv_builder);
368 int ret = d_bus_call_method_sync_gvariant(SYSTEM_POPUP_BUS_NAME,
369 SYSTEM_POPUP_PATH_SYSTEM, SYSTEM_POPUP_IFACE_SYSTEM,
370 "PopupLaunch", params);
372 g_variant_unref(params);
/* Cache total system RAM from sysinfo() in totalram_bytes/totalram_kb
 * and publish it via register_totalram_bytes(); logs on failure. */
377 static inline void get_total_memory(void)
384 totalram_bytes = (unsigned long long)si.totalram * si.mem_unit;
385 totalram_kb = BYTE_TO_KBYTE(totalram_bytes);
387 register_totalram_bytes(totalram_bytes);
390 _E("Failed to get total ramsize from the kernel");
/* Return the RSS usage of a task in KB: the single tsk->pid when
 * tsk->pids is NULL, otherwise the sum over every pid in tsk->pids
 * (e.g. browser main process + its web processes). Pids whose /proc
 * entry has vanished contribute nothing. */
394 unsigned int lowmem_get_task_mem_usage_rss(const struct task_info *tsk)
396 unsigned int size_kb = 0, total_size_kb = 0;
401 * If pids are allocated only when there are multiple processes with
402 * the same pgid e.g., browser and web process. Mostly, single process
405 if (tsk->pids == NULL) {
406 ret = proc_get_ram_usage(tsk->pid, &size_kb);
408 /* If there is no proc entry for given pid the process
409 * should be abandoned during further processing
412 _D("failed to get rss memory usage of %d", tsk->pid);
417 for (index = 0; index < tsk->pids->len; index++) {
418 pid = g_array_index(tsk->pids, pid_t, index);
419 ret = proc_get_ram_usage(pid, &size_kb);
420 if (ret != RESOURCED_ERROR_NONE)
422 total_size_kb += size_kb;
425 return total_size_kb;
/* Free a GArray of struct task_info: release each task's per-pid
 * GArray first, then the container itself. */
428 static void lowmem_free_task_info_array(GArray *array)
435 for (i = 0; i < array->len; i++) {
436 struct task_info *tsk;
438 tsk = &g_array_index(array, struct task_info, i);
440 g_array_free(tsk->pids, true);
443 g_array_free(array, true);
/* True for a forced kill request that skips the available-memory check. */
446 static inline int is_dynamic_process_killer(int flags)
448 return (flags & OOM_FORCE) && !(flags & OOM_NOMEMORY_CHECK);
/* Compute how many MB must still be freed to reach 'thres', padded by
 * THRESHOLD_MARGIN because a launching app keeps consuming memory.
 * Returns 0 when available memory already meets the threshold.
 * NOTE(review): the 'avail' out-parameter is not visibly written in
 * this view — confirm the elided lines store 'available' through it. */
451 static unsigned int is_memory_recovered(unsigned int *avail, unsigned int thres)
453 unsigned int available = proc_get_mem_available();
454 unsigned int should_be_freed_mb = 0;
456 if (available < thres)
457 should_be_freed_mb = thres - available;
459 * free THRESHOLD_MARGIN more than real should be freed,
460 * because launching app is consuming up the memory.
462 if (should_be_freed_mb > 0)
463 should_be_freed_mb += THRESHOLD_MARGIN;
467 return should_be_freed_mb;
/* Launch the OOM popup at most once, gated by the oom_popup_enable
 * setting and the oom_popup already-shown latch. */
470 static void lowmem_oom_popup_once(void)
472 if (oom_popup_enable && !oom_popup) {
473 lowmem_launch_oompopup();
/* Read the process security attribute (privilege label) of 'pid' from
 * PROC_APP_ATTR_PATH into 'name' (always NUL-terminated, truncated to
 * 'len'). Error-return paths are partially outside this view. */
478 static int get_privilege(pid_t pid, char *name, size_t len)
481 char attr[MAX_NAME_LENGTH];
485 snprintf(path, sizeof(path), PROC_APP_ATTR_PATH, pid);
487 fp = fopen(path, "r");
491 attr_len = fread(attr, 1, sizeof(attr) - 1, fp);
496 attr[attr_len] = '\0';
498 snprintf(name, len, "%s", attr);
/* Classify 'pid' by its privilege label: "System", "User" and
 * "System::Privileged" are distinguished. The value returned for each
 * label is outside this view — presumably app vs. non-app; confirm
 * against callers. */
502 static int is_app(pid_t pid)
504 char attr[MAX_NAME_LENGTH];
508 ret = get_privilege(pid, attr, sizeof(attr));
510 _E("Failed to get privilege of PID=%d, ret=%d.", pid, ret);
514 len = strlen(attr) + 1;
516 if (!strncmp("System", attr, len))
519 if (!strncmp("User", attr, len))
522 if (!strncmp("System::Privileged", attr, len))
/* Build (and return a new reference to) the module-level array of
 * task_info entries for running applications whose oom_score_adj lies
 * in [start_oom, end_oom]. On OOM_REVISE retries, apps already marked
 * oom_killed are skipped. Each entry aggregates the app's main pid and
 * its child pids so RSS is accounted per app group. */
528 static GArray *lowmem_get_task_info_app(int killer_flags, int start_oom, int end_oom)
531 GSList *proc_app_list = proc_app_list_open();
533 if (!lowmem_task_info_app_array)
534 lowmem_task_info_app_array = g_array_new(false, false, sizeof(struct task_info));
536 gslist_for_each_item(iter, proc_app_list) {
537 struct proc_app_info *pai = (struct proc_app_info *)(iter->data);
538 struct task_info task;
543 if (pai->memory.oom_score_adj > end_oom
544 || pai->memory.oom_score_adj < start_oom)
547 if ((killer_flags & OOM_REVISE) && pai->memory.oom_killed) {
549 * If it is not the first attempt to kill this app and
550 * the app is already killed
555 task.pid = pai->main_pid;
557 task.pids = g_array_new(false, false, sizeof(pid_t));
558 g_array_append_val(task.pids, task.pid);
559 for (GSList *iter_child = pai->childs; iter_child != NULL; iter_child = g_slist_next(iter_child)) {
560 pid_t child = GPOINTER_TO_PID(iter_child->data);
561 g_array_append_val(task.pids, child);
566 task.pgid = getpgid(task.pid);
567 task.oom_score_adj = pai->memory.oom_score_adj;
568 task.size = lowmem_get_task_mem_usage_rss(&task); /* KB */
569 task.proc_app_info_oom_killed = &(pai->memory.oom_killed);
570 task.proc_app_info_flags = pai->flags;
573 * Before oom_score_adj of favourite (oom_score = 270)
574 * applications is independent of lru_state, now we consider
575 * lru_state, while killing favourite process.
577 if (task.oom_score_adj == OOMADJ_FAVORITE
578 && pai->lru_state >= PROC_BACKGROUND) {
580 OOMADJ_FAVORITE + OOMADJ_FAVORITE_APP_INCREASE
583 task.oom_score_lru = pai->memory.oom_score_adj;
586 g_array_append_val(lowmem_task_info_app_array, task);
589 proc_app_list_close();
/* Extra ref keeps the module-level array alive when the caller later
 * does g_array_free() on the returned pointer. */
591 g_array_ref(lowmem_task_info_app_array);
592 return lowmem_task_info_app_array;
/* Build (and return a new reference to) the module-level array of
 * task_info entries for non-application processes, by scanning /proc
 * for numeric entries. Pids in the app oom_score_adj range
 * (OOMADJ_SU, OOMADJ_APP_MAX] are skipped — those are covered by
 * lowmem_get_task_info_app(). Each entry is a single pid (no pgid
 * grouping for plain processes). */
595 static GArray *lowmem_get_task_info_proc()
598 struct dirent *dentry = NULL;
600 dp = opendir("/proc");
602 _E("fail to open /proc");
606 if (!lowmem_task_info_proc_array)
607 lowmem_task_info_proc_array = g_array_new(false, false, sizeof(struct task_info));
608 while ((dentry = readdir(dp)) != NULL) {
609 struct task_info task;
611 int oom_score_adj = 0;
613 if (!isdigit(dentry->d_name[0]))
616 pid = (pid_t)atoi(dentry->d_name);
618 continue; /* skip invalid pids or kernel processes */
627 if (proc_get_oom_score_adj(pid, &oom_score_adj) < 0) {
628 _D("pid(%d) was already terminated", pid);
633 * Check whether this array includes applications or not.
634 * If it doesn't require to get applications
635 * and pid has been already included in pai,
638 if (oom_score_adj > OOMADJ_SU && oom_score_adj <= OOMADJ_APP_MAX)
642 * Currently, for tasks in the memory cgroup,
643 * do not consider multiple tasks with one pgid.
648 task.oom_score_adj = oom_score_adj;
649 task.oom_score_lru = oom_score_adj;
650 task.size = lowmem_get_task_mem_usage_rss(&task);
652 * This task is not an app, so field variables below are not
653 * used in this task. If not app, oom_killed is NULL.
655 task.proc_app_info_oom_killed = NULL;
656 task.proc_app_info_flags = -1;
658 g_array_append_val(lowmem_task_info_proc_array, task);
/* Extra ref keeps the module-level array alive when the caller later
 * does g_array_free() on the returned pointer. */
663 g_array_ref(lowmem_task_info_proc_array);
664 return lowmem_task_info_proc_array;
/* Pluggable governor hook: selects kill candidates from the app/proc
 * task arrays. Must be registered before any reclaim runs (asserted in
 * lowmem_kill_victims()). */
667 struct lowmem_governor_ops {
668 int(*get_kill_candidates)(GArray *, GArray *, GArray *, unsigned long);
671 static struct lowmem_governor_ops governor_ops = { NULL };
672 void lowmem_initialize_governor_ops(int(*get_kill_candidates)(GArray *,
673 GArray *, GArray *, unsigned long))
675 governor_ops.get_kill_candidates = get_kill_candidates;
/* Pluggable controller hook: actually kills the selected candidates.
 * Must be registered before any reclaim runs (asserted in
 * lowmem_kill_victims()). */
678 static int(*lowmem_controller_kill_candidates)(GArray *, unsigned, unsigned int,
679 int, int, int *, unsigned int *,
680 unsigned, void(*)(void));
681 void lowmem_initialize_kill_candidates(int(*kill_candidates)(GArray *, unsigned,
682 unsigned int, int, int, int *,
683 unsigned int *, unsigned,
686 lowmem_controller_kill_candidates = kill_candidates;
690 * @brief Terminate up to max_victims processes after finding them from pai.
691 It depends on the proc_app_info lists
692 and it also references the systemservice cgroup
693 because some processes in this group don't have proc_app_info.
695 * @max_victims: max number of processes to be terminated
696 * @start_oom: find victims from start oom adj score value
697 * @end_oom: find victims to end oom adj score value
698 * @should_be_freed: amount of memory to be reclaimed (in MB)
699 * @total_size[out]: total size of possibly reclaimed memory (required)
700 * @completed: final outcome (optional)
701 * @threshold: desired value of memory available
703 static int lowmem_kill_victims(int max_victims,
704 int start_oom, int end_oom, unsigned should_be_freed, int flags,
705 unsigned int *total_size, int *completed, unsigned int threshold)
707 unsigned int total_victim_size = 0;
708 int candidates_cnt = 0;
710 int status = LOWMEM_RECLAIM_NONE;
711 GArray *task_info_app_array = NULL;
712 GArray *task_info_proc_array = NULL;
714 task_info_app_array = lowmem_get_task_info_app(flags, start_oom, end_oom);
716 * If start_oom == OOMADJ_SU, processes in /proc will be
717 * the lowmem_kill_candidates to handle low memory situation.
718 * Malicious system process can be found even though it has
721 task_info_proc_array = (start_oom == OOMADJ_SU)
722 ? lowmem_get_task_info_proc()
725 /* Get the victim candidates from lowmem governor */
726 if (!lowmem_kill_candidates)
727 lowmem_kill_candidates = g_array_new(false, false, sizeof(struct task_info *));
729 assert(governor_ops.get_kill_candidates != NULL);
730 candidates_cnt = governor_ops.get_kill_candidates(
731 lowmem_kill_candidates,
733 task_info_proc_array,
736 _D("[LMK] candidates_cnt=%d", candidates_cnt);
737 if (candidates_cnt <= 0) {
/* Nothing killable in this oom band; tell the caller to move on. */
738 status = LOWMEM_RECLAIM_NEXT_TYPE;
742 assert(lowmem_controller_kill_candidates != NULL);
743 victim_cnt = lowmem_controller_kill_candidates(lowmem_kill_candidates,
744 should_be_freed, threshold,
746 &status, &total_victim_size,
747 lmk_start_threshold_mb,
748 lowmem_oom_popup_once);
750 if (lowmem_kill_candidates) {
751 /* Prevents the GArray to be really freed */
752 g_array_ref(lowmem_kill_candidates);
753 g_array_free(lowmem_kill_candidates, true);
755 lowmem_free_task_info_array(task_info_app_array);
756 lowmem_free_task_info_array(task_info_proc_array);
757 *total_size = total_victim_size;
/* NOTE(review): *completed is read here before any visible write in
 * this function — it carries the caller's current status; confirm the
 * elided lines between the controller call and this check. */
758 if(*completed != LOWMEM_RECLAIM_CONT)
761 *completed = LOWMEM_RECLAIM_NEXT_TYPE;
/* Translate an oom_score band into its [min, max] oom_score_adj range.
 * Fails for scores outside [OOM_SCORE_HIGH, OOM_SCORE_MAX]. */
765 static int calculate_range_of_oom(enum oom_score score, int *min, int *max)
767 if (score > OOM_SCORE_MAX || score < OOM_SCORE_HIGH) {
768 _E("[LMK] oom score (%d) is out of scope", score);
769 return RESOURCED_ERROR_FAIL;
772 *max = cgroup_get_highest_oom_score_adj(score);
773 *min = cgroup_get_lowest_oom_score_adj(score);
775 return RESOURCED_ERROR_NONE;
/* Process one reclaim request on the worker thread: compute the MB
 * shortfall vs. ctl->size_mb, then repeatedly call lowmem_kill_victims()
 * while escalating the oom band LOW -> MEDIUM -> HIGH -> MAX (the
 * escalation beyond LOW requires OOM_IN_DEPTH). The final
 * LOWMEM_RECLAIM_* outcome is stored in ctl->status. */
778 static void lowmem_handle_request(struct lowmem_control *ctl)
780 int start_oom, end_oom;
781 int count = 0, victim_cnt = 0;
782 int max_victim_cnt = ctl->count;
783 int status = LOWMEM_RECLAIM_NONE;
784 unsigned int available_mb = 0;
785 unsigned int total_size_mb = 0;
786 unsigned int current_size = 0;
787 unsigned int reclaim_size_mb, shortfall_mb = 0;
788 enum oom_score oom_score = ctl->score;
790 available_mb = proc_get_mem_available();
791 reclaim_size_mb = ctl->size_mb > available_mb /* MB */
792 ? ctl->size_mb - available_mb : 0;
794 if (!reclaim_size_mb) {
795 status = LOWMEM_RECLAIM_DONE;
800 /* Prepare LMK to start doing it's job. Check preconditions. */
801 if (calculate_range_of_oom(oom_score, &start_oom, &end_oom))
804 lmk_start_threshold_mb = get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM];
805 shortfall_mb = is_memory_recovered(&available_mb, ctl->size_mb);
807 if (!shortfall_mb || !reclaim_size_mb) {
808 status = LOWMEM_RECLAIM_DONE;
/* NOTE(review): '¤t_size' below is an encoding artifact of
 * '&current_size' (the out-parameter for reclaimed KB) — restore it
 * when repairing this file. */
814 victim_cnt = lowmem_kill_victims(max_victim_cnt, start_oom, end_oom,
815 reclaim_size_mb, ctl->flags, ¤t_size, &status, ctl->size_mb);
818 current_size = KBYTE_TO_MBYTE(current_size);
819 reclaim_size_mb -= reclaim_size_mb > current_size
820 ? current_size : reclaim_size_mb;
821 total_size_mb += current_size;
823 _I("[LMK] current: kill %d victims, reclaim_size=%uMB from %d to %d status=%s",
824 victim_cnt, current_size,
825 start_oom, end_oom, convert_status_to_str(status));
828 if ((status == LOWMEM_RECLAIM_DONE) ||
829 (status == LOWMEM_RECLAIM_DROP) ||
830 (status == LOWMEM_RECLAIM_RETRY))
834 * If it doesn't finish reclaiming memory in first operation,
835 - if flags has OOM_IN_DEPTH,
836 try to find victims again in the active cgroup.
837 otherwise, just return because there is no more victims in the desired cgroup.
838 - if flags has OOM_REVISE,
839 it means that resourced can't find victims from proc_app_list.
840 So, it should search victims or malicious process from /proc.
841 But searching /proc leads to abnormal behaviour.
842 (Make sluggish or kill same victims continuously)
843 Thus, otherwise, just return in first operation and wait some period.
845 if (oom_score == OOM_SCORE_LOW) {
846 oom_score = OOM_SCORE_MEDIUM;
848 } else if ((oom_score == OOM_SCORE_MEDIUM) && (ctl->flags & OOM_IN_DEPTH)) {
849 oom_score = OOM_SCORE_HIGH;
850 if(ctl->flags & OOM_FORCE)
851 max_victim_cnt = FOREGROUND_VICTIMS;
853 } else if ((oom_score == OOM_SCORE_HIGH) && (ctl->flags & OOM_IN_DEPTH)) {
854 status = LOWMEM_RECLAIM_RETRY;
855 ctl->score = OOM_SCORE_MAX;
857 else if (oom_score == OOM_SCORE_MAX) {
858 status = LOWMEM_RECLAIM_RETRY;
861 _I("[LMK] Done: killed %d processes reclaimed=%uMB remaining=%uMB shortfall=%uMB status=%s",
862 count, total_size_mb, reclaim_size_mb, shortfall_mb, convert_status_to_str(status));
864 /* After we finish reclaiming it's worth to remove oldest memps logs */
865 ctl->status = status;
/* Reclaim worker thread main loop: blocks on the request queue, runs
 * lowmem_handle_request() for each request, and on RETRY (without
 * OOM_SINGLE_SHOT) re-queues the work with OOM_REVISE after a wait.
 * Runs at OOM_KILLER_PRIORITY (-20). Owns and destroys every request
 * it pops; exits when the worker is deactivated. */
868 static void *lowmem_reclaim_worker(void *arg)
870 struct lowmem_worker *lmw = (struct lowmem_worker *)arg;
872 setpriority(PRIO_PROCESS, 0, OOM_KILLER_PRIORITY);
874 g_async_queue_ref(lmw->queue);
878 struct lowmem_control *ctl;
880 LOWMEM_WORKER_IDLE(lmw);
881 /* Wait on any wake-up call */
882 ctl = g_async_queue_pop(lmw->queue);
885 _W("[LMK] ctl structure is NULL");
889 if ((ctl->flags & OOM_DROP) || !LOWMEM_WORKER_IS_ACTIVE(lmw)) {
890 LOWMEM_DESTROY_REQUEST(ctl);
894 LOWMEM_WORKER_RUN(lmw);
896 _D("[LMK] %d tries", ++try_count);
897 lowmem_handle_request(ctl);
899 * Case the process failed to reclaim requested amount of memory
900 * or still under have memory pressure - try the timeout wait.
901 * There is a chance this will get woken-up in a better reality.
903 if (ctl->status == LOWMEM_RECLAIM_RETRY &&
904 !(ctl->flags & OOM_SINGLE_SHOT)) {
905 unsigned int available_mb = proc_get_mem_available();
907 if (available_mb >= ctl->size_mb) {
908 _I("[LMK] Memory restored: requested=%uMB available=%uMB\n",
909 ctl->size_mb, available_mb);
910 ctl->status = LOWMEM_RECLAIM_DONE;
913 LOWMEM_DESTROY_REQUEST(ctl);
914 LOWMEM_WORKER_IDLE(lmw);
/* Memory still short: wait ~1s (value is microseconds despite the
 * macro's _MSEC suffix) and retry this request as an OOM_REVISE pass. */
918 if (LOWMEM_WORKER_IS_ACTIVE(lmw)) {
919 g_usleep(LMW_RETRY_WAIT_TIMEOUT_MSEC);
920 ctl->flags |= OOM_REVISE;
926 * The ctl callback would check available size again.
927 * And it is last point in reclaiming worker.
928 * Resourced sent SIGKILL signal to victim processes
929 * so it should wait for a some seconds until each processes returns memory.
931 g_usleep(LMW_LOOP_WAIT_TIMEOUT_MSEC);
935 /* The lmk becomes the owner of all queued requests .. */
936 LOWMEM_DESTROY_REQUEST(ctl);
937 LOWMEM_WORKER_IDLE(lmw);
939 g_async_queue_unref(lmw->queue);
941 /* Free GArrays to save kill candidates and apps/procs */
942 if (lowmem_kill_candidates)
943 g_array_free(lowmem_kill_candidates, true);
944 lowmem_free_task_info_array(lowmem_task_info_app_array);
945 lowmem_free_task_info_array(lowmem_task_info_proc_array);
/* Return the currently cached memory-pressure level (MEM_LEVEL_*). */
950 unsigned int lowmem_get_lowmem_state()
952 return cur_mem_state;
/* Update the cached memory-pressure level, refresh the LMK start
 * threshold from the root memcg, and notify level-change listeners. */
954 void lowmem_change_lowmem_state(unsigned int mem_state)
956 cur_mem_state = mem_state;
957 lmk_start_threshold_mb = get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM];
959 resourced_notify(RESOURCED_NOTIFIER_MEM_LEVEL_CHANGED,
960 (void *)&cur_mem_state);
/* Start swapping for 'pid' under the given memory cgroup path. If the
 * process's oom_score_adj is below the OOM_SCORE_LOW band it is first
 * raised (which moves it toward MEMCG_THROTTLING), then the swap-start
 * notifier is fired with the cgroup path. */
963 void lowmem_trigger_swap(pid_t pid, char *path, bool move)
967 int lowest_oom_score_adj;
970 _E("[SWAP] Unknown memory cgroup path to swap");
974 /* In this case, corresponding process will be moved to memory MEMCG_THROTTLING.
977 error = proc_get_oom_score_adj(pid, &oom_score_adj);
979 _E("[SWAP] Cannot get oom_score_adj of pid (%d)", pid);
983 lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(OOM_SCORE_LOW);
985 if (oom_score_adj < lowest_oom_score_adj) {
986 oom_score_adj = lowest_oom_score_adj;
987 /* At the end of this function, 'lowmem_swap_memory()' will be called */
988 proc_set_oom_score_adj(pid, oom_score_adj, find_app_info(pid));
993 /* Corresponding process is already managed per app or service.
994 * In addition, if some process is already located in the MEMCG_THROTTLING, then just do swap
996 resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
/* Broadcast the memory level to the platform event system: HIGH/MEDIUM
 * map to "normal", CRITICAL to "soft warning", and the remaining
 * (elided) case to "hard warning". */
999 void lowmem_memory_level_send_system_event(int lv)
1005 case MEM_LEVEL_HIGH:
1006 case MEM_LEVEL_MEDIUM:
1008 str = EVT_VAL_MEMORY_NORMAL;
1010 case MEM_LEVEL_CRITICAL:
1011 str = EVT_VAL_MEMORY_SOFT_WARNING;
1014 str = EVT_VAL_MEMORY_HARD_WARNING;
1017 _E("Invalid state");
1021 b = bundle_create();
1023 _E("Failed to create bundle");
1027 bundle_add_str(b, EVT_KEY_LOW_MEMORY, str);
1028 eventsystem_send_system_event(SYS_EVENT_LOW_MEMORY, b);
/* CRITICAL-level action: record the state, kick swap compaction, and
 * broadcast the soft-warning system event. */
1032 static void swap_compact_act(void)
1034 lowmem_change_lowmem_state(MEM_LEVEL_CRITICAL);
1035 resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_CRITICAL);
1036 lowmem_memory_level_send_system_event(MEM_LEVEL_CRITICAL);
/* Completion callback for the OOM-level reclaim request: once reclaim
 * succeeded, transition the memory state back toward HIGH. */
1039 static void medium_cb(struct lowmem_control *ctl)
1041 if (ctl->status == LOWMEM_RECLAIM_DONE)
1043 lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
/* OOM-level action: unless a reclaim pass is already running, publish
 * the hard-warning state (vconf + system event, pausing the freezer if
 * enabled), enter MEM_LEVEL_OOM, and — when available memory is below
 * the root memcg leave-threshold — queue an OOM_IN_DEPTH reclaim
 * request for the worker thread; finally kick swap compaction. */
1046 static void lmk_act(void)
1048 unsigned int available_mb;
1050 int status = VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL;
1053 * Don't trigger reclaim worker
1054 * if it is already running
1056 if (LOWMEM_WORKER_IS_RUNNING(&lmw))
1059 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
1061 _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
1063 lowmem_memory_level_send_system_event(MEM_LEVEL_OOM);
1064 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING) {
1065 if (proc_get_freezer_status() == CGROUP_FREEZER_ENABLED)
1066 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
1067 (void *)CGROUP_FREEZER_PAUSED);
1068 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
1069 VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING);
1071 available_mb = proc_get_mem_available();
1073 lowmem_change_lowmem_state(MEM_LEVEL_OOM);
1075 if (available_mb < get_root_memcg_info()->threshold_leave_mb) {
1076 struct lowmem_control *ctl;
1078 ctl = LOWMEM_NEW_REQUEST();
1080 LOWMEM_SET_REQUEST(ctl, OOM_IN_DEPTH,
1081 OOM_SCORE_LOW, get_root_memcg_info()->threshold_leave_mb,
1082 num_max_victims, medium_cb);
1083 lowmem_queue_request(&lmw, ctl);
1087 resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_OOM);
1090 * Flush resourced memory such as other processes.
1091 * Resourced can use both many fast bins and sqlite3 cache memery.
/* Dispatch the registered governor+action pair for 'mem_state'.
 * A repeated state is ignored, EXCEPT for MEM_LEVEL_OOM — re-running
 * the OOM path must stay possible or the reclaim worker could never be
 * retriggered. The governor can veto (negative return) the action. */
1098 void lowmem_trigger_memory_state_action(int mem_state)
1101 * Check if the state we want to set is different from current
1102 * But it should except this condition if mem_state is already medium.
1103 * Otherwise, recalim worker couldn't run any more.
1105 if (mem_state != MEM_LEVEL_OOM && cur_mem_state == mem_state)
1108 switch (mem_state) {
1109 case MEM_LEVEL_HIGH:
1110 case MEM_LEVEL_MEDIUM:
1112 assert(lowmem_actions[mem_state].governor != NULL);
1113 assert(lowmem_actions[mem_state].action != NULL);
1114 if (lowmem_actions[mem_state].governor(NULL) < 0)
1116 lowmem_actions[mem_state].action(NULL);
1118 case MEM_LEVEL_CRITICAL:
1119 assert(lowmem_actions[mem_state].governor != NULL);
1120 assert(lowmem_actions[mem_state].action != NULL);
1121 if (lowmem_actions[mem_state].governor(NULL) < 0)
1123 lowmem_actions[mem_state].action(NULL);
/* Map available memory (MB) to a MEM_LEVEL_* state by scanning from
 * the most severe level down: non-OOM levels compare against the root
 * memcg per-level thresholds, OOM against lmk_start_threshold_mb. */
1134 unsigned int lowmem_check_mem_state(unsigned int available_mb)
1137 for (mem_state = MEM_LEVEL_MAX - 1; mem_state > MEM_LEVEL_HIGH; mem_state--) {
1138 if (mem_state != MEM_LEVEL_OOM &&
1139 available_mb <= get_root_memcg_info()->threshold_mb[mem_state])
1141 else if (mem_state == MEM_LEVEL_OOM && available_mb <= lmk_start_threshold_mb)
1148 /* setup memcg parameters depending on total ram size. */
1149 static void setup_memcg_params(void)
1151 unsigned long total_ramsize_mb;
1154 total_ramsize_mb = BYTE_TO_MBYTE(totalram_bytes);
1156 _D("Total: %lu MB", total_ramsize_mb);
1157 if (total_ramsize_mb <= MEM_SIZE_64) {
1158 /* set thresholds for ram size 64M */
1159 proactive_threshold_mb = PROACTIVE_64_THRES;
1160 proactive_leave_mb = PROACTIVE_64_LEAVE;
1161 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_64_THRES_DEDUP);
1162 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_64_THRES_SWAP);
1163 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_64_THRES_LOW);
1164 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_64_THRES_MEDIUM);
1165 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_64_THRES_LEAVE);
1166 num_max_victims = CGROUP_ROOT_64_NUM_VICTIMS;
1167 } else if (total_ramsize_mb <= MEM_SIZE_256) {
1168 /* set thresholds for ram size 256M */
1169 proactive_threshold_mb = PROACTIVE_256_THRES;
1170 proactive_leave_mb = PROACTIVE_256_LEAVE;
1171 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_256_THRES_DEDUP);
1172 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_256_THRES_SWAP);
1173 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_256_THRES_LOW);
1174 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_256_THRES_MEDIUM);
1175 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_256_THRES_LEAVE);
1176 num_max_victims = CGROUP_ROOT_256_NUM_VICTIMS;
1177 } else if (total_ramsize_mb <= MEM_SIZE_448) {
1178 /* set thresholds for ram size 448M */
1179 proactive_threshold_mb = PROACTIVE_448_THRES;
1180 proactive_leave_mb = PROACTIVE_448_LEAVE;
1181 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_448_THRES_DEDUP);
1182 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_448_THRES_SWAP);
1183 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_448_THRES_LOW);
1184 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_448_THRES_MEDIUM);
1185 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_448_THRES_LEAVE);
1186 num_max_victims = CGROUP_ROOT_448_NUM_VICTIMS;
1187 } else if (total_ramsize_mb <= MEM_SIZE_512) {
1188 /* set thresholds for ram size 512M */
1189 proactive_threshold_mb = PROACTIVE_512_THRES;
1190 proactive_leave_mb = PROACTIVE_512_LEAVE;
1191 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_512_THRES_DEDUP);
1192 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_512_THRES_SWAP);
1193 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_512_THRES_LOW);
1194 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_512_THRES_MEDIUM);
1195 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_512_THRES_LEAVE);
1196 num_max_victims = CGROUP_ROOT_512_NUM_VICTIMS;
1197 } else if (total_ramsize_mb <= MEM_SIZE_768) {
1198 /* set thresholds for ram size 768M */
1199 proactive_threshold_mb = PROACTIVE_768_THRES;
1200 proactive_leave_mb = PROACTIVE_768_LEAVE;
1201 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_768_THRES_DEDUP);
1202 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_768_THRES_SWAP);
1203 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_768_THRES_LOW);
1204 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_768_THRES_MEDIUM);
1205 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_768_THRES_LEAVE);
1206 num_max_victims = CGROUP_ROOT_768_NUM_VICTIMS;
1207 } else if (total_ramsize_mb <= MEM_SIZE_1024) {
1208 /* set thresholds for ram size more than 1G */
1209 proactive_threshold_mb = PROACTIVE_1024_THRES;
1210 proactive_leave_mb = PROACTIVE_1024_LEAVE;
1211 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_1024_THRES_DEDUP);
1212 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_1024_THRES_SWAP);
1213 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_1024_THRES_LOW);
1214 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_1024_THRES_MEDIUM);
1215 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_1024_THRES_LEAVE);
1216 num_max_victims = CGROUP_ROOT_1024_NUM_VICTIMS;
1217 } else if (total_ramsize_mb <= MEM_SIZE_2048) {
1218 proactive_threshold_mb = PROACTIVE_2048_THRES;
1219 proactive_leave_mb = PROACTIVE_2048_LEAVE;
1220 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_2048_THRES_DEDUP);
1221 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_2048_THRES_SWAP);
1222 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_2048_THRES_LOW);
1223 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_2048_THRES_MEDIUM);
1224 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_2048_THRES_LEAVE);
1225 num_max_victims = CGROUP_ROOT_2048_NUM_VICTIMS;
1227 proactive_threshold_mb = PROACTIVE_3072_THRES;
1228 proactive_leave_mb = PROACTIVE_3072_LEAVE;
1229 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_3072_THRES_DEDUP);
1230 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_3072_THRES_SWAP);
1231 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_3072_THRES_LOW);
1232 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_3072_THRES_MEDIUM);
1233 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_3072_THRES_LEAVE);
1234 num_max_victims = CGROUP_ROOT_3072_NUM_VICTIMS;
1238 static int lowmem_activate_worker(void)
1240 int ret = RESOURCED_ERROR_NONE;
1242 if (LOWMEM_WORKER_IS_ACTIVE(&lmw)) {
1246 lmw.queue = g_async_queue_new_full(lowmem_request_destroy);
1248 _E("Failed to create request queue\n");
1249 return RESOURCED_ERROR_FAIL;
1251 LOWMEM_WORKER_ACTIVATE(&lmw);
1252 ret = pthread_create(&lmw.worker_thread, NULL,
1253 (void *)lowmem_reclaim_worker, (void *)&lmw);
1255 LOWMEM_WORKER_DEACTIVATE(&lmw);
1256 _E("Failed to create LMK thread: %d\n", ret);
1258 pthread_detach(lmw.worker_thread);
1259 ret = RESOURCED_ERROR_NONE;
1264 static void lowmem_deactivate_worker(void)
1266 struct lowmem_control *ctl;
1268 if (!LOWMEM_WORKER_IS_ACTIVE(&lmw))
1271 LOWMEM_WORKER_DEACTIVATE(&lmw);
1272 lowmem_drain_queue(&lmw);
1274 ctl = LOWMEM_NEW_REQUEST();
1276 _E("Critical - g_slice alloc failed - Lowmem cannot be deactivated");
1279 ctl->flags = OOM_DROP;
1280 g_async_queue_push(lmw.queue, ctl);
1281 g_async_queue_unref(lmw.queue);
1284 static void lowmem_force_reclaim_cb(struct lowmem_control *ctl)
1286 lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
1289 int lowmem_trigger_reclaim(int flags, int victims, enum oom_score score, int threshold_mb)
1291 struct lowmem_control *ctl = LOWMEM_NEW_REQUEST();
1296 flags |= OOM_FORCE | OOM_IN_DEPTH | OOM_SINGLE_SHOT;
1297 victims = victims > 0 ? victims : MAX_MEMORY_CGROUP_VICTIMS;
1298 score = score > 0 ? score : OOM_SCORE_LOW;
1299 threshold_mb = threshold_mb > 0 ? threshold_mb : get_root_memcg_info()->threshold_leave_mb;
1301 lowmem_change_memory_state(MEM_LEVEL_CRITICAL, 1);
1302 LOWMEM_SET_REQUEST(ctl, flags,
1303 score, threshold_mb, victims,
1304 lowmem_force_reclaim_cb);
1305 lowmem_queue_request(&lmw, ctl);
1310 void lowmem_trigger_swap_reclaim(enum oom_score score, unsigned long long swap_size_bytes)
1312 int size_mb, victims;
1314 victims = num_max_victims > MAX_PROACTIVE_HIGH_VICTIMS
1315 ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
1317 size_mb = get_root_memcg_info()->threshold_leave_mb + BYTE_TO_MBYTE(swap_size_bytes);
1318 lowmem_trigger_reclaim(0, victims, score, size_mb);
1321 bool lowmem_fragmentated(void)
1323 struct buddyinfo bi;
1326 ret = proc_get_buddyinfo("Normal", &bi);
1331 * The fragmentation_size is the minimum count of order-2 pages in "Normal" zone.
1332 * If total buddy pages is smaller than fragmentation_size,
1333 * resourced will detect kernel memory is fragmented.
1334 * Default value is zero in low memory device.
1336 if (bi.page[PAGE_32K] + (bi.page[PAGE_64K] << 1) + (bi.page[PAGE_128K] << 2) +
1337 (bi.page[PAGE_256K] << 3) < fragmentation_size) {
1338 _I("fragmentation detected, need to execute proactive oom killer");
1344 static void lowmem_proactive_oom_killer(int flags, char *appid)
1346 unsigned int before_mb;
1349 before_mb = proc_get_mem_available();
1351 /* If memory state is medium or normal, just return and kill in oom killer */
1352 if (before_mb < get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM] ||
1353 before_mb > proactive_leave_mb)
1356 victims = num_max_victims > MAX_PROACTIVE_HIGH_VICTIMS
1357 ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
1359 #ifdef HEART_SUPPORT
1361 * This branch is used only when HEART module is compiled in and
1362 * it's MEMORY module must be enabled. Otherwise this is skipped.
1364 struct heart_memory_data *md = heart_memory_get_memdata(appid, DATA_LATEST);
1366 unsigned int rss_mb, after_mb, size_mb;
1368 rss_mb = KBYTE_TO_MBYTE(md->avg_rss);
1372 after_mb = before_mb - rss_mb;
1374 * after launching app, ensure that available memory is
1375 * above threshold_leave
1377 if (after_mb >= get_root_memcg_info()->threshold[MEM_LEVEL_OOM])
1380 if (proactive_threshold_mb - rss_mb >= get_root_memcg_info()->threshold[MEM_LEVEL_OOM])
1381 size_mb = proactive_threshold_mb;
1383 size_mb = rss_mb + get_root_memcg_info()->threshold[MEM_LEVEL_OOM] + THRESHOLD_MARGIN;
1385 _D("history based proactive LMK : avg rss %u, available %u required = %u MB",
1386 rss_mb, before_mb, size_mb);
1387 lowmem_trigger_reclaim(0, victims, OOM_SCORE_LOW, size_mb);
1394 * When there is no history data for the launching app,
1395 * it is necessary to check current fragmentation state or application manifest file.
1396 * So, resourced feels proactive LMK is required, run oom killer based on dynamic
1399 if (lowmem_fragmentated())
1403 * run proactive oom killer only when available is larger than
1404 * dynamic process threshold
1406 if (!proactive_threshold_mb || before_mb >= proactive_threshold_mb)
1409 if (!(flags & PROC_LARGEMEMORY))
1414 * free THRESHOLD_MARGIN more than real should be freed,
1415 * because launching app is consuming up the memory.
1417 _D("Run threshold based proactive LMK: memory level to reach: %u MB\n",
1418 proactive_leave_mb + THRESHOLD_MARGIN);
1419 lowmem_trigger_reclaim(0, victims, OOM_SCORE_LOW, proactive_leave_mb + THRESHOLD_MARGIN);
1422 unsigned int lowmem_get_proactive_thres(void)
1424 return proactive_threshold_mb;
1427 static int lowmem_prelaunch_handler(void *data)
1429 struct proc_status *ps = (struct proc_status *)data;
1430 struct proc_app_info *pai = ps->pai;
1432 if (!pai || CHECK_BIT(pai->flags, PROC_SERVICEAPP))
1433 return RESOURCED_ERROR_NONE;
1435 lowmem_proactive_oom_killer(ps->pai->flags, ps->pai->appid);
1436 return RESOURCED_ERROR_NONE;
1439 static inline int calculate_threshold_size(double ratio)
1441 unsigned long long size_bytes = (double)totalram_bytes * ratio / 100.0;
1442 return BYTE_TO_MBYTE(size_bytes);
1445 static void load_configs(void)
1447 struct memcg_conf *memcg_conf = get_memcg_conf();
1449 /* set MemoryGroupLimit section */
1450 for (int cgroup = MEMCG_THROTTLING; cgroup < MEMCG_END; cgroup++) {
1451 if (memcg_conf->cgroup_limit[cgroup] > 0.0)
1452 memcg_info_set_limit(get_memcg_info(cgroup),
1453 memcg_conf->cgroup_limit[cgroup]/100.0, totalram_bytes);
1456 /* set MemoryLevelThreshold section */
1457 for (int lvl = MEM_LEVEL_MEDIUM; lvl < MEM_LEVEL_MAX; lvl++) {
1458 if (memcg_conf->threshold[lvl].percent &&
1459 memcg_conf->threshold[lvl].threshold > 0) {
1460 memcg_set_threshold(MEMCG_ROOT, lvl,
1461 calculate_threshold_size(memcg_conf->threshold[lvl].threshold));
1463 if (lvl == MEM_LEVEL_OOM) {
1464 memcg_set_leave_threshold(MEMCG_ROOT,
1465 get_memcg_info(MEMCG_ROOT)->threshold_mb[lvl] * 1.5);
1466 proactive_threshold_mb = get_memcg_info(MEMCG_ROOT)->threshold_leave_mb;
1467 proactive_leave_mb = proactive_threshold_mb * 1.5;
1470 else if (memcg_conf->threshold[lvl].threshold > 0) {
1471 memcg_set_threshold(MEMCG_ROOT, lvl,
1472 memcg_conf->threshold[lvl].threshold);
1474 if (lvl == MEM_LEVEL_OOM) {
1475 memcg_set_leave_threshold(MEMCG_ROOT,
1476 get_memcg_info(MEMCG_ROOT)->threshold_mb[lvl] * 1.5);
1477 proactive_threshold_mb = get_memcg_info(MEMCG_ROOT)->threshold_leave_mb;
1478 proactive_leave_mb = proactive_threshold_mb * 1.5;
1483 oom_popup_enable = memcg_conf->oom_popup;
1485 /* set MemoryAppTypeLimit and MemoryAppStatusLimit section */
1486 lowmem_memory_init(memcg_conf->service.memory_bytes, memcg_conf->widget.memory_bytes,
1487 memcg_conf->guiapp.memory_bytes, memcg_conf->background.memory_bytes);
1488 lowmem_action_init(memcg_conf->service.action, memcg_conf->widget.action,
1489 memcg_conf->guiapp.action, memcg_conf->background.action);
1494 static void print_mem_configs(void)
1496 /* print info of Memory section */
1497 for (int cgroup = MEMCG_THROTTLING; cgroup < MEMCG_END; cgroup++) {
1498 _I("[MEMORY-CGROUP] set memory for cgroup '%s' to %llu bytes",
1499 lowmem_convert_cgroup_type_to_str(cgroup), get_memcg_info(cgroup)->limit_bytes);
1502 for (int cgroup = MEMCG_ROOT; cgroup < MEMCG_END; cgroup++) {
1503 for (int mem_lvl = 0; mem_lvl < MEM_LEVEL_MAX; mem_lvl++) {
1504 _I("[MEMORY-LEVEL] set threshold of %s for memory level '%s' to %u MB", lowmem_convert_cgroup_type_to_str(cgroup),
1505 convert_memstate_to_str(mem_lvl), get_memcg_info(cgroup)->threshold_mb[mem_lvl]);
1509 _I("[LMK] set number of max victims as %d", num_max_victims);
1510 _I("[LMK] set threshold leave to %u MB", get_root_memcg_info()->threshold_leave_mb);
1511 _I("[LMK] set proactive threshold to %u MB", proactive_threshold_mb);
1512 _I("[LMK] set proactive low memory killer leave to %u MB", proactive_leave_mb);
1514 /* print info of POPUP section */
1515 _I("[POPUP] oom popup is %s", oom_popup_enable == true ? "enabled" : "disabled");
/* TODO: decide whether lowmem_fd_start/lowmem_fd_stop are needed here. */
1519 static int lowmem_init(void)
1521 int ret = RESOURCED_ERROR_NONE;
1523 _D("resourced memory init start");
1526 ret = memcg_make_full_subdir(MEMCG_PATH);
1527 ret_value_msg_if(ret < 0, ret, "memory cgroup init failed\n");
1528 memcg_params_init();
1530 setup_memcg_params();
1532 /* default configuration */
1535 /* this function should be called after parsing configurations */
1536 memcg_write_limiter_params();
1537 print_mem_configs();
1539 /* make a worker thread called low memory killer */
1540 ret = lowmem_activate_worker();
1542 _E("[LMK] oom thread create failed\n");
1547 lowmem_limit_init();
1548 lowmem_system_init();
1550 register_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
1555 static int lowmem_exit(void)
1557 lowmem_deactivate_worker();
1558 lowmem_limit_exit();
1559 lowmem_system_exit();
1561 unregister_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
1563 return RESOURCED_ERROR_NONE;
/* Module-ops init hook; @p data is unused. */
static int resourced_memory_init(void *data)
{
	return lowmem_init();
}
/* Module-ops exit hook; @p data is unused. */
static int resourced_memory_finalize(void *data)
{
	return lowmem_exit();
}
/**
 * @brief Switch the reported memory level and run its state action.
 *
 * @param state target MEM_LEVEL_* value, used verbatim when @p force is set.
 * @param force when non-zero, adopt @p state directly; otherwise derive the
 *              level from the currently available memory.
 */
void lowmem_change_memory_state(int state, int force)
{
	int mem_state;

	if (force) {
		mem_state = state;
	} else {
		unsigned int available_mb = proc_get_mem_available();
		mem_state = lowmem_check_mem_state(available_mb);
	}

	lowmem_trigger_memory_state_action(mem_state);
}
1590 unsigned long lowmem_get_ktotalram(void)
1595 unsigned long long lowmem_get_totalram(void)
1597 return totalram_bytes;
1600 void lowmem_restore_memcg(struct proc_app_info *pai)
1604 struct cgroup *cgroup = NULL;
1605 struct memcg_info *mi = NULL;
1606 pid_t pid = pai->main_pid;
1608 ret = cgroup_pid_get_path("memory", pid, &cgpath);
1612 for (index = MEMCG_END-1; index >= MEMCG_ROOT; index--) {
1613 cgroup = get_cgroup_tree(index);
1617 mi = cgroup->memcg_info;
1621 if (!strcmp(cgroup->hashname, ""))
1623 if (strstr(cgpath, cgroup->hashname))
1626 pai->memory.memcg_idx = index;
1627 pai->memory.memcg_info = mi;
1628 if(strstr(cgpath, pai->appid))
1629 pai->memory.use_mem_limit = true;
1634 static struct module_ops memory_modules_ops = {
1635 .priority = MODULE_PRIORITY_EARLY,
1637 .init = resourced_memory_init,
1638 .exit = resourced_memory_finalize,
1641 MODULE_REGISTER(&memory_modules_ops)