4 * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved.
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
20 * @file vmpressure-lowmem-handler.c
22 * @desc lowmem handler using memcgroup
24 * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
36 #include <sys/types.h>
39 #include <sys/sysinfo.h>
40 #include <sys/resource.h>
43 #include <eventsystem.h>
49 #include "lowmem-dbus.h"
50 #include "lowmem-monitor.h"
51 #include "lowmem-system.h"
52 #include "lowmem-limit.h"
53 #include "lowmem-governor.h"
54 #include "lowmem-controller.h"
55 #include "proc-common.h"
58 #include "resourced.h"
61 #include "config-parser.h"
63 #include "swap-common.h"
65 #include "memory-cgroup.h"
66 #include "heart-common.h"
67 #include "proc-main.h"
68 #include "dbus-handler.h"
70 #include "fd-handler.h"
71 #include "resourced-helper-worker.h"
72 #include "dedup-common.h"
73 #include "reclaim-config.h"
/* LMK tunables: victim counts, killer thread priority, and a safety margin
 * added on top of the computed reclaim target (see is_memory_recovered()). */
75 #define MAX_PROACTIVE_HIGH_VICTIMS 4
76 #define FOREGROUND_VICTIMS 1
77 #define OOM_KILLER_PRIORITY -20
78 #define THRESHOLD_MARGIN 10 /* MB */
/* Total-RAM buckets; setup_memcg_params() compares the detected RAM size
 * against these to pick one of the CGROUP_ROOT_*_THRES_* sets below. */
80 #define MEM_SIZE_64 64 /* MB */
81 #define MEM_SIZE_256 256 /* MB */
82 #define MEM_SIZE_448 448 /* MB */
83 #define MEM_SIZE_512 512 /* MB */
84 #define MEM_SIZE_768 768 /* MB */
85 #define MEM_SIZE_1024 1024 /* MB */
86 #define MEM_SIZE_2048 2048 /* MB */
88 /* thresholds for 64M RAM*/
89 #define PROACTIVE_64_THRES 10 /* MB */
90 #define PROACTIVE_64_LEAVE 30 /* MB */
91 #define CGROUP_ROOT_64_THRES_DEDUP 16 /* MB */
92 #define CGROUP_ROOT_64_THRES_SWAP 15 /* MB */
93 #define CGROUP_ROOT_64_THRES_LOW 8 /* MB */
94 #define CGROUP_ROOT_64_THRES_MEDIUM 5 /* MB */
95 #define CGROUP_ROOT_64_THRES_LEAVE 8 /* MB */
96 #define CGROUP_ROOT_64_NUM_VICTIMS 1
98 /* thresholds for 256M RAM */
99 #define PROACTIVE_256_THRES 50 /* MB */
100 #define PROACTIVE_256_LEAVE 80 /* MB */
101 #define CGROUP_ROOT_256_THRES_DEDUP 60 /* MB */
102 #define CGROUP_ROOT_256_THRES_SWAP 40 /* MB */
103 #define CGROUP_ROOT_256_THRES_LOW 20 /* MB */
104 #define CGROUP_ROOT_256_THRES_MEDIUM 10 /* MB */
105 #define CGROUP_ROOT_256_THRES_LEAVE 20 /* MB */
106 #define CGROUP_ROOT_256_NUM_VICTIMS 2
108 /* threshold for 448M RAM */
109 #define PROACTIVE_448_THRES 80 /* MB */
110 #define PROACTIVE_448_LEAVE 100 /* MB */
111 #define CGROUP_ROOT_448_THRES_DEDUP 120 /* MB */
112 #define CGROUP_ROOT_448_THRES_SWAP 100 /* MB */
113 #define CGROUP_ROOT_448_THRES_LOW 60 /* MB */
114 #define CGROUP_ROOT_448_THRES_MEDIUM 50 /* MB */
115 #define CGROUP_ROOT_448_THRES_LEAVE 70 /* MB */
116 #define CGROUP_ROOT_448_NUM_VICTIMS 5
118 /* threshold for 512M RAM */
119 #define PROACTIVE_512_THRES 80 /* MB */
120 #define PROACTIVE_512_LEAVE 100 /* MB */
121 #define CGROUP_ROOT_512_THRES_DEDUP 140 /* MB */
122 #define CGROUP_ROOT_512_THRES_SWAP 100 /* MB */
123 #define CGROUP_ROOT_512_THRES_LOW 70 /* MB */
124 #define CGROUP_ROOT_512_THRES_MEDIUM 60 /* MB */
125 #define CGROUP_ROOT_512_THRES_LEAVE 80 /* MB */
126 #define CGROUP_ROOT_512_NUM_VICTIMS 5
128 /* threshold for 768 RAM */
129 #define PROACTIVE_768_THRES 100 /* MB */
130 #define PROACTIVE_768_LEAVE 130 /* MB */
131 #define CGROUP_ROOT_768_THRES_DEDUP 180 /* MB */
132 #define CGROUP_ROOT_768_THRES_SWAP 150 /* MB */
133 #define CGROUP_ROOT_768_THRES_LOW 90 /* MB */
134 #define CGROUP_ROOT_768_THRES_MEDIUM 80 /* MB */
135 #define CGROUP_ROOT_768_THRES_LEAVE 100 /* MB */
136 #define CGROUP_ROOT_768_NUM_VICTIMS 5
138 /* threshold for more than 1024M RAM */
139 #define PROACTIVE_1024_THRES 150 /* MB */
140 #define PROACTIVE_1024_LEAVE 230 /* MB */
141 #define CGROUP_ROOT_1024_THRES_DEDUP 400 /* MB */
142 #define CGROUP_ROOT_1024_THRES_SWAP 300 /* MB */
143 #define CGROUP_ROOT_1024_THRES_LOW 120 /* MB */
144 #define CGROUP_ROOT_1024_THRES_MEDIUM 100 /* MB */
145 #define CGROUP_ROOT_1024_THRES_LEAVE 150 /* MB */
146 #define CGROUP_ROOT_1024_NUM_VICTIMS 5
148 /* threshold for more than 2048M RAM */
149 #define PROACTIVE_2048_THRES 200 /* MB */
150 #define PROACTIVE_2048_LEAVE 500 /* MB */
151 #define CGROUP_ROOT_2048_THRES_DEDUP 400 /* MB */
152 #define CGROUP_ROOT_2048_THRES_SWAP 300 /* MB */
153 #define CGROUP_ROOT_2048_THRES_LOW 200 /* MB */
154 #define CGROUP_ROOT_2048_THRES_MEDIUM 160 /* MB */
155 #define CGROUP_ROOT_2048_THRES_LEAVE 300 /* MB */
156 #define CGROUP_ROOT_2048_NUM_VICTIMS 10
158 /* threshold for more than 3072M RAM */
159 #define PROACTIVE_3072_THRES 300 /* MB */
160 #define PROACTIVE_3072_LEAVE 700 /* MB */
161 #define CGROUP_ROOT_3072_THRES_DEDUP 600 /* MB */
162 #define CGROUP_ROOT_3072_THRES_SWAP 500 /* MB */
163 #define CGROUP_ROOT_3072_THRES_LOW 400 /* MB */
164 #define CGROUP_ROOT_3072_THRES_MEDIUM 250 /* MB */
165 #define CGROUP_ROOT_3072_THRES_LEAVE 500 /* MB */
166 #define CGROUP_ROOT_3072_NUM_VICTIMS 10
/* Proactive-reclaim start/stop levels and the OOM-level LMK trigger,
 * all in MB; initialized by setup_memcg_params() for the detected RAM size. */
168 static unsigned proactive_threshold_mb;
169 static unsigned proactive_leave_mb;
170 static unsigned lmk_start_threshold_mb;
173 * Resourced Low Memory Killer
174 * NOTE: planned to be moved to a separate file.
176 /*-------------------------------------------------*/
177 #define OOM_TIMER_INTERVAL_SEC 2
/* NOTE(review): despite the _MSEC suffix these expand to microsecond
 * counts (G_USEC_PER_SEC) and are passed to g_usleep() — confirm before
 * renaming or reusing them with millisecond APIs. */
178 #define LMW_LOOP_WAIT_TIMEOUT_MSEC OOM_TIMER_INTERVAL_SEC*(G_USEC_PER_SEC)
179 #define LMW_RETRY_WAIT_TIMEOUT_MSEC (G_USEC_PER_SEC)
/* A single reclaim request queued to the LMK worker thread. */
181 struct lowmem_control {
183 * For each queued request the following properties
184 * are required with two exceptions:
185 * - status is being set by LMK
186 * - callback is optional
188 /* Processing flags*/
190 /* Indicator for OOM score of targeted processes */
191 enum oom_score score;
193 /* Desired size to be restored - level to be reached (MB)*/
194 unsigned int size_mb;
195 /* Max number of processes to be considered */
197 /* Memory reclaim status */
200 * Optional - if set, will be triggered by LMK once the request
203 void (*callback) (struct lowmem_control *);
/* State of the background reclaim worker: its thread handle plus (per the
 * macros below) atomic 'active'/'running' flags and a GAsyncQueue of requests. */
206 struct lowmem_worker {
207 pthread_t worker_thread;
/* Single file-global worker instance. */
213 static struct lowmem_worker lmw;
215 //static int memlog_enabled;
216 //static int memlog_nr_max = DEFAULT_MEMLOG_NR_MAX;
217 /* remove logfiles to reduce to this threshold.
218 * it is about five-sixths of the memlog_nr_max. */
219 //static int memlog_remove_batch_thres = (DEFAULT_MEMLOG_NR_MAX * 5) / 6;
220 //static char *memlog_path = DEFAULT_MEMLOG_PATH;
221 //static char *memlog_prefix[MEMLOG_MAX];
/* Atomic accessors for the worker's lifecycle flags:
 * 'active'  - worker accepts requests (set at activate, cleared at shutdown),
 * 'running' - worker is currently processing a request. */
223 #define LOWMEM_WORKER_IS_ACTIVE(_lmw) g_atomic_int_get(&(_lmw)->active)
224 #define LOWMEM_WORKER_ACTIVATE(_lmw) g_atomic_int_set(&(_lmw)->active, 1)
225 #define LOWMEM_WORKER_DEACTIVATE(_lmw) g_atomic_int_set(&(_lmw)->active, 0)
227 #define LOWMEM_WORKER_IS_RUNNING(_lmw) g_atomic_int_get(&(_lmw)->running)
228 #define LOWMEM_WORKER_RUN(_lmw) g_atomic_int_set(&(_lmw)->running, 1)
229 #define LOWMEM_WORKER_IDLE(_lmw) g_atomic_int_set(&(_lmw)->running, 0)
/* Request lifetime helpers backed by the GLib slice allocator. */
231 #define LOWMEM_NEW_REQUEST() g_slice_new0(struct lowmem_control)
233 #define LOWMEM_DESTROY_REQUEST(_ctl) \
234 g_slice_free(typeof(*(_ctl)), _ctl); \
/* Populate all caller-supplied fields of a request in one statement. */
236 #define LOWMEM_SET_REQUEST(c, __flags, __score, __size, __count, __cb) \
238 (c)->flags = __flags; (c)->score = __score; \
239 (c)->size_mb= __size; (c)->count = __count; \
240 (c)->callback = __cb; \
/* Hand a reclaim request to the worker thread; silently dropped (and thus
 * leaked by the caller's reckoning) when the worker is not active. */
243 static void lowmem_queue_request(struct lowmem_worker *lmw,
244 struct lowmem_control *ctl)
246 if (LOWMEM_WORKER_IS_ACTIVE(lmw))
247 g_async_queue_push(lmw->queue, ctl);
/* Discard every pending request, freeing each one; the queue lock is held
 * for the whole drain so no request can slip in mid-way. */
251 static void lowmem_drain_queue(struct lowmem_worker *lmw)
253 struct lowmem_control *ctl;
255 g_async_queue_lock(lmw->queue);
256 while ((ctl = g_async_queue_try_pop_unlocked(lmw->queue))) {
259 LOWMEM_DESTROY_REQUEST(ctl);
261 g_async_queue_unlock(lmw->queue);
/* GDestroyNotify used by the request queue to free leftover items
 * (see g_async_queue_new_full() in lowmem_activate_worker()). */
264 static void lowmem_request_destroy(gpointer data)
266 struct lowmem_control *ctl = (struct lowmem_control*) data;
270 LOWMEM_DESTROY_REQUEST(ctl);
273 /*-------------------------------------------------*/
275 /* low memory action function for cgroup */
276 /* low memory action function */
277 static void high_mem_act(void);
278 static void swap_activate_act(void);
279 static void swap_compact_act(void);
280 static void lmk_act(void);
283 static size_t cur_mem_state = MEM_LEVEL_HIGH;
284 static int num_max_victims = MAX_MEMORY_CGROUP_VICTIMS;
286 static unsigned long long totalram_bytes;
287 static unsigned long totalram_kb;
289 static bool oom_popup_enable;
290 static bool oom_popup;
291 static bool memcg_swap_status;
292 static int fragmentation_size;
294 static bool g_bg_reclaim = false;
/* Map a memcg index (MEMCG_ROOT..MEMCG_END) to its cgroup path name;
 * indices outside the table fall through (tail of function not shown here). */
296 const char *lowmem_convert_cgroup_type_to_str(int type)
298 static const char *type_table[] =
299 { "/", MEMCG_BACKGROUND_MRU_NAME, MEMCG_BACKGROUND_LRU_NAME};
300 if (type >= MEMCG_ROOT && type < MEMCG_END)
301 return type_table[type];
/* Human-readable name for a LOWMEM_RECLAIM_* status, for log messages. */
306 static const char *convert_status_to_str(int status)
308 static const char *status_table[] =
309 {"none", "done", "drop", "cont", "retry", "next_type"};
310 if(status >= LOWMEM_RECLAIM_NONE && status <= LOWMEM_RECLAIM_NEXT_TYPE)
311 return status_table[status];
312 return "error status";
/* Human-readable name for a MEM_LEVEL_* state, for log messages. */
315 static const char *convert_memstate_to_str(int mem_state)
317 static const char *state_table[] = {"mem high", "mem medium",
318 "mem low", "mem critical", "mem oom",};
319 if (mem_state >= 0 && mem_state < MEM_LEVEL_MAX)
320 return state_table[mem_state];
/* Ask the system-popup service (over D-Bus) to show the low-memory OOM
 * popup. Returns the D-Bus call result. */
324 static int lowmem_launch_oompopup(void)
326 GVariantBuilder *const gv_builder = g_variant_builder_new(G_VARIANT_TYPE("a{ss}"));
327 g_variant_builder_add(gv_builder, "{ss}", "_SYSPOPUP_CONTENT_", "lowmemory_oom");
/* params takes ownership of the builder's contents; builder ref dropped next. */
329 GVariant *const params = g_variant_new("(a{ss})", gv_builder);
330 g_variant_builder_unref(gv_builder);
332 int ret = d_bus_call_method_sync_gvariant(SYSTEM_POPUP_BUS_NAME,
333 SYSTEM_POPUP_PATH_SYSTEM, SYSTEM_POPUP_IFACE_SYSTEM,
334 "PopupLaunch", params);
336 g_variant_unref(params);
/* Cache total RAM (bytes and KB) from sysinfo() into the file-scope globals
 * and publish it via register_totalram_bytes(); logs on sysinfo failure. */
341 static inline void get_total_memory(void)
348 totalram_bytes = (unsigned long long)si.totalram * si.mem_unit;
349 totalram_kb = BYTE_TO_KBYTE(totalram_bytes);
351 register_totalram_bytes(totalram_bytes);
354 _E("Failed to get total ramsize from the kernel");
/* Sum the RSS (in KB) of a task: either the single pid, or every pid in
 * tsk->pids when the task groups several processes (e.g. browser + web
 * process sharing a pgid). Pids whose /proc entry is gone are skipped. */
358 unsigned int lowmem_get_task_mem_usage_rss(const struct task_info *tsk)
360 unsigned int size_kb = 0, total_size_kb = 0;
365 * If pids are allocated only when there are multiple processes with
366 * the same pgid e.g., browser and web process. Mostly, single process
369 if (tsk->pids == NULL) {
370 ret = proc_get_ram_usage(tsk->pid, &size_kb);
372 /* If there is no proc entry for given pid the process
373 * should be abandoned during further processing
376 _D("failed to get rss memory usage of %d", tsk->pid);
381 for (index = 0; index < tsk->pids->len; index++) {
382 pid = g_array_index(tsk->pids, pid_t, index);
383 ret = proc_get_ram_usage(pid, &size_kb);
384 if (ret != RESOURCED_ERROR_NONE)
386 total_size_kb += size_kb;
389 return total_size_kb;
/* Free a GArray of struct task_info, including each task's optional
 * per-task pid array, then the container itself. */
392 static void lowmem_free_task_info_array(GArray *array)
396 for (i = 0; i < array->len; i++) {
397 struct task_info *tsk;
399 tsk = &g_array_index(array, struct task_info, i);
401 g_array_free(tsk->pids, true);
404 g_array_free(array, true);
/* True when the request is a forced kill that must not bail out early on
 * a memory-availability check (OOM_FORCE set, OOM_NOMEMORY_CHECK clear). */
407 static inline int is_dynamic_process_killer(int flags)
409 return (flags & OOM_FORCE) && !(flags & OOM_NOMEMORY_CHECK);
/* Compute how many MB must still be freed to reach 'thres'. Returns 0 when
 * available memory already meets the threshold; otherwise the shortfall plus
 * THRESHOLD_MARGIN headroom (app launches keep consuming while we reclaim). */
412 static unsigned int is_memory_recovered(unsigned int *avail, unsigned int thres)
414 unsigned int available = proc_get_mem_available();
415 unsigned int should_be_freed_mb = 0;
417 if (available < thres)
418 should_be_freed_mb = thres - available;
420 * free THRESHOLD_MARGIN more than real should be freed,
421 * because launching app is consuming up the memory.
423 if (should_be_freed_mb > 0)
424 should_be_freed_mb += THRESHOLD_MARGIN;
428 return should_be_freed_mb;
/* Show the OOM popup at most once per episode (guarded by the oom_popup
 * flag); a no-op when popups are disabled. */
431 static void lowmem_oom_popup_once(void)
433 if (oom_popup_enable && !oom_popup) {
434 lowmem_launch_oompopup();
440 * @brief Terminate up to max_victims processes after finding them from pai.
441 It depends on proc_app_info lists
442 and it also reference systemservice cgroup
443 because some processes in this group don't have proc_app_info.
445 * @max_victims: max number of processes to be terminated
446 * @start_oom: find victims from start oom adj score value
447 * @end_oom: find victims to end oom adj score value
448 * @should_be_freed: amount of memory to be reclaimed (in MB)
449 * @total_size[out]: total size of possibly reclaimed memory (required)
450 * @completed: final outcome (optional)
451 * @threshold: desired value of memory available
453 static int lowmem_kill_victims(int max_victims,
454 int start_oom, int end_oom, unsigned should_be_freed, int flags,
455 unsigned int *total_size, int *completed, unsigned int threshold)
457 GSList *proc_app_list = NULL;
458 unsigned int total_victim_size = 0;
460 int status = LOWMEM_RECLAIM_NONE;
461 GArray *candidates = NULL;
463 proc_app_list = proc_app_list_open();
465 /* Get the victim candidates from lowmem governor */
466 candidates = lowmem_governor_get_kill_candidates(proc_app_list, start_oom, end_oom, flags);
468 proc_app_list_close();
469 proc_app_list = NULL;
/* No candidate in this oom range: tell the caller to move to the next type. */
471 if (!candidates->len) {
472 status = LOWMEM_RECLAIM_NEXT_TYPE;
/* Controller does the actual killing and reports status + reclaimed KB. */
476 victim_cnt = lowmem_controller_kill_candidates(candidates,
477 should_be_freed, threshold,
479 &status, &total_victim_size,
480 lmk_start_threshold_mb,
481 lowmem_oom_popup_once);
484 lowmem_free_task_info_array(candidates);
485 *total_size = total_victim_size;
/* NOTE(review): '*completed' is read here before being written below, so it
 * is used as an in/out status despite the "@completed: final outcome" doc
 * above — confirm callers initialize it. */
486 if(*completed != LOWMEM_RECLAIM_CONT)
489 *completed = LOWMEM_RECLAIM_NEXT_TYPE;
/* Translate an oom_score class into its [min, max] oom_score_adj range.
 * Returns RESOURCED_ERROR_FAIL when the class is outside HIGH..MAX. */
493 static int calculate_range_of_oom(enum oom_score score, int *min, int *max)
495 if (score > OOM_SCORE_MAX || score < OOM_SCORE_HIGH) {
496 _E("[LMK] oom score (%d) is out of scope", score);
497 return RESOURCED_ERROR_FAIL;
500 *max = cgroup_get_highest_oom_score_adj(score);
501 *min = cgroup_get_lowest_oom_score_adj(score);
503 return RESOURCED_ERROR_NONE;
/* Process one queued reclaim request on the worker thread: compute the MB
 * shortfall against ctl->size_mb, kill victims per oom-score class
 * (escalating LOW -> MEDIUM -> HIGH -> MAX when OOM_IN_DEPTH is set), and
 * store the final LOWMEM_RECLAIM_* outcome in ctl->status. */
506 static void lowmem_handle_request(struct lowmem_control *ctl)
508 int start_oom, end_oom;
509 int count = 0, victim_cnt = 0;
510 int max_victim_cnt = ctl->count;
511 int status = LOWMEM_RECLAIM_NONE;
512 unsigned int available_mb = 0;
513 unsigned int total_size_mb = 0;
514 unsigned int current_size = 0;
515 unsigned int reclaim_size_mb, shortfall_mb = 0;
516 enum oom_score oom_score = ctl->score;
518 available_mb = proc_get_mem_available();
519 reclaim_size_mb = ctl->size_mb > available_mb /* MB */
520 ? ctl->size_mb - available_mb : 0;
/* Nothing to do: already at or above the requested level. */
522 if (!reclaim_size_mb) {
523 status = LOWMEM_RECLAIM_DONE;
528 /* Prepare LMK to start doing it's job. Check preconditions. */
529 if (calculate_range_of_oom(oom_score, &start_oom, &end_oom))
532 lmk_start_threshold_mb = get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM];
533 shortfall_mb = is_memory_recovered(&available_mb, ctl->size_mb);
535 if (!shortfall_mb || !reclaim_size_mb) {
536 status = LOWMEM_RECLAIM_DONE;
/* lowmem_kill_victims() reports reclaimed KB through current_size.
 * (Fixed: '&current_size' had been mangled into the mojibake '¤t_size'
 * by an HTML-entity corruption of "&curren".) */
542 victim_cnt = lowmem_kill_victims(max_victim_cnt, start_oom, end_oom,
543 reclaim_size_mb, ctl->flags, &current_size, &status, ctl->size_mb);
546 current_size = KBYTE_TO_MBYTE(current_size);
547 reclaim_size_mb -= reclaim_size_mb > current_size
548 ? current_size : reclaim_size_mb;
549 total_size_mb += current_size;
551 _I("[LMK] current: kill %d victims, reclaim_size=%uMB from %d to %d status=%s",
552 victim_cnt, current_size,
553 start_oom, end_oom, convert_status_to_str(status));
/* Terminal statuses end the pass. */
556 if ((status == LOWMEM_RECLAIM_DONE) ||
557 (status == LOWMEM_RECLAIM_DROP) ||
558 (status == LOWMEM_RECLAIM_RETRY))
562 * If it doesn't finish reclaiming memory in first operation,
563 - if flags has OOM_IN_DEPTH,
564 try to find victims again in the active cgroup.
565 otherwise, just return because there is no more victims in the desired cgroup.
566 - if flags has OOM_REVISE,
567 it means that resourced can't find victims from proc_app_list.
568 So, it should search victims or malicious process from /proc.
569 But searching /proc leads to abnormal behaviour.
570 (Make sluggish or kill same victims continuously)
571 Thus, otherwise, just return in first operation and wait some period.
573 if (oom_score == OOM_SCORE_LOW) {
574 oom_score = OOM_SCORE_MEDIUM;
576 } else if ((oom_score == OOM_SCORE_MEDIUM) && (ctl->flags & OOM_IN_DEPTH)) {
577 oom_score = OOM_SCORE_HIGH;
578 if(ctl->flags & OOM_FORCE)
579 max_victim_cnt = FOREGROUND_VICTIMS;
581 } else if ((oom_score == OOM_SCORE_HIGH) && (ctl->flags & OOM_IN_DEPTH)) {
582 status = LOWMEM_RECLAIM_RETRY;
583 ctl->score = OOM_SCORE_MAX;
585 else if (oom_score == OOM_SCORE_MAX) {
586 status = LOWMEM_RECLAIM_RETRY;
589 _I("[LMK] Done: killed %d processes reclaimed=%uMB remaining=%uMB shortfall=%uMB status=%s",
590 count, total_size_mb, reclaim_size_mb, shortfall_mb, convert_status_to_str(status));
592 /* After we finish reclaiming it's worth to remove oldest memps logs */
593 ctl->status = status;
/* LMK worker thread main loop: raise own priority, then block on the request
 * queue; each popped request is handled, retried with a back-off sleep while
 * status is RETRY (unless OOM_SINGLE_SHOT), and finally destroyed. Exits when
 * an OOM_DROP request arrives or the worker is deactivated. */
596 static void *lowmem_reclaim_worker(void *arg)
598 struct lowmem_worker *lmw = (struct lowmem_worker *)arg;
/* OOM_KILLER_PRIORITY is -20: make the killer as responsive as possible. */
600 setpriority(PRIO_PROCESS, 0, OOM_KILLER_PRIORITY);
602 g_async_queue_ref(lmw->queue);
606 struct lowmem_control *ctl;
608 LOWMEM_WORKER_IDLE(lmw);
609 /* Wait on any wake-up call */
610 ctl = g_async_queue_pop(lmw->queue);
613 _W("[LMK] ctl structure is NULL");
/* Shutdown request or worker deactivated: free and leave the loop. */
617 if ((ctl->flags & OOM_DROP) || !LOWMEM_WORKER_IS_ACTIVE(lmw)) {
618 LOWMEM_DESTROY_REQUEST(ctl);
622 LOWMEM_WORKER_RUN(lmw);
624 _D("[LMK] %d tries", ++try_count);
625 lowmem_handle_request(ctl);
627 * Case the process failed to reclaim requested amount of memory
628 * or still under have memory pressure - try the timeout wait.
629 * There is a chance this will get woken-up in a better reality.
631 if (ctl->status == LOWMEM_RECLAIM_RETRY &&
632 !(ctl->flags & OOM_SINGLE_SHOT)) {
633 unsigned int available_mb = proc_get_mem_available();
635 if (available_mb >= ctl->size_mb) {
636 _I("[LMK] Memory restored: requested=%uMB available=%uMB\n",
637 ctl->size_mb, available_mb);
638 ctl->status = LOWMEM_RECLAIM_DONE;
641 LOWMEM_DESTROY_REQUEST(ctl);
642 LOWMEM_WORKER_IDLE(lmw);
/* Still short: sleep briefly, then re-run the request in OOM_REVISE mode. */
646 if (LOWMEM_WORKER_IS_ACTIVE(lmw)) {
647 g_usleep(LMW_RETRY_WAIT_TIMEOUT_MSEC);
648 ctl->flags |= OOM_REVISE;
654 * The ctl callback would check available size again.
655 * And it is last point in reclaiming worker.
656 * Resourced sent SIGKILL signal to victim processes
657 * so it should wait for a some seconds until each processes returns memory.
659 g_usleep(LMW_LOOP_WAIT_TIMEOUT_MSEC);
663 /* The lmk becomes the owner of all queued requests .. */
664 LOWMEM_DESTROY_REQUEST(ctl);
665 LOWMEM_WORKER_IDLE(lmw);
667 g_async_queue_unref(lmw->queue);
/* Record the new pressure level, refresh the LMK trigger threshold from the
 * root memcg, and broadcast the level change to interested modules. */
671 static void change_lowmem_state(unsigned int mem_state)
673 cur_mem_state = mem_state;
674 lmk_start_threshold_mb = get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM];
676 resourced_notify(RESOURCED_NOTIFIER_MEM_LEVEL_CHANGED,
677 (void *)&cur_mem_state);
680 /* only app can call this function
681 * that is, service cannot call the function
/* Kick off swapping for the given memcg path, but only when swap is on and
 * memory pressure actually warrants it (not at MEM_LEVEL_HIGH). */
683 static void lowmem_swap_memory(char *path)
685 unsigned int available_mb;
687 if (cur_mem_state == MEM_LEVEL_HIGH)
690 if (swap_get_state() != SWAP_ON)
693 available_mb = proc_get_mem_available();
694 if (cur_mem_state != MEM_LEVEL_LOW &&
695 available_mb <= get_root_memcg_info()->threshold_mb[MEM_LEVEL_LOW])
698 resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
/* Remember that a swap-start is outstanding; high_mem_act() clears it. */
699 memcg_swap_status = true;
/* Force a process to be swappable: if its oom_score_adj is below the lowest
 * OOM_SCORE_LOW value, raise it (which moves it to MEMCG_BACKGROUND_LRU via
 * the normal path); otherwise just trigger swap on the given cgroup path. */
702 void lowmem_trigger_swap(pid_t pid, char *path, bool move)
706 int lowest_oom_score_adj;
709 _E("[SWAP] Unknown memory cgroup path to swap");
713 /* In this case, corresponding process will be moved to memory MEMCG_BACKGROUND_LRU.
716 error = proc_get_oom_score_adj(pid, &oom_score_adj);
718 _E("[SWAP] Cannot get oom_score_adj of pid (%d)", pid);
722 lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(OOM_SCORE_LOW);
724 if (oom_score_adj < lowest_oom_score_adj) {
725 oom_score_adj = lowest_oom_score_adj;
726 /* At the end of this function, 'lowmem_swap_memory()' will be called */
727 proc_set_oom_score_adj(pid, oom_score_adj, find_app_info(pid));
732 /* Corresponding process is already managed per app or service.
733 * In addition, if some process is already located in
734 * the MEMCG_BACKGROUND_LRU, then just do swap.
736 resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
/* Publish the memory level as a platform system event (SYS_EVENT_LOW_MEMORY)
 * with the matching EVT_VAL_MEMORY_* string in a bundle. */
739 static void memory_level_send_system_event(int lv)
746 case MEM_LEVEL_MEDIUM:
748 str = EVT_VAL_MEMORY_NORMAL;
750 case MEM_LEVEL_CRITICAL:
751 str = EVT_VAL_MEMORY_SOFT_WARNING;
754 str = EVT_VAL_MEMORY_HARD_WARNING;
763 _E("Failed to create bundle");
767 bundle_add_str(b, EVT_KEY_LOW_MEMORY, str);
768 eventsystem_send_system_event(SYS_EVENT_LOW_MEMORY, b);
/* Recovery action for MEM_LEVEL_HIGH: restore the vconf low-memory key to
 * NORMAL, announce the level, undo any swap limit set while under pressure,
 * and re-enable the freezer if it was paused by lmk_act(). */
772 static void high_mem_act(void)
776 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
778 _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
779 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
780 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
781 VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
782 memory_level_send_system_event(MEM_LEVEL_HIGH);
785 change_lowmem_state(MEM_LEVEL_HIGH);
787 if (swap_get_state() == SWAP_ON && memcg_swap_status) {
788 resourced_notify(RESOURCED_NOTIFIER_SWAP_UNSET_LIMIT,
789 get_memcg_info(MEMCG_BACKGROUND_LRU));
790 memcg_swap_status = false;
792 if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
793 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
794 (void *)CGROUP_FREEZER_ENABLED);
/* Action for MEM_LEVEL_LOW: normalize the vconf key if needed, announce the
 * level, re-enable the freezer if paused, and activate swap if it is off. */
797 static void swap_activate_act(void)
801 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
803 _E("vconf get failed %s", VCONFKEY_SYSMAN_LOW_MEMORY);
805 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
806 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
807 VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
808 memory_level_send_system_event(MEM_LEVEL_LOW);
810 change_lowmem_state(MEM_LEVEL_LOW);
811 if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
812 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
813 (void *)CGROUP_FREEZER_ENABLED);
815 if (swap_get_state() != SWAP_ON)
816 resourced_notify(RESOURCED_NOTIFIER_SWAP_ACTIVATE, NULL);
/* Action for dedup (KSM) scanning: partial scans also transition the system
 * to MEM_LEVEL_MEDIUM (vconf + system event); full scans only notify the
 * dedup module. Requires dedup to be in DEDUP_ONE_SHOT state. */
819 static void dedup_act(enum ksm_scan_mode mode)
824 if (dedup_get_state() != DEDUP_ONE_SHOT)
827 if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
828 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
829 (void *)CGROUP_FREEZER_ENABLED);
831 if (mode == KSM_SCAN_PARTIAL) {
832 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
834 _E("vconf get failed %s", VCONFKEY_SYSMAN_LOW_MEMORY);
836 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
837 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
838 VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
839 memory_level_send_system_event(MEM_LEVEL_MEDIUM);
841 change_lowmem_state(MEM_LEVEL_MEDIUM);
843 data = KSM_SCAN_PARTIAL;
844 resourced_notify(RESOURCED_NOTIFIER_DEDUP_SCAN, &data);
845 } else if (mode == KSM_SCAN_FULL) {
846 data = KSM_SCAN_FULL;
847 resourced_notify(RESOURCED_NOTIFIER_DEDUP_SCAN, &data);
/* Action for MEM_LEVEL_CRITICAL: switch state, request swap compaction,
 * and broadcast the soft-warning system event. */
851 static void swap_compact_act(void)
853 change_lowmem_state(MEM_LEVEL_CRITICAL);
854 resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_CRITICAL);
855 memory_level_send_system_event(MEM_LEVEL_CRITICAL);
/* Completion callback for the reclaim request queued by lmk_act(): on a
 * successful reclaim, move the system back toward MEM_LEVEL_HIGH. */
858 static void medium_cb(struct lowmem_control *ctl)
860 if (ctl->status == LOWMEM_RECLAIM_DONE)
862 lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
/* Action for MEM_LEVEL_OOM: pause the freezer, flag HARD_WARNING in vconf,
 * and (unless a reclaim is already running) queue an OOM_IN_DEPTH reclaim
 * request targeting the root memcg's leave threshold; also asks swap to
 * compact at the OOM level. */
865 static void lmk_act(void)
867 unsigned int available_mb;
869 int status = VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL;
872 * Don't trigger reclaim worker
873 * if it is already running
875 if (LOWMEM_WORKER_IS_RUNNING(&lmw))
878 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
880 _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
882 memory_level_send_system_event(MEM_LEVEL_OOM);
883 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING) {
/* Pause freezing while killing so frozen apps can be reaped normally. */
884 if (proc_get_freezer_status() == CGROUP_FREEZER_ENABLED)
885 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
886 (void *)CGROUP_FREEZER_PAUSED);
887 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
888 VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING);
890 available_mb = proc_get_mem_available();
892 change_lowmem_state(MEM_LEVEL_OOM);
894 if (available_mb < get_root_memcg_info()->threshold_leave_mb) {
895 struct lowmem_control *ctl;
897 ctl = LOWMEM_NEW_REQUEST();
899 LOWMEM_SET_REQUEST(ctl, OOM_IN_DEPTH,
900 OOM_SCORE_LOW, get_root_memcg_info()->threshold_leave_mb,
901 num_max_victims, medium_cb);
902 lowmem_queue_request(&lmw, ctl);
906 resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_OOM);
909 * Flush resourced memory such as other processes.
910 * Resourced can use both many fast bins and sqlite3 cache memery.
/* Dispatch the per-level action (dedup scan, swap, LMK, ...) for a new
 * memory state. MEM_LEVEL_OOM is always re-dispatched even when unchanged,
 * otherwise the reclaim worker could never run again. */
917 void lowmem_trigger_memory_state_action(int mem_state)
920 * Check if the state we want to set is different from current
921 * But it should except this condition if mem_state is already medium.
922 * Otherwise, recalim worker couldn't run any more.
924 if (mem_state != MEM_LEVEL_OOM && cur_mem_state == mem_state)
931 case MEM_LEVEL_MEDIUM:
932 dedup_act(KSM_SCAN_PARTIAL);
937 case MEM_LEVEL_CRITICAL:
938 dedup_act(KSM_SCAN_FULL);
/* Map available MB to the most severe matching MEM_LEVEL_* by scanning from
 * the worst level down; the OOM level uses lmk_start_threshold_mb instead of
 * the root memcg threshold table. */
949 static unsigned int check_mem_state(unsigned int available_mb)
952 for (mem_state = MEM_LEVEL_MAX - 1; mem_state > MEM_LEVEL_HIGH; mem_state--) {
953 if (mem_state != MEM_LEVEL_OOM &&
954 available_mb <= get_root_memcg_info()->threshold_mb[mem_state])
956 else if (mem_state == MEM_LEVEL_OOM && available_mb <= lmk_start_threshold_mb)
963 /* setup memcg parameters depending on total ram size. */
/* Select the CGROUP_ROOT_*_THRES_* constant set matching total RAM and push
 * it into the root memcg (per-level thresholds, leave threshold) plus the
 * proactive-reclaim globals and num_max_victims. */
964 static void setup_memcg_params(void)
966 unsigned long total_ramsize_mb;
969 total_ramsize_mb = BYTE_TO_MBYTE(totalram_bytes);
971 _D("Total: %lu MB", total_ramsize_mb);
972 if (total_ramsize_mb <= MEM_SIZE_64) {
973 /* set thresholds for ram size 64M */
974 proactive_threshold_mb = PROACTIVE_64_THRES;
975 proactive_leave_mb = PROACTIVE_64_LEAVE;
976 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_64_THRES_DEDUP);
977 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_64_THRES_SWAP);
978 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_64_THRES_LOW);
979 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_64_THRES_MEDIUM);
980 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_64_THRES_LEAVE);
981 num_max_victims = CGROUP_ROOT_64_NUM_VICTIMS;
982 } else if (total_ramsize_mb <= MEM_SIZE_256) {
983 /* set thresholds for ram size 256M */
984 proactive_threshold_mb = PROACTIVE_256_THRES;
985 proactive_leave_mb = PROACTIVE_256_LEAVE;
986 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_256_THRES_DEDUP);
987 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_256_THRES_SWAP);
988 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_256_THRES_LOW);
989 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_256_THRES_MEDIUM);
990 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_256_THRES_LEAVE);
991 num_max_victims = CGROUP_ROOT_256_NUM_VICTIMS;
992 } else if (total_ramsize_mb <= MEM_SIZE_448) {
993 /* set thresholds for ram size 448M */
994 proactive_threshold_mb = PROACTIVE_448_THRES;
995 proactive_leave_mb = PROACTIVE_448_LEAVE;
996 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_448_THRES_DEDUP);
997 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_448_THRES_SWAP);
998 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_448_THRES_LOW);
999 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_448_THRES_MEDIUM);
1000 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_448_THRES_LEAVE);
1001 num_max_victims = CGROUP_ROOT_448_NUM_VICTIMS;
1002 } else if (total_ramsize_mb <= MEM_SIZE_512) {
1003 /* set thresholds for ram size 512M */
1004 proactive_threshold_mb = PROACTIVE_512_THRES;
1005 proactive_leave_mb = PROACTIVE_512_LEAVE;
1006 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_512_THRES_DEDUP);
1007 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_512_THRES_SWAP);
1008 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_512_THRES_LOW);
1009 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_512_THRES_MEDIUM);
1010 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_512_THRES_LEAVE);
1011 num_max_victims = CGROUP_ROOT_512_NUM_VICTIMS;
1012 } else if (total_ramsize_mb <= MEM_SIZE_768) {
1013 /* set thresholds for ram size 768M */
1014 proactive_threshold_mb = PROACTIVE_768_THRES;
1015 proactive_leave_mb = PROACTIVE_768_LEAVE;
1016 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_768_THRES_DEDUP);
1017 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_768_THRES_SWAP);
1018 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_768_THRES_LOW);
1019 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_768_THRES_MEDIUM);
1020 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_768_THRES_LEAVE);
1021 num_max_victims = CGROUP_ROOT_768_NUM_VICTIMS;
1022 } else if (total_ramsize_mb <= MEM_SIZE_1024) {
1023 /* set thresholds for ram size more than 1G */
1024 proactive_threshold_mb = PROACTIVE_1024_THRES;
1025 proactive_leave_mb = PROACTIVE_1024_LEAVE;
1026 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_1024_THRES_DEDUP);
1027 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_1024_THRES_SWAP);
1028 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_1024_THRES_LOW);
1029 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_1024_THRES_MEDIUM);
1030 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_1024_THRES_LEAVE);
1031 num_max_victims = CGROUP_ROOT_1024_NUM_VICTIMS;
1032 } else if (total_ramsize_mb <= MEM_SIZE_2048) {
1033 proactive_threshold_mb = PROACTIVE_2048_THRES;
1034 proactive_leave_mb = PROACTIVE_2048_LEAVE;
1035 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_2048_THRES_DEDUP);
1036 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_2048_THRES_SWAP);
1037 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_2048_THRES_LOW);
1038 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_2048_THRES_MEDIUM);
1039 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_2048_THRES_LEAVE);
1040 num_max_victims = CGROUP_ROOT_2048_NUM_VICTIMS;
/* Fallback: anything larger than 2G uses the 3072M table. */
1042 proactive_threshold_mb = PROACTIVE_3072_THRES;
1043 proactive_leave_mb = PROACTIVE_3072_LEAVE;
1044 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_3072_THRES_DEDUP);
1045 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_3072_THRES_SWAP);
1046 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_3072_THRES_LOW);
1047 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_3072_THRES_MEDIUM);
1048 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_3072_THRES_LEAVE);
1049 num_max_victims = CGROUP_ROOT_3072_NUM_VICTIMS;
/* Move a pid to the memcg matching its new oom_score_adj. For an app's main
 * pid this also updates the pai bookkeeping, migrates memory limits, and may
 * kick off swapping when entering MEMCG_BACKGROUND_LRU; child pids are just
 * written into the target cgroup. */
1053 static void lowmem_move_memcgroup(int pid, int next_oom_score_adj, struct proc_app_info *pai)
1055 int cur_oom_score_adj;
1057 struct memcg_info *mi;
1058 int next_memcg_idx = cgroup_get_type(next_oom_score_adj);
1060 mi = get_memcg_info(next_memcg_idx);
1063 _W("oom_score_adj = %d is out of range", next_oom_score_adj);
/* No pai (e.g. not an app): write the pid into the cgroup and stop. */
1068 cgroup_write_pid_fullpath(mi->name, pid);
1073 if (pai->main_pid == pid) {
1074 cur_oom_score_adj = pai->memory.oom_score_adj;
1075 cur_memcg_idx = cgroup_get_type(cur_oom_score_adj);
1077 if (cur_oom_score_adj == next_oom_score_adj) {
1078 _D("next oom_score_adj (%d) is same with current one", next_oom_score_adj);
1082 proc_set_process_memory_state(pai, next_memcg_idx, mi, next_oom_score_adj);
1084 switch (next_memcg_idx) {
1086 case MEMCG_BACKGROUND_LRU:
/* Carry over any per-app memory limit into the LRU cgroup. */
1087 if (!lowmem_limit_move_cgroup(pai))
1091 case MEMCG_BACKGROUND_MRU:
1094 _E("Unknown memory cgroup index");
1098 cgroup_write_pid_fullpath(mi->name, pid);
1100 _I("app (%s) main pid (%d) move %s -> %s (oom = %d -> %d)",
1102 lowmem_convert_cgroup_type_to_str(cur_memcg_idx),
1103 lowmem_convert_cgroup_type_to_str(next_memcg_idx),
1104 cur_oom_score_adj, next_oom_score_adj);
1106 if(cur_memcg_idx == next_memcg_idx)
1109 if (next_memcg_idx == MEMCG_BACKGROUND_LRU)
1110 lowmem_swap_memory(get_memcg_info(MEMCG_BACKGROUND_LRU)->name);
/* Non-main pid of an app: move it, honoring any per-app limit on LRU. */
1114 switch (next_memcg_idx) {
1116 case MEMCG_BACKGROUND_LRU:
1117 if (pai->memory.use_mem_limit)
1120 case MEMCG_BACKGROUND_MRU:
1123 _E("Unknown memory cgroup index");
1127 _I("app (%s) child pid (%d) move -> %s (oom = -> %d)",
1129 lowmem_convert_cgroup_type_to_str(next_memcg_idx),
1130 next_oom_score_adj);
1131 cgroup_write_pid_fullpath(mi->name, pid);
1135 static int lowmem_activate_worker(void)
1137 int ret = RESOURCED_ERROR_NONE;
1139 if (LOWMEM_WORKER_IS_ACTIVE(&lmw)) {
1143 lmw.queue = g_async_queue_new_full(lowmem_request_destroy);
1145 _E("Failed to create request queue\n");
1146 return RESOURCED_ERROR_FAIL;
1148 LOWMEM_WORKER_ACTIVATE(&lmw);
1149 ret = pthread_create(&lmw.worker_thread, NULL,
1150 (void *)lowmem_reclaim_worker, (void *)&lmw);
1152 LOWMEM_WORKER_DEACTIVATE(&lmw);
1153 _E("Failed to create LMK thread: %d\n", ret);
1155 pthread_detach(lmw.worker_thread);
1156 ret = RESOURCED_ERROR_NONE;
1161 static void lowmem_deactivate_worker(void)
1163 struct lowmem_control *ctl;
1165 if (!LOWMEM_WORKER_IS_ACTIVE(&lmw))
1168 LOWMEM_WORKER_DEACTIVATE(&lmw);
1169 lowmem_drain_queue(&lmw);
1171 ctl = LOWMEM_NEW_REQUEST();
1173 _E("Critical - g_slice alloc failed - Lowmem cannot be deactivated");
1176 ctl->flags = OOM_DROP;
1177 g_async_queue_push(lmw.queue, ctl);
1178 g_async_queue_unref(lmw.queue);
/*
 * Memory-pressure callback for the root memory cgroup.
 *
 * Re-reads available memory, and only when it changed since the previous
 * invocation maps it to a memory level and triggers the matching action.
 * The static cache avoids re-running actions for repeated identical reads.
 */
static void lowmem_press_root_cgroup_handler(void)
{
	static unsigned int prev_available_mb;
	unsigned int available_mb;
	int mem_state;

	available_mb = proc_get_mem_available();
	if (prev_available_mb == available_mb)
		return;

	mem_state = check_mem_state(available_mb);
	lowmem_trigger_memory_state_action(mem_state);

	prev_available_mb = available_mb;
}
1196 static void lowmem_force_reclaim_cb(struct lowmem_control *ctl)
1198 lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
1201 int lowmem_trigger_reclaim(int flags, int victims, enum oom_score score, int threshold_mb)
1203 struct lowmem_control *ctl = LOWMEM_NEW_REQUEST();
1208 flags |= OOM_FORCE | OOM_IN_DEPTH | OOM_SINGLE_SHOT;
1209 victims = victims > 0 ? victims : MAX_MEMORY_CGROUP_VICTIMS;
1210 score = score > 0 ? score : OOM_SCORE_LOW;
1211 threshold_mb = threshold_mb > 0 ? threshold_mb : get_root_memcg_info()->threshold_leave_mb;
1213 lowmem_change_memory_state(MEM_LEVEL_CRITICAL, 1);
1214 LOWMEM_SET_REQUEST(ctl, flags,
1215 score, threshold_mb, victims,
1216 lowmem_force_reclaim_cb);
1217 lowmem_queue_request(&lmw, ctl);
1222 void lowmem_trigger_swap_reclaim(enum oom_score score, unsigned long long swap_size_bytes)
1224 int size_mb, victims;
1226 victims = num_max_victims > MAX_PROACTIVE_HIGH_VICTIMS
1227 ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
1229 size_mb = get_root_memcg_info()->threshold_leave_mb + BYTE_TO_MBYTE(swap_size_bytes);
1230 lowmem_trigger_reclaim(0, victims, score, size_mb);
1233 bool lowmem_fragmentated(void)
1235 struct buddyinfo bi;
1238 ret = proc_get_buddyinfo("Normal", &bi);
1243 * The fragmentation_size is the minimum count of order-2 pages in "Normal" zone.
1244 * If total buddy pages is smaller than fragmentation_size,
1245 * resourced will detect kernel memory is fragmented.
1246 * Default value is zero in low memory device.
1248 if (bi.page[PAGE_32K] + (bi.page[PAGE_64K] << 1) + (bi.page[PAGE_128K] << 2) +
1249 (bi.page[PAGE_256K] << 3) < fragmentation_size) {
1250 _I("fragmentation detected, need to execute proactive oom killer");
1256 static void lowmem_proactive_oom_killer(int flags, char *appid)
1258 unsigned int before_mb;
1261 before_mb = proc_get_mem_available();
1263 /* If memory state is medium or normal, just return and kill in oom killer */
1264 if (before_mb < get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM] ||
1265 before_mb > proactive_leave_mb)
1268 victims = num_max_victims > MAX_PROACTIVE_HIGH_VICTIMS
1269 ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
1271 #ifdef HEART_SUPPORT
1273 * This branch is used only when HEART module is compiled in and
1274 * it's MEMORY module must be enabled. Otherwise this is skipped.
1276 struct heart_memory_data *md = heart_memory_get_memdata(appid, DATA_LATEST);
1278 unsigned int rss_mb, after_mb, size_mb;
1280 rss_mb = KBYTE_TO_MBYTE(md->avg_rss);
1284 after_mb = before_mb - rss_mb;
1286 * after launching app, ensure that available memory is
1287 * above threshold_leave
1289 if (after_mb >= get_root_memcg_info()->threshold[MEM_LEVEL_OOM])
1292 if (proactive_threshold_mb - rss_mb >= get_root_memcg_info()->threshold[MEM_LEVEL_OOM])
1293 size_mb = proactive_threshold_mb;
1295 size_mb = rss_mb + get_root_memcg_info()->threshold[MEM_LEVEL_OOM] + THRESHOLD_MARGIN;
1297 _D("history based proactive LMK : avg rss %u, available %u required = %u MB",
1298 rss_mb, before_mb, size_mb);
1299 lowmem_trigger_reclaim(0, victims, OOM_SCORE_LOW, size_mb);
1306 * When there is no history data for the launching app,
1307 * it is necessary to check current fragmentation state or application manifest file.
1308 * So, resourced feels proactive LMK is required, run oom killer based on dynamic
1311 if (lowmem_fragmentated())
1315 * run proactive oom killer only when available is larger than
1316 * dynamic process threshold
1318 if (!proactive_threshold_mb || before_mb >= proactive_threshold_mb)
1321 if (!(flags & PROC_LARGEMEMORY))
1326 * free THRESHOLD_MARGIN more than real should be freed,
1327 * because launching app is consuming up the memory.
1329 _D("Run threshold based proactive LMK: memory level to reach: %u MB\n",
1330 proactive_leave_mb + THRESHOLD_MARGIN);
1331 lowmem_trigger_reclaim(0, victims, OOM_SCORE_LOW, proactive_leave_mb + THRESHOLD_MARGIN);
1334 unsigned int lowmem_get_proactive_thres(void)
1336 return proactive_threshold_mb;
1339 static int lowmem_prelaunch_handler(void *data)
1341 struct proc_status *ps = (struct proc_status *)data;
1342 struct proc_app_info *pai = ps->pai;
1344 if (!pai || CHECK_BIT(pai->flags, PROC_SERVICEAPP))
1345 return RESOURCED_ERROR_NONE;
1347 lowmem_proactive_oom_killer(ps->pai->flags, ps->pai->appid);
1348 return RESOURCED_ERROR_NONE;
1351 int lowmem_control_handler(void *data)
1353 struct lowmem_control_data *lowmem_data;
1355 lowmem_data = (struct lowmem_control_data *)data;
1356 switch (lowmem_data->control_type) {
1357 case LOWMEM_MOVE_CGROUP:
1358 lowmem_move_memcgroup((pid_t)lowmem_data->pid,
1359 lowmem_data->oom_score_adj, lowmem_data->pai);
1364 return RESOURCED_ERROR_NONE;
1367 static int lowmem_bg_reclaim_handler(void *data)
1369 if (swap_get_state() != SWAP_ON)
1370 return RESOURCED_ERROR_NONE;
1373 return RESOURCED_ERROR_NONE;
1376 * Proactively reclaiming memory used by long-lived background processes
1377 * (such as widget instances) may be efficient on devices with limited
1378 * memory constraints. The pages used by such processes could be reclaimed
1379 * (if swap is enabled) earlier than they used to while minimizing the
1380 * impact on the user experience.
1382 resourced_notify(RESOURCED_NOTIFIER_SWAP_START,
1383 get_memcg_info(MEMCG_BACKGROUND_MRU)->name);
1385 return RESOURCED_ERROR_NONE;
1388 static inline int calculate_threshold_size(double ratio)
1390 unsigned long long size_bytes = (double)totalram_bytes * ratio / 100.0;
1391 return BYTE_TO_MBYTE(size_bytes);
1394 static void load_configs(void)
1396 struct memcg_conf *memcg_conf = get_memcg_conf();
1397 struct reclaim_conf *reclaim_conf = config_get_reclaim_conf();
1399 /* set MemoryGroupLimit section */
1400 for (int cgroup = MEMCG_ROOT; cgroup < MEMCG_END; cgroup++) {
1401 if (memcg_conf->cgroup_limit[cgroup] > 0.0)
1402 memcg_info_set_limit(get_memcg_info(cgroup),
1403 memcg_conf->cgroup_limit[cgroup]/100.0, totalram_bytes);
1406 /* set MemoryLevelThreshold section */
1407 for (int lvl = MEM_LEVEL_MEDIUM; lvl < MEM_LEVEL_MAX; lvl++) {
1408 if (memcg_conf->threshold[lvl].percent &&
1409 memcg_conf->threshold[lvl].threshold > 0) {
1410 memcg_set_threshold(MEMCG_ROOT, lvl,
1411 calculate_threshold_size(memcg_conf->threshold[lvl].threshold));
1413 if (lvl == MEM_LEVEL_OOM) {
1414 proactive_threshold_mb = get_memcg_info(MEMCG_ROOT)->threshold_leave_mb;
1415 proactive_leave_mb = proactive_threshold_mb * 1.5;
1418 else if (memcg_conf->threshold[lvl].threshold > 0) {
1419 memcg_set_threshold(MEMCG_ROOT, lvl,
1420 memcg_conf->threshold[lvl].threshold);
1422 if (lvl == MEM_LEVEL_OOM) {
1423 proactive_threshold_mb = get_memcg_info(MEMCG_ROOT)->threshold_leave_mb;
1424 proactive_leave_mb = proactive_threshold_mb * 1.5;
1430 if (memcg_conf->threshold_leave.percent &&
1431 memcg_conf->threshold_leave.threshold > 0) {
1432 memcg_set_leave_threshold(MEMCG_ROOT,
1433 calculate_threshold_size(memcg_conf->threshold_leave.threshold));
1434 } else if (memcg_conf->threshold_leave.threshold > 0) {
1435 memcg_set_leave_threshold(MEMCG_ROOT,
1436 memcg_conf->threshold_leave.threshold);
1439 oom_popup_enable = memcg_conf->oom_popup;
1440 set_memcg_limit_trigger(memcg_conf->limit_trigger);
1442 /* set MemoryAppTypeLimit and MemoryAppStatusLimit section */
1443 lowmem_memory_init(memcg_conf->service.memory_bytes, memcg_conf->widget.memory_bytes,
1444 memcg_conf->guiapp.memory_bytes, memcg_conf->background.memory_bytes);
1445 lowmem_action_init(memcg_conf->service.action, memcg_conf->widget.action,
1446 memcg_conf->guiapp.action, memcg_conf->background.action);
1448 /* set MemoryBackgroundReclaim section */
1450 goto free_memcg_conf;
1452 g_bg_reclaim = reclaim_conf->screen_dim;
1454 config_free_reclaim_conf();
1460 static void print_mem_configs(void)
1462 /* print info of Memory section */
1463 for (int cgroup = MEMCG_ROOT; cgroup < MEMCG_END; cgroup++) {
1464 _I("[MEMORY-CGROUP] set memory for cgroup '%s' to %llu bytes",
1465 lowmem_convert_cgroup_type_to_str(cgroup), get_memcg_info(cgroup)->limit_bytes);
1468 for (int cgroup = MEMCG_ROOT; cgroup < MEMCG_END; cgroup++) {
1469 for (int mem_lvl = 0; mem_lvl < MEM_LEVEL_MAX; mem_lvl++) {
1470 _I("[MEMORY-LEVEL] set threshold of %s for memory level '%s' to %u MB", lowmem_convert_cgroup_type_to_str(cgroup),
1471 convert_memstate_to_str(mem_lvl), get_memcg_info(cgroup)->threshold_mb[mem_lvl]);
1475 _I("[LMK] set number of max victims as %d", num_max_victims);
1476 _I("[LMK] set threshold leave to %u MB", get_root_memcg_info()->threshold_leave_mb);
1477 _I("[LMK] set proactive threshold to %u MB", proactive_threshold_mb);
1478 _I("[LMK] set proactive low memory killer leave to %u MB", proactive_leave_mb);
1480 /* print info of POPUP section */
1481 _I("[POPUP] oom popup is %s", oom_popup_enable == true ? "enabled" : "disabled");
1483 _I("Background reclaim enabled = %d", g_bg_reclaim);
1486 /* To Do: should we need lowmem_fd_start, lowmem_fd_stop ?? */
1487 static int lowmem_init(void)
1489 int ret = RESOURCED_ERROR_NONE;
1491 _D("resourced memory init start");
1494 ret = memcg_make_full_subdir(MEMCG_PATH);
1495 ret_value_msg_if(ret < 0, ret, "memory cgroup init failed\n");
1496 memcg_params_init();
1498 setup_memcg_params();
1500 /* default configuration */
1503 /* this function should be called after parsing configurations */
1504 memcg_write_limiter_params();
1505 print_mem_configs();
1507 /* Initalize lowmem governor module before making a low memory killer */
1508 lowmem_governor_init();
1510 /* make a worker thread called low memory killer */
1511 ret = lowmem_activate_worker();
1513 _E("[LMK] oom thread create failed\n");
1517 /* register threshold and event fd */
1518 ret = lowmem_monitor_pressure_initialize(
1519 lowmem_press_root_cgroup_handler);
1521 _E("[MEMORY-LIMIT] eventfd setup failed");
1526 lowmem_limit_init();
1527 lowmem_system_init();
1529 register_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
1530 register_notifier(RESOURCED_NOTIFIER_MEM_CONTROL, lowmem_control_handler);
1531 register_notifier(RESOURCED_NOTIFIER_LCD_OFF, lowmem_bg_reclaim_handler);
1536 static int lowmem_exit(void)
1538 lowmem_deactivate_worker();
1539 lowmem_governor_exit();
1540 lowmem_limit_exit();
1541 lowmem_system_exit();
1543 unregister_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
1544 unregister_notifier(RESOURCED_NOTIFIER_MEM_CONTROL, lowmem_control_handler);
1545 unregister_notifier(RESOURCED_NOTIFIER_LCD_OFF, lowmem_bg_reclaim_handler);
1547 return RESOURCED_ERROR_NONE;
/* Module-ops init hook; @data is unused. */
static int resourced_memory_init(void *data)
{
	return lowmem_init();
}
/* Module-ops exit hook; @data is unused. */
static int resourced_memory_finalize(void *data)
{
	return lowmem_exit();
}
/*
 * Switch the module's memory level and run the matching action.
 *
 * @param state  memory level to apply when @force is nonzero
 * @param force  nonzero: apply @state as-is; zero: ignore @state and derive
 *               the level from the currently available memory
 */
void lowmem_change_memory_state(int state, int force)
{
	int mem_state;

	if (force) {
		mem_state = state;
	} else {
		unsigned int available_mb = proc_get_mem_available();
		mem_state = check_mem_state(available_mb);
	}

	lowmem_trigger_memory_state_action(mem_state);
}
1574 unsigned long lowmem_get_ktotalram(void)
1579 unsigned long long lowmem_get_totalram(void)
1581 return totalram_bytes;
1584 void lowmem_restore_memcg(struct proc_app_info *pai)
1588 struct cgroup *cgroup = NULL;
1589 struct memcg_info *mi = NULL;
1590 pid_t pid = pai->main_pid;
1592 ret = cgroup_pid_get_path("memory", pid, &cgpath);
1596 for (index = MEMCG_END-1; index >= MEMCG_ROOT; index--) {
1597 cgroup = get_cgroup_tree(index);
1601 mi = cgroup->memcg_info;
1605 if (!strcmp(cgroup->hashname, ""))
1607 if (strstr(cgpath, cgroup->hashname))
1610 pai->memory.memcg_idx = index;
1611 pai->memory.memcg_info = mi;
1612 if(strstr(cgpath, pai->appid))
1613 pai->memory.use_mem_limit = true;
1618 static struct module_ops memory_modules_ops = {
1619 .priority = MODULE_PRIORITY_EARLY,
1621 .init = resourced_memory_init,
1622 .exit = resourced_memory_finalize,
1625 MODULE_REGISTER(&memory_modules_ops)