4 * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved.
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
20 * @file vmpressure-lowmem-handler.c
22 * @desc lowmem handler using memcgroup
24 * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
36 #include <sys/types.h>
39 #include <sys/sysinfo.h>
40 #include <sys/resource.h>
43 #include <eventsystem.h>
49 #include "lowmem-dbus.h"
50 #include "lowmem-monitor.h"
51 #include "lowmem-system.h"
52 #include "lowmem-limit.h"
53 #include "lowmem-governor.h"
54 #include "lowmem-controller.h"
55 #include "proc-common.h"
58 #include "resourced.h"
61 #include "config-parser.h"
63 #include "swap-common.h"
65 #include "memory-cgroup.h"
66 #include "heart-common.h"
67 #include "proc-main.h"
68 #include "dbus-handler.h"
70 #include "fd-handler.h"
71 #include "resourced-helper-worker.h"
72 #include "dedup-common.h"
/* Tunable LMK limits plus canonical RAM-size buckets (all sizes in MB).
 * For each bucket: proactive reclaim trigger/leave levels, root-memcg
 * thresholds per pressure level, and the max victim count per LMK pass.
 * Consumed by setup_memcg_params(). */
74 #define MAX_PROACTIVE_HIGH_VICTIMS 4
75 #define FOREGROUND_VICTIMS 1
76 #define OOM_KILLER_PRIORITY -20
77 #define THRESHOLD_MARGIN 10 /* MB */
79 #define MEM_SIZE_64 64 /* MB */
80 #define MEM_SIZE_256 256 /* MB */
81 #define MEM_SIZE_448 448 /* MB */
82 #define MEM_SIZE_512 512 /* MB */
83 #define MEM_SIZE_768 768 /* MB */
84 #define MEM_SIZE_1024 1024 /* MB */
85 #define MEM_SIZE_2048 2048 /* MB */
87 /* thresholds for 64M RAM*/
88 #define PROACTIVE_64_THRES 10 /* MB */
89 #define PROACTIVE_64_LEAVE 30 /* MB */
90 #define CGROUP_ROOT_64_THRES_DEDUP 16 /* MB */
91 #define CGROUP_ROOT_64_THRES_SWAP 15 /* MB */
92 #define CGROUP_ROOT_64_THRES_LOW 8 /* MB */
93 #define CGROUP_ROOT_64_THRES_MEDIUM 5 /* MB */
94 #define CGROUP_ROOT_64_THRES_LEAVE 8 /* MB */
95 #define CGROUP_ROOT_64_NUM_VICTIMS 1
97 /* thresholds for 256M RAM */
98 #define PROACTIVE_256_THRES 50 /* MB */
99 #define PROACTIVE_256_LEAVE 80 /* MB */
100 #define CGROUP_ROOT_256_THRES_DEDUP 60 /* MB */
101 #define CGROUP_ROOT_256_THRES_SWAP 40 /* MB */
102 #define CGROUP_ROOT_256_THRES_LOW 20 /* MB */
103 #define CGROUP_ROOT_256_THRES_MEDIUM 10 /* MB */
104 #define CGROUP_ROOT_256_THRES_LEAVE 20 /* MB */
105 #define CGROUP_ROOT_256_NUM_VICTIMS 2
107 /* threshold for 448M RAM */
108 #define PROACTIVE_448_THRES 80 /* MB */
109 #define PROACTIVE_448_LEAVE 100 /* MB */
110 #define CGROUP_ROOT_448_THRES_DEDUP 120 /* MB */
111 #define CGROUP_ROOT_448_THRES_SWAP 100 /* MB */
112 #define CGROUP_ROOT_448_THRES_LOW 60 /* MB */
113 #define CGROUP_ROOT_448_THRES_MEDIUM 50 /* MB */
114 #define CGROUP_ROOT_448_THRES_LEAVE 70 /* MB */
115 #define CGROUP_ROOT_448_NUM_VICTIMS 5
117 /* threshold for 512M RAM */
118 #define PROACTIVE_512_THRES 80 /* MB */
119 #define PROACTIVE_512_LEAVE 100 /* MB */
120 #define CGROUP_ROOT_512_THRES_DEDUP 140 /* MB */
121 #define CGROUP_ROOT_512_THRES_SWAP 100 /* MB */
122 #define CGROUP_ROOT_512_THRES_LOW 70 /* MB */
123 #define CGROUP_ROOT_512_THRES_MEDIUM 60 /* MB */
124 #define CGROUP_ROOT_512_THRES_LEAVE 80 /* MB */
125 #define CGROUP_ROOT_512_NUM_VICTIMS 5
127 /* threshold for 768 RAM */
128 #define PROACTIVE_768_THRES 100 /* MB */
129 #define PROACTIVE_768_LEAVE 130 /* MB */
130 #define CGROUP_ROOT_768_THRES_DEDUP 180 /* MB */
131 #define CGROUP_ROOT_768_THRES_SWAP 150 /* MB */
132 #define CGROUP_ROOT_768_THRES_LOW 90 /* MB */
133 #define CGROUP_ROOT_768_THRES_MEDIUM 80 /* MB */
134 #define CGROUP_ROOT_768_THRES_LEAVE 100 /* MB */
135 #define CGROUP_ROOT_768_NUM_VICTIMS 5
137 /* threshold for more than 1024M RAM */
138 #define PROACTIVE_1024_THRES 150 /* MB */
139 #define PROACTIVE_1024_LEAVE 230 /* MB */
140 #define CGROUP_ROOT_1024_THRES_DEDUP 400 /* MB */
141 #define CGROUP_ROOT_1024_THRES_SWAP 300 /* MB */
142 #define CGROUP_ROOT_1024_THRES_LOW 120 /* MB */
143 #define CGROUP_ROOT_1024_THRES_MEDIUM 100 /* MB */
144 #define CGROUP_ROOT_1024_THRES_LEAVE 150 /* MB */
145 #define CGROUP_ROOT_1024_NUM_VICTIMS 5
147 /* threshold for more than 2048M RAM */
148 #define PROACTIVE_2048_THRES 200 /* MB */
149 #define PROACTIVE_2048_LEAVE 500 /* MB */
150 #define CGROUP_ROOT_2048_THRES_DEDUP 400 /* MB */
151 #define CGROUP_ROOT_2048_THRES_SWAP 300 /* MB */
152 #define CGROUP_ROOT_2048_THRES_LOW 200 /* MB */
153 #define CGROUP_ROOT_2048_THRES_MEDIUM 160 /* MB */
154 #define CGROUP_ROOT_2048_THRES_LEAVE 300 /* MB */
155 #define CGROUP_ROOT_2048_NUM_VICTIMS 10
157 /* threshold for more than 3072M RAM */
158 #define PROACTIVE_3072_THRES 300 /* MB */
159 #define PROACTIVE_3072_LEAVE 700 /* MB */
160 #define CGROUP_ROOT_3072_THRES_DEDUP 600 /* MB */
161 #define CGROUP_ROOT_3072_THRES_SWAP 500 /* MB */
162 #define CGROUP_ROOT_3072_THRES_LOW 400 /* MB */
163 #define CGROUP_ROOT_3072_THRES_MEDIUM 250 /* MB */
164 #define CGROUP_ROOT_3072_THRES_LEAVE 500 /* MB */
165 #define CGROUP_ROOT_3072_NUM_VICTIMS 10
/* Proactive-reclaim trigger and leave levels (MB), filled in by
 * setup_memcg_params().  lmk_start_threshold_mb is refreshed from the
 * root memcg's MEM_LEVEL_OOM threshold whenever the memory state
 * changes (see change_lowmem_state/lowmem_handle_request). */
167 static unsigned proactive_threshold_mb;
168 static unsigned proactive_leave_mb;
169 static unsigned lmk_start_threshold_mb;
172 * Resourced Low Memory Killer
173 * NOTE: planned to be moved to a separate file.
175 /*-------------------------------------------------*/
#define OOM_TIMER_INTERVAL_SEC 2
/* NOTE(review): despite the _MSEC suffix these expand to *microsecond*
 * values (G_USEC_PER_SEC-based) and are passed straight to g_usleep(),
 * which takes microseconds.  Names are kept to avoid touching callers.
 * Expansions are fully parenthesized so the macros stay correct inside
 * larger expressions (the old OOM_TIMER_INTERVAL_SEC*(G_USEC_PER_SEC)
 * form would bind wrongly next to '/' or unary operators). */
#define LMW_LOOP_WAIT_TIMEOUT_MSEC (OOM_TIMER_INTERVAL_SEC * (G_USEC_PER_SEC))
#define LMW_RETRY_WAIT_TIMEOUT_MSEC (G_USEC_PER_SEC)
/* A single reclaim request handed to the LMK worker thread.
 * Created via LOWMEM_NEW_REQUEST(), filled by LOWMEM_SET_REQUEST(),
 * destroyed with LOWMEM_DESTROY_REQUEST(). */
180 struct lowmem_control {
182 * For each queued request the following properties
183 * are required with two exceptions:
184 * - status is being set by LMK
185 * - callback is optional
187 /* Processing flags*/
189 /* Indicator for OOM score of targeted processes */
190 enum oom_score score;
192 /* Desired size to be restored - level to be reached (MB)*/
193 unsigned int size_mb;
194 /* Max number of processes to be considered */
196 /* Memory reclaim status */
199 * Optional - if set, will be triggered by LMK once the request
202 void (*callback) (struct lowmem_control *);
/* LMK worker context: the reclaim thread plus (per the accessor macros
 * below) an atomic 'active' flag, an atomic 'running' flag and a
 * GAsyncQueue of pending lowmem_control requests. */
205 struct lowmem_worker {
206 pthread_t worker_thread;
/* Single file-scope worker instance shared by all request producers. */
212 static struct lowmem_worker lmw;
214 //static int memlog_enabled;
215 //static int memlog_nr_max = DEFAULT_MEMLOG_NR_MAX;
216 /* remove logfiles to reduce to this threshold.
217 * it is about five-sixths of the memlog_nr_max. */
218 //static int memlog_remove_batch_thres = (DEFAULT_MEMLOG_NR_MAX * 5) / 6;
219 //static char *memlog_path = DEFAULT_MEMLOG_PATH;
220 //static char *memlog_prefix[MEMLOG_MAX];
/* Atomic accessors for the worker's lifecycle flags:
 * 'active'  - worker accepts requests (set at activate/deactivate),
 * 'running' - worker is currently processing a request. */
222 #define LOWMEM_WORKER_IS_ACTIVE(_lmw) g_atomic_int_get(&(_lmw)->active)
223 #define LOWMEM_WORKER_ACTIVATE(_lmw) g_atomic_int_set(&(_lmw)->active, 1)
224 #define LOWMEM_WORKER_DEACTIVATE(_lmw) g_atomic_int_set(&(_lmw)->active, 0)
226 #define LOWMEM_WORKER_IS_RUNNING(_lmw) g_atomic_int_get(&(_lmw)->running)
227 #define LOWMEM_WORKER_RUN(_lmw) g_atomic_int_set(&(_lmw)->running, 1)
228 #define LOWMEM_WORKER_IDLE(_lmw) g_atomic_int_set(&(_lmw)->running, 0)
/* Request lifetime helpers: allocation uses zero-filled g_slice memory. */
230 #define LOWMEM_NEW_REQUEST() g_slice_new0(struct lowmem_control)
232 #define LOWMEM_DESTROY_REQUEST(_ctl) \
233 g_slice_free(typeof(*(_ctl)), _ctl); \
235 #define LOWMEM_SET_REQUEST(c, __flags, __score, __size, __count, __cb) \
237 (c)->flags = __flags; (c)->score = __score; \
238 (c)->size_mb= __size; (c)->count = __count; \
239 (c)->callback = __cb; \
/* Enqueue a reclaim request for the worker thread.  The push only
 * happens while the worker is active; once queued, the request is
 * owned (and eventually freed) by the consumer side. */
242 static void lowmem_queue_request(struct lowmem_worker *lmw,
243 struct lowmem_control *ctl)
245 if (LOWMEM_WORKER_IS_ACTIVE(lmw))
246 g_async_queue_push(lmw->queue, ctl);
/* Remove and destroy every request still pending in the worker queue,
 * under the queue lock (used when deactivating the worker). */
250 static void lowmem_drain_queue(struct lowmem_worker *lmw)
252 struct lowmem_control *ctl;
254 g_async_queue_lock(lmw->queue);
255 while ((ctl = g_async_queue_try_pop_unlocked(lmw->queue))) {
258 LOWMEM_DESTROY_REQUEST(ctl);
260 g_async_queue_unlock(lmw->queue);
/* GDestroyNotify for the request queue: frees a queued request when the
 * queue itself is torn down (g_async_queue_new_full). */
263 static void lowmem_request_destroy(gpointer data)
265 struct lowmem_control *ctl = (struct lowmem_control*) data;
269 LOWMEM_DESTROY_REQUEST(ctl);
272 /*-------------------------------------------------*/
274 /* low memory action function for cgroup */
275 /* low memory action function */
276 static void high_mem_act(void);
277 static void swap_activate_act(void);
278 static void swap_compact_act(void);
279 static void lmk_act(void);
/* Current pressure level (MEM_LEVEL_*) and per-boot memory facts. */
282 static size_t cur_mem_state = MEM_LEVEL_HIGH;
283 static int num_max_victims = MAX_MEMORY_CGROUP_VICTIMS;
285 static unsigned long long totalram_bytes;
286 static unsigned long totalram_kb;
/* oom_popup: popup already shown once; memcg_swap_status: swap was
 * started for a throttled cgroup and its limit must be unset later. */
288 static bool oom_popup_enable;
289 static bool oom_popup;
290 static bool memcg_swap_status;
291 static int fragmentation_size;
/* Map a memcg type index to its printable name.  Valid range is
 * MEMCG_ROOT..MEMCG_THROTTLING; the table initializer and the
 * out-of-range fallback are outside this excerpt. */
293 const char *lowmem_convert_cgroup_type_to_str(int type)
295 static const char *type_table[] =
297 if (type >= MEMCG_ROOT && type <= MEMCG_THROTTLING)
298 return type_table[type];
/* Printable name for a LOWMEM_RECLAIM_* status; the table order mirrors
 * the enum (none..next_type), with a sentinel string for bad values. */
303 static const char *convert_status_to_str(int status)
305 static const char *status_table[] =
306 {"none", "done", "drop", "cont", "retry", "next_type"};
307 if(status >= LOWMEM_RECLAIM_NONE && status <= LOWMEM_RECLAIM_NEXT_TYPE)
308 return status_table[status];
309 return "error status";
/* Printable name for a MEM_LEVEL_* state (0..MEM_LEVEL_MAX-1); the
 * fallback return for out-of-range values is outside this excerpt. */
312 static const char *convert_memstate_to_str(int mem_state)
314 static const char *state_table[] = {"mem high", "mem medium",
315 "mem low", "mem critical", "mem oom",};
316 if (mem_state >= 0 && mem_state < MEM_LEVEL_MAX)
317 return state_table[mem_state];
/* Ask the system-popup service (via D-Bus "PopupLaunch") to show the
 * low-memory OOM popup.  Returns the D-Bus call result. */
321 static int lowmem_launch_oompopup(void)
323 GVariantBuilder *const gv_builder = g_variant_builder_new(G_VARIANT_TYPE("a{ss}"));
324 g_variant_builder_add(gv_builder, "{ss}", "_SYSPOPUP_CONTENT_", "lowmemory_oom");
326 GVariant *const params = g_variant_new("(a{ss})", gv_builder);
327 g_variant_builder_unref(gv_builder);
329 int ret = d_bus_call_method_sync_gvariant(SYSTEM_POPUP_BUS_NAME,
330 SYSTEM_POPUP_PATH_SYSTEM, SYSTEM_POPUP_IFACE_SYSTEM,
331 "PopupLaunch", params);
333 g_variant_unref(params);
/* Cache total system RAM (bytes and KB) from sysinfo() — the sysinfo
 * struct/call lines are elided in this excerpt — and publish it via
 * register_totalram_bytes().  Logs on failure. */
338 static inline void get_total_memory(void)
345 totalram_bytes = (unsigned long long)si.totalram * si.mem_unit;
346 totalram_kb = BYTE_TO_KBYTE(totalram_bytes);
348 register_totalram_bytes(totalram_bytes);
351 _E("Failed to get total ramsize from the kernel");
/* Sum the RSS (KB) of a task: a single pid when tsk->pids is NULL,
 * otherwise every pid in the group array (e.g. browser + web process
 * sharing a pgid).  Pids whose /proc entry is gone are skipped. */
355 unsigned int lowmem_get_task_mem_usage_rss(const struct task_info *tsk)
357 unsigned int size_kb = 0, total_size_kb = 0;
362 * If pids are allocated only when there are multiple processes with
363 * the same pgid e.g., browser and web process. Mostly, single process
366 if (tsk->pids == NULL) {
367 ret = proc_get_ram_usage(tsk->pid, &size_kb);
369 /* If there is no proc entry for given pid the process
370 * should be abandoned during further processing
373 _D("failed to get rss memory usage of %d", tsk->pid);
378 for (index = 0; index < tsk->pids->len; index++) {
379 pid = g_array_index(tsk->pids, pid_t, index);
380 ret = proc_get_ram_usage(pid, &size_kb);
381 if (ret != RESOURCED_ERROR_NONE)
383 total_size_kb += size_kb;
386 return total_size_kb;
/* Free a GArray of task_info, including each element's optional pid
 * group array, then the array itself. */
389 static void lowmem_free_task_info_array(GArray *array)
393 for (i = 0; i < array->len; i++) {
394 struct task_info *tsk;
396 tsk = &g_array_index(array, struct task_info, i);
398 g_array_free(tsk->pids, true);
401 g_array_free(array, true);
/* True for a forced kill request that skips the free-memory check
 * (OOM_FORCE set, OOM_NOMEMORY_CHECK clear). */
404 static inline int is_dynamic_process_killer(int flags)
406 return (flags & OOM_FORCE) && !(flags & OOM_NOMEMORY_CHECK);
/* Return how many MB must still be freed to reach 'thres', padded by
 * THRESHOLD_MARGIN to absorb concurrent allocations (app launches);
 * 0 when available memory already meets the threshold.  The write-back
 * of 'available' through *avail happens on an elided line. */
409 static unsigned int is_memory_recovered(unsigned int *avail, unsigned int thres)
411 unsigned int available = proc_get_mem_available();
412 unsigned int should_be_freed_mb = 0;
414 if (available < thres)
415 should_be_freed_mb = thres - available;
417 * free THRESHOLD_MARGIN more than real should be freed,
418 * because launching app is consuming up the memory.
420 if (should_be_freed_mb > 0)
421 should_be_freed_mb += THRESHOLD_MARGIN;
425 return should_be_freed_mb;
/* Show the OOM popup at most once per pressure episode (guarded by the
 * oom_popup_enable setting and the oom_popup latch). */
428 static void lowmem_oom_popup_once(void)
430 if (oom_popup_enable && !oom_popup) {
431 lowmem_launch_oompopup();
437 * @brief Terminate up to max_victims processes after finding them from pai.
438 It depends on proc_app_info lists
439 and it also reference systemservice cgroup
440 because some processes in this group don't have proc_app_info.
442 * @max_victims: max number of processes to be terminated
443 * @start_oom: find victims from start oom adj score value
444 * @end_oom: find victims to end oom adj score value
445 * @should_be_freed: amount of memory to be reclaimed (in MB)
446 * @total_size[out]: total size of possibly reclaimed memory (required)
447 * @completed: final outcome (optional)
448 * @threshold: desired value of memory available
450 static int lowmem_kill_victims(int max_victims,
451 int start_oom, int end_oom, unsigned should_be_freed, int flags,
452 unsigned int *total_size, int *completed, unsigned int threshold)
454 GSList *proc_app_list = NULL;
455 unsigned int total_victim_size = 0;
457 int status = LOWMEM_RECLAIM_NONE;
458 GArray *candidates = NULL;
460 proc_app_list = proc_app_list_open();
462 /* Get the victim candidates from lowmem governor */
463 candidates = lowmem_governor_get_kill_candidates(proc_app_list, start_oom, end_oom, flags);
465 proc_app_list_close();
466 proc_app_list = NULL;
/* No candidate in this oom range: tell the caller to move on. */
468 if (!candidates->len) {
469 status = LOWMEM_RECLAIM_NEXT_TYPE;
/* Controller kills candidates until should_be_freed/threshold is met,
 * reporting status and the summed victim size; may raise the popup. */
473 victim_cnt = lowmem_controller_kill_candidates(candidates,
474 should_be_freed, threshold,
476 &status, &total_victim_size,
477 lmk_start_threshold_mb,
478 lowmem_oom_popup_once);
481 lowmem_free_task_info_array(candidates)
482 *total_size = total_victim_size;
483 if(*completed != LOWMEM_RECLAIM_CONT)
486 *completed = LOWMEM_RECLAIM_NEXT_TYPE;
/* Resolve an oom_score bucket to its [min,max] oom_score_adj range.
 * Returns RESOURCED_ERROR_FAIL for scores outside HIGH..MAX. */
490 static int calculate_range_of_oom(enum oom_score score, int *min, int *max)
492 if (score > OOM_SCORE_MAX || score < OOM_SCORE_HIGH) {
493 _E("[LMK] oom score (%d) is out of scope", score);
494 return RESOURCED_ERROR_FAIL;
497 *max = cgroup_get_highest_oom_score_adj(score);
498 *min = cgroup_get_lowest_oom_score_adj(score);
500 return RESOURCED_ERROR_NONE;
/* Execute one reclaim request on the worker thread: compute the
 * shortfall versus ctl->size_mb, kill victims per oom bucket, and
 * escalate LOW -> MEDIUM -> HIGH -> MAX until done/drop/retry.
 * Final outcome is stored in ctl->status. */
503 static void lowmem_handle_request(struct lowmem_control *ctl)
505 int start_oom, end_oom;
506 int count = 0, victim_cnt = 0;
507 int max_victim_cnt = ctl->count;
508 int status = LOWMEM_RECLAIM_NONE;
509 unsigned int available_mb = 0;
510 unsigned int total_size_mb = 0;
511 unsigned int current_size = 0;
512 unsigned int reclaim_size_mb, shortfall_mb = 0;
513 enum oom_score oom_score = ctl->score;
515 available_mb = proc_get_mem_available();
516 reclaim_size_mb = ctl->size_mb > available_mb /* MB */
517 ? ctl->size_mb - available_mb : 0;
519 if (!reclaim_size_mb) {
520 status = LOWMEM_RECLAIM_DONE;
525 /* Prepare LMK to start doing it's job. Check preconditions. */
526 if (calculate_range_of_oom(oom_score, &start_oom, &end_oom))
529 lmk_start_threshold_mb = get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM];
530 shortfall_mb = is_memory_recovered(&available_mb, ctl->size_mb);
532 if (!shortfall_mb || !reclaim_size_mb) {
533 status = LOWMEM_RECLAIM_DONE;
/* NOTE(review): "¤t_size" below is a mojibake of "&current_size"
 * (HTML "&curren;" artifact) — restore before compiling. */
539 victim_cnt = lowmem_kill_victims(max_victim_cnt, start_oom, end_oom,
540 reclaim_size_mb, ctl->flags, ¤t_size, &status, ctl->size_mb);
543 current_size = KBYTE_TO_MBYTE(current_size);
544 reclaim_size_mb -= reclaim_size_mb > current_size
545 ? current_size : reclaim_size_mb;
546 total_size_mb += current_size;
548 _I("[LMK] current: kill %d victims, reclaim_size=%uMB from %d to %d status=%s",
549 victim_cnt, current_size,
550 start_oom, end_oom, convert_status_to_str(status));
553 if ((status == LOWMEM_RECLAIM_DONE) ||
554 (status == LOWMEM_RECLAIM_DROP) ||
555 (status == LOWMEM_RECLAIM_RETRY))
559 * If it doesn't finish reclaiming memory in first operation,
560 - if flags has OOM_IN_DEPTH,
561 try to find victims again in the active cgroup.
562 otherwise, just return because there is no more victims in the desired cgroup.
563 - if flags has OOM_REVISE,
564 it means that resourced can't find victims from proc_app_list.
565 So, it should search victims or malicious process from /proc.
566 But searching /proc leads to abnormal behaviour.
567 (Make sluggish or kill same victims continuously)
568 Thus, otherwise, just return in first operation and wait some period.
570 if (oom_score == OOM_SCORE_LOW) {
571 oom_score = OOM_SCORE_MEDIUM;
573 } else if ((oom_score == OOM_SCORE_MEDIUM) && (ctl->flags & OOM_IN_DEPTH)) {
574 oom_score = OOM_SCORE_HIGH;
575 if(ctl->flags & OOM_FORCE)
576 max_victim_cnt = FOREGROUND_VICTIMS;
578 } else if ((oom_score == OOM_SCORE_HIGH) && (ctl->flags & OOM_IN_DEPTH)) {
579 status = LOWMEM_RECLAIM_RETRY;
580 ctl->score = OOM_SCORE_MAX;
582 else if (oom_score == OOM_SCORE_MAX) {
583 status = LOWMEM_RECLAIM_RETRY;
586 _I("[LMK] Done: killed %d processes reclaimed=%uMB remaining=%uMB shortfall=%uMB status=%s",
587 count, total_size_mb, reclaim_size_mb, shortfall_mb, convert_status_to_str(status));
589 /* After we finish reclaiming it's worth to remove oldest memps logs */
590 ctl->status = status;
/* LMK worker thread main loop: runs at OOM_KILLER_PRIORITY, blocks on
 * the request queue, handles each request, and on RETRY status sleeps
 * and re-queues (OOM_REVISE) until memory recovers or the worker is
 * deactivated.  Owns and frees every request it pops. */
593 static void *lowmem_reclaim_worker(void *arg)
595 struct lowmem_worker *lmw = (struct lowmem_worker *)arg;
597 setpriority(PRIO_PROCESS, 0, OOM_KILLER_PRIORITY);
599 g_async_queue_ref(lmw->queue);
603 struct lowmem_control *ctl;
605 LOWMEM_WORKER_IDLE(lmw);
606 /* Wait on any wake-up call */
607 ctl = g_async_queue_pop(lmw->queue);
610 _W("[LMK] ctl structure is NULL");
/* Drop requests once the worker is being shut down. */
614 if ((ctl->flags & OOM_DROP) || !LOWMEM_WORKER_IS_ACTIVE(lmw)) {
615 LOWMEM_DESTROY_REQUEST(ctl);
619 LOWMEM_WORKER_RUN(lmw);
621 _D("[LMK] %d tries", ++try_count);
622 lowmem_handle_request(ctl);
624 * Case the process failed to reclaim requested amount of memory
625 * or still under have memory pressure - try the timeout wait.
626 * There is a chance this will get woken-up in a better reality.
628 if (ctl->status == LOWMEM_RECLAIM_RETRY &&
629 !(ctl->flags & OOM_SINGLE_SHOT)) {
630 unsigned int available_mb = proc_get_mem_available();
632 if (available_mb >= ctl->size_mb) {
633 _I("[LMK] Memory restored: requested=%uMB available=%uMB\n",
634 ctl->size_mb, available_mb);
635 ctl->status = LOWMEM_RECLAIM_DONE;
638 LOWMEM_DESTROY_REQUEST(ctl);
639 LOWMEM_WORKER_IDLE(lmw);
/* Still under pressure: wait, then retry the same request. */
643 if (LOWMEM_WORKER_IS_ACTIVE(lmw)) {
644 g_usleep(LMW_RETRY_WAIT_TIMEOUT_MSEC);
645 ctl->flags |= OOM_REVISE;
651 * The ctl callback would check available size again.
652 * And it is last point in reclaiming worker.
653 * Resourced sent SIGKILL signal to victim processes
654 * so it should wait for a some seconds until each processes returns memory.
656 g_usleep(LMW_LOOP_WAIT_TIMEOUT_MSEC);
660 /* The lmk becomes the owner of all queued requests .. */
661 LOWMEM_DESTROY_REQUEST(ctl);
662 LOWMEM_WORKER_IDLE(lmw);
664 g_async_queue_unref(lmw->queue);
/* Record the new pressure level, refresh the LMK engage threshold from
 * the root memcg, and broadcast MEM_LEVEL_CHANGED to other modules. */
668 static void change_lowmem_state(unsigned int mem_state)
670 cur_mem_state = mem_state;
671 lmk_start_threshold_mb = get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM];
673 resourced_notify(RESOURCED_NOTIFIER_MEM_LEVEL_CHANGED,
674 (void *)&cur_mem_state);
677 /* only app can call this function
678 * that is, service cannot call the function
680 static void lowmem_swap_memory(char *path)
682 unsigned int available_mb;
/* Swap only makes sense under pressure and with swap enabled. */
684 if (cur_mem_state == MEM_LEVEL_HIGH)
687 if (swap_get_state() != SWAP_ON)
690 available_mb = proc_get_mem_available();
691 if (cur_mem_state != MEM_LEVEL_LOW &&
692 available_mb <= get_root_memcg_info()->threshold_mb[MEM_LEVEL_LOW])
/* Kick swap for the given memcg path; remember so the limit can be
 * unset when memory returns to the high level (see high_mem_act). */
695 resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
696 memcg_swap_status = true;
/* Force-swap a process: if its oom_score_adj is below the throttling
 * range, raise it (which moves it to MEMCG_THROTTLING and triggers
 * lowmem_swap_memory via the setter); otherwise start swap directly
 * on the given cgroup path. */
699 void lowmem_trigger_swap(pid_t pid, char *path, bool move)
703 int lowest_oom_score_adj;
706 _E("[SWAP] Unknown memory cgroup path to swap");
710 /* In this case, corresponding process will be moved to memory MEMCG_THROTTLING.
713 error = proc_get_oom_score_adj(pid, &oom_score_adj);
715 _E("[SWAP] Cannot get oom_score_adj of pid (%d)", pid);
719 lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(OOM_SCORE_LOW);
721 if (oom_score_adj < lowest_oom_score_adj) {
722 oom_score_adj = lowest_oom_score_adj;
723 /* End of this funciton, 'lowmem_swap_memory()' funciton will be called */
724 proc_set_oom_score_adj(pid, oom_score_adj, find_app_info(pid));
729 /* Correponding process is already managed per app or service.
730 * In addition, if some process is already located in the MEMCG_THROTTLING, then just do swap
732 resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
/* Publish the pressure level as a SYS_EVENT_LOW_MEMORY system event
 * (MEDIUM -> normal, CRITICAL -> soft warning, the remaining handled
 * level -> hard warning). */
735 static void memory_level_send_system_event(int lv)
742 case MEM_LEVEL_MEDIUM:
744 str = EVT_VAL_MEMORY_NORMAL;
746 case MEM_LEVEL_CRITICAL:
747 str = EVT_VAL_MEMORY_SOFT_WARNING;
750 str = EVT_VAL_MEMORY_HARD_WARNING;
759 _E("Failed to create bundle");
763 bundle_add_str(b, EVT_KEY_LOW_MEMORY, str);
764 eventsystem_send_system_event(SYS_EVENT_LOW_MEMORY, b);
/* Transition back to MEM_LEVEL_HIGH: restore the vconf low-memory key
 * to normal, undo the throttling-cgroup swap limit if one was set, and
 * resume the freezer if it was paused during OOM handling. */
768 static void high_mem_act(void)
772 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
774 _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
775 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
776 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
777 VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
778 memory_level_send_system_event(MEM_LEVEL_HIGH);
781 change_lowmem_state(MEM_LEVEL_HIGH);
783 if (swap_get_state() == SWAP_ON && memcg_swap_status) {
784 resourced_notify(RESOURCED_NOTIFIER_SWAP_UNSET_LIMIT, get_memcg_info(MEMCG_THROTTLING));
785 memcg_swap_status = false;
787 if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
788 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
789 (void *)CGROUP_FREEZER_ENABLED);
/* Handle the MEM_LEVEL_LOW transition: normalize the vconf key, emit
 * the system event, resume a paused freezer, and activate swap if it
 * is not already on. */
792 static void swap_activate_act(void)
796 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
798 _E("vconf get failed %s", VCONFKEY_SYSMAN_LOW_MEMORY);
800 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
801 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
802 VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
803 memory_level_send_system_event(MEM_LEVEL_LOW);
805 change_lowmem_state(MEM_LEVEL_LOW);
806 if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
807 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
808 (void *)CGROUP_FREEZER_ENABLED);
810 if (swap_get_state() != SWAP_ON)
811 resourced_notify(RESOURCED_NOTIFIER_SWAP_ACTIVATE, NULL);
/* Trigger a KSM dedup scan.  PARTIAL scans accompany the
 * MEM_LEVEL_MEDIUM transition (vconf/system-event bookkeeping);
 * FULL scans are fired as-is.  Requires dedup in ONE_SHOT mode. */
814 static void dedup_act(enum ksm_scan_mode mode)
819 if (dedup_get_state() != DEDUP_ONE_SHOT)
822 if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
823 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
824 (void *)CGROUP_FREEZER_ENABLED);
826 if (mode == KSM_SCAN_PARTIAL) {
827 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
829 _E("vconf get failed %s", VCONFKEY_SYSMAN_LOW_MEMORY);
831 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
832 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
833 VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
834 memory_level_send_system_event(MEM_LEVEL_MEDIUM);
836 change_lowmem_state(MEM_LEVEL_MEDIUM);
838 data = KSM_SCAN_PARTIAL;
839 resourced_notify(RESOURCED_NOTIFIER_DEDUP_SCAN, &data);
840 } else if (mode == KSM_SCAN_FULL) {
841 data = KSM_SCAN_FULL;
842 resourced_notify(RESOURCED_NOTIFIER_DEDUP_SCAN, &data);
/* MEM_LEVEL_CRITICAL action: record the state, request swap compaction
 * and emit the corresponding system event. */
846 static void swap_compact_act(void)
848 change_lowmem_state(MEM_LEVEL_CRITICAL);
849 resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_CRITICAL);
850 memory_level_send_system_event(MEM_LEVEL_CRITICAL);
/* Completion callback for the lmk_act() reclaim request: on success,
 * drive the state machine back toward MEM_LEVEL_HIGH. */
853 static void medium_cb(struct lowmem_control *ctl)
855 if (ctl->status == LOWMEM_RECLAIM_DONE)
857 lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
/* MEM_LEVEL_OOM action: pause the freezer, flip the vconf key to hard
 * warning, and (unless the worker is already running) queue an
 * OOM_IN_DEPTH reclaim request targeting the root memcg leave level,
 * finishing with a swap-compact notification. */
860 static void lmk_act(void)
862 unsigned int available_mb;
864 int status = VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL;
867 * Don't trigger reclaim worker
868 * if it is already running
870 if (LOWMEM_WORKER_IS_RUNNING(&lmw))
873 ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
875 _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
877 memory_level_send_system_event(MEM_LEVEL_OOM);
878 if (status != VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING) {
879 if (proc_get_freezer_status() == CGROUP_FREEZER_ENABLED)
880 resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
881 (void *)CGROUP_FREEZER_PAUSED);
882 vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
883 VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING);
885 available_mb = proc_get_mem_available();
887 change_lowmem_state(MEM_LEVEL_OOM);
/* Only reclaim when below the leave level; victims start at the
 * lowest-priority bucket and the request escalates on its own. */
889 if (available_mb < get_root_memcg_info()->threshold_leave_mb) {
890 struct lowmem_control *ctl;
892 ctl = LOWMEM_NEW_REQUEST();
894 LOWMEM_SET_REQUEST(ctl, OOM_IN_DEPTH,
895 OOM_SCORE_LOW, get_root_memcg_info()->threshold_leave_mb,
896 num_max_victims, medium_cb);
897 lowmem_queue_request(&lmw, ctl);
901 resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_OOM);
904 * Flush resourced memory such as other processes.
905 * Resourced can use both many fast bins and sqlite3 cache memery.
/* Dispatch the action for a (possibly new) pressure level.  OOM is
 * deliberately re-entered even when unchanged so the reclaim worker
 * can be re-armed; other levels are skipped when already current. */
912 void lowmem_trigger_memory_state_action(int mem_state)
915 * Check if the state we want to set is different from current
916 * But it should except this condition if mem_state is already medium.
917 * Otherwise, recalim worker couldn't run any more.
919 if (mem_state != MEM_LEVEL_OOM && cur_mem_state == mem_state)
926 case MEM_LEVEL_MEDIUM:
927 dedup_act(KSM_SCAN_PARTIAL);
932 case MEM_LEVEL_CRITICAL:
933 dedup_act(KSM_SCAN_FULL);
/* Map available memory (MB) to the most severe matching MEM_LEVEL_*,
 * scanning from worst to best; the OOM level compares against the
 * dynamic lmk_start_threshold_mb rather than the static table. */
944 static unsigned int check_mem_state(unsigned int available_mb)
947 for (mem_state = MEM_LEVEL_MAX - 1; mem_state > MEM_LEVEL_HIGH; mem_state--) {
948 if (mem_state != MEM_LEVEL_OOM &&
949 available_mb <= get_root_memcg_info()->threshold_mb[mem_state])
951 else if (mem_state == MEM_LEVEL_OOM && available_mb <= lmk_start_threshold_mb)
958 /* setup memcg parameters depending on total ram size. */
959 static void setup_memcg_params(void)
961 unsigned long total_ramsize_mb;
964 total_ramsize_mb = BYTE_TO_MBYTE(totalram_bytes);
966 _D("Total: %lu MB", total_ramsize_mb);
967 if (total_ramsize_mb <= MEM_SIZE_64) {
968 /* set thresholds for ram size 64M */
969 proactive_threshold_mb = PROACTIVE_64_THRES;
970 proactive_leave_mb = PROACTIVE_64_LEAVE;
971 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_64_THRES_DEDUP);
972 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_64_THRES_SWAP);
973 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_64_THRES_LOW);
974 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_64_THRES_MEDIUM);
975 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_64_THRES_LEAVE);
976 num_max_victims = CGROUP_ROOT_64_NUM_VICTIMS;
977 } else if (total_ramsize_mb <= MEM_SIZE_256) {
978 /* set thresholds for ram size 256M */
979 proactive_threshold_mb = PROACTIVE_256_THRES;
980 proactive_leave_mb = PROACTIVE_256_LEAVE;
981 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_256_THRES_DEDUP);
982 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_256_THRES_SWAP);
983 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_256_THRES_LOW);
984 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_256_THRES_MEDIUM);
985 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_256_THRES_LEAVE);
986 num_max_victims = CGROUP_ROOT_256_NUM_VICTIMS;
987 } else if (total_ramsize_mb <= MEM_SIZE_448) {
988 /* set thresholds for ram size 448M */
989 proactive_threshold_mb = PROACTIVE_448_THRES;
990 proactive_leave_mb = PROACTIVE_448_LEAVE;
991 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_448_THRES_DEDUP);
992 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_448_THRES_SWAP);
993 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_448_THRES_LOW);
994 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_448_THRES_MEDIUM);
995 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_448_THRES_LEAVE);
996 num_max_victims = CGROUP_ROOT_448_NUM_VICTIMS;
997 } else if (total_ramsize_mb <= MEM_SIZE_512) {
998 /* set thresholds for ram size 512M */
999 proactive_threshold_mb = PROACTIVE_512_THRES;
1000 proactive_leave_mb = PROACTIVE_512_LEAVE;
1001 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_512_THRES_DEDUP);
1002 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_512_THRES_SWAP);
1003 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_512_THRES_LOW);
1004 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_512_THRES_MEDIUM);
1005 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_512_THRES_LEAVE);
1006 num_max_victims = CGROUP_ROOT_512_NUM_VICTIMS;
1007 } else if (total_ramsize_mb <= MEM_SIZE_768) {
1008 /* set thresholds for ram size 768M */
1009 proactive_threshold_mb = PROACTIVE_768_THRES;
1010 proactive_leave_mb = PROACTIVE_768_LEAVE;
1011 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_768_THRES_DEDUP);
1012 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_768_THRES_SWAP);
1013 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_768_THRES_LOW);
1014 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_768_THRES_MEDIUM);
1015 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_768_THRES_LEAVE);
1016 num_max_victims = CGROUP_ROOT_768_NUM_VICTIMS;
1017 } else if (total_ramsize_mb <= MEM_SIZE_1024) {
1018 /* set thresholds for ram size more than 1G */
1019 proactive_threshold_mb = PROACTIVE_1024_THRES;
1020 proactive_leave_mb = PROACTIVE_1024_LEAVE;
1021 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_1024_THRES_DEDUP);
1022 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_1024_THRES_SWAP);
1023 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_1024_THRES_LOW);
1024 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_1024_THRES_MEDIUM);
1025 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_1024_THRES_LEAVE);
1026 num_max_victims = CGROUP_ROOT_1024_NUM_VICTIMS;
1027 } else if (total_ramsize_mb <= MEM_SIZE_2048) {
1028 proactive_threshold_mb = PROACTIVE_2048_THRES;
1029 proactive_leave_mb = PROACTIVE_2048_LEAVE;
1030 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_2048_THRES_DEDUP);
1031 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_2048_THRES_SWAP);
1032 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_2048_THRES_LOW);
1033 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_2048_THRES_MEDIUM);
1034 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_2048_THRES_LEAVE);
1035 num_max_victims = CGROUP_ROOT_2048_NUM_VICTIMS;
1037 proactive_threshold_mb = PROACTIVE_3072_THRES;
1038 proactive_leave_mb = PROACTIVE_3072_LEAVE;
1039 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_3072_THRES_DEDUP);
1040 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_3072_THRES_SWAP);
1041 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_3072_THRES_LOW);
1042 memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_3072_THRES_MEDIUM);
1043 memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_3072_THRES_LEAVE);
1044 num_max_victims = CGROUP_ROOT_3072_NUM_VICTIMS;
/* Place a pid into the memcg matching its new oom_score_adj.  For an
 * app's main pid this also updates the proc_app_info state, re-applies
 * any per-app memory limit, and starts swap when the destination is
 * the throttling cgroup; other pids are written directly. */
1048 static void lowmem_move_memcgroup(int pid, int next_oom_score_adj, struct proc_app_info *pai)
1050 int cur_oom_score_adj;
1052 struct memcg_info *mi;
1053 int next_memcg_idx = cgroup_get_type(next_oom_score_adj);
1055 mi = get_memcg_info(next_memcg_idx);
1062 cgroup_write_pid_fullpath(mi->name, pid);
1067 if (pai->main_pid == pid) {
1068 cur_oom_score_adj = pai->memory.oom_score_adj;
1069 cur_memcg_idx = cgroup_get_type(cur_oom_score_adj);
/* No-op when the score (and hence the cgroup) is unchanged. */
1071 if (cur_oom_score_adj == next_oom_score_adj) {
1072 _D("next oom_score_adj (%d) is same with current one", next_oom_score_adj);
1076 proc_set_process_memory_state(pai, next_memcg_idx, mi, next_oom_score_adj);
1078 if (!lowmem_limit_move_cgroup(pai))
1081 if(cur_memcg_idx == next_memcg_idx)
1084 _I("app (%s) memory cgroup move from %s to %s", pai->appid, lowmem_convert_cgroup_type_to_str(cur_memcg_idx), lowmem_convert_cgroup_type_to_str(next_memcg_idx));
1085 cgroup_write_pid_fullpath(mi->name, pid);
1086 if (next_memcg_idx == MEMCG_THROTTLING)
1087 lowmem_swap_memory(get_memcg_info(MEMCG_THROTTLING)->name);
1091 if (pai->memory.use_mem_limit)
1094 cgroup_write_pid_fullpath(mi->name, pid);
/* Start the LMK worker: create the request queue, mark the worker
 * active, and spawn the detached reclaim thread.  Idempotent when the
 * worker is already active; rolls the active flag back on thread
 * creation failure. */
1098 static int lowmem_activate_worker(void)
1100 int ret = RESOURCED_ERROR_NONE;
1102 if (LOWMEM_WORKER_IS_ACTIVE(&lmw)) {
1106 lmw.queue = g_async_queue_new_full(lowmem_request_destroy);
1108 _E("Failed to create request queue\n");
1109 return RESOURCED_ERROR_FAIL;
1111 LOWMEM_WORKER_ACTIVATE(&lmw);
1112 ret = pthread_create(&lmw.worker_thread, NULL,
1113 (void *)lowmem_reclaim_worker, (void *)&lmw);
1115 LOWMEM_WORKER_DEACTIVATE(&lmw);
1116 _E("Failed to create LMK thread: %d\n", ret);
1118 pthread_detach(lmw.worker_thread);
1119 ret = RESOURCED_ERROR_NONE;
/**
 * Stop the low-memory-killer worker thread.
 *
 * Drains pending requests, then pushes an OOM_DROP request so the worker
 * wakes up, sees the deactivated state, and exits; finally drops our
 * reference to the queue.
 */
static void lowmem_deactivate_worker(void)
{
	struct lowmem_control *ctl;

	if (!LOWMEM_WORKER_IS_ACTIVE(&lmw))
		return;

	LOWMEM_WORKER_DEACTIVATE(&lmw);
	lowmem_drain_queue(&lmw);

	ctl = LOWMEM_NEW_REQUEST();
	if (!ctl) {
		_E("Critical - g_slice alloc failed - Lowmem cannot be deactivated");
		return;
	}
	/* OOM_DROP is the poison pill that makes the worker terminate. */
	ctl->flags = OOM_DROP;
	g_async_queue_push(lmw.queue, ctl);
	g_async_queue_unref(lmw.queue);
}
/**
 * Memory-pressure callback for the root memory cgroup.
 *
 * Re-evaluates the memory level from current available memory and runs
 * the matching state action.  Skips the work when available memory has
 * not changed since the previous invocation (cheap debounce).
 */
static void lowmem_press_root_cgroup_handler(void)
{
	static unsigned int prev_available_mb;
	unsigned int available_mb;
	int mem_state;

	available_mb = proc_get_mem_available();
	if (prev_available_mb == available_mb)
		return;

	mem_state = check_mem_state(available_mb);
	lowmem_trigger_memory_state_action(mem_state);
	prev_available_mb = available_mb;
}
/* Completion callback for forced reclaim: re-evaluate memory state
 * starting from MEM_LEVEL_HIGH (force=0, so the real level is probed). */
static void lowmem_force_reclaim_cb(struct lowmem_control *ctl)
{
	lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
}
/**
 * Queue a forced, one-shot memory reclaim request to the LMK worker.
 *
 * @param flags        extra OOM_* flags; OOM_FORCE|OOM_IN_DEPTH|OOM_SINGLE_SHOT
 *                     are always added
 * @param victims      max victims; <=0 selects MAX_MEMORY_CGROUP_VICTIMS
 * @param score        minimum oom score; <=0 selects OOM_SCORE_LOW
 * @param threshold_mb reclaim target in MB; <=0 selects the root cgroup's
 *                     leave threshold
 * @return 0 on success, negative errno on allocation failure.
 */
int lowmem_trigger_reclaim(int flags, int victims, enum oom_score score, int threshold_mb)
{
	struct lowmem_control *ctl = LOWMEM_NEW_REQUEST();

	if (!ctl)
		return -ENOMEM;

	flags |= OOM_FORCE | OOM_IN_DEPTH | OOM_SINGLE_SHOT;
	victims = victims > 0 ? victims : MAX_MEMORY_CGROUP_VICTIMS;
	score = score > 0 ? score : OOM_SCORE_LOW;
	threshold_mb = threshold_mb > 0 ? threshold_mb : get_root_memcg_info()->threshold_leave_mb;

	/* Force CRITICAL state so the reclaim path runs unconditionally. */
	lowmem_change_memory_state(MEM_LEVEL_CRITICAL, 1);
	LOWMEM_SET_REQUEST(ctl, flags,
		score, threshold_mb, victims,
		lowmem_force_reclaim_cb);
	lowmem_queue_request(&lmw, ctl);

	return 0;
}
/**
 * Trigger reclaim sized to free the root leave threshold plus the given
 * swap size (bytes), capped at MAX_PROACTIVE_HIGH_VICTIMS victims.
 */
void lowmem_trigger_swap_reclaim(enum oom_score score, unsigned long long swap_size_bytes)
{
	int size_mb, victims;

	victims = num_max_victims > MAX_PROACTIVE_HIGH_VICTIMS
		? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;

	size_mb = get_root_memcg_info()->threshold_leave_mb + BYTE_TO_MBYTE(swap_size_bytes);
	lowmem_trigger_reclaim(0, victims, score, size_mb);
}
/**
 * Detect kernel memory fragmentation in the "Normal" zone.
 *
 * @return true when the weighted count of order>=2 buddy pages falls
 *         below @fragmentation_size, false otherwise (or if buddyinfo
 *         cannot be read).
 */
bool lowmem_fragmentated(void)
{
	struct buddyinfo bi;
	int ret;

	ret = proc_get_buddyinfo("Normal", &bi);
	if (ret < 0)
		return false;

	/*
	 * The fragmentation_size is the minimum count of order-2 pages in "Normal" zone.
	 * If total buddy pages is smaller than fragmentation_size,
	 * resourced will detect kernel memory is fragmented.
	 * Default value is zero in low memory device.
	 */
	/* Weight each order as its multiple of 32K pages: 64K=2x, 128K=4x, 256K=8x. */
	if (bi.page[PAGE_32K] + (bi.page[PAGE_64K] << 1) + (bi.page[PAGE_128K] << 2) +
		(bi.page[PAGE_256K] << 3) < fragmentation_size) {
		_I("fragmentation detected, need to execute proactive oom killer");
		return true;
	}

	return false;
}
1219 static void lowmem_proactive_oom_killer(int flags, char *appid)
1221 unsigned int before_mb;
1224 before_mb = proc_get_mem_available();
1226 /* If memory state is medium or normal, just return and kill in oom killer */
1227 if (before_mb < get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM] ||
1228 before_mb > proactive_leave_mb)
1231 victims = num_max_victims > MAX_PROACTIVE_HIGH_VICTIMS
1232 ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
1234 #ifdef HEART_SUPPORT
1236 * This branch is used only when HEART module is compiled in and
1237 * it's MEMORY module must be enabled. Otherwise this is skipped.
1239 struct heart_memory_data *md = heart_memory_get_memdata(appid, DATA_LATEST);
1241 unsigned int rss_mb, after_mb, size_mb;
1243 rss_mb = KBYTE_TO_MBYTE(md->avg_rss);
1247 after_mb = before_mb - rss_mb;
1249 * after launching app, ensure that available memory is
1250 * above threshold_leave
1252 if (after_mb >= get_root_memcg_info()->threshold[MEM_LEVEL_OOM])
1255 if (proactive_threshold_mb - rss_mb >= get_root_memcg_info()->threshold[MEM_LEVEL_OOM])
1256 size_mb = proactive_threshold_mb;
1258 size_mb = rss_mb + get_root_memcg_info()->threshold[MEM_LEVEL_OOM] + THRESHOLD_MARGIN;
1260 _D("history based proactive LMK : avg rss %u, available %u required = %u MB",
1261 rss_mb, before_mb, size_mb);
1262 lowmem_trigger_reclaim(0, victims, OOM_SCORE_LOW, size_mb);
1269 * When there is no history data for the launching app,
1270 * it is necessary to check current fragmentation state or application manifest file.
1271 * So, resourced feels proactive LMK is required, run oom killer based on dynamic
1274 if (lowmem_fragmentated())
1278 * run proactive oom killer only when available is larger than
1279 * dynamic process threshold
1281 if (!proactive_threshold_mb || before_mb >= proactive_threshold_mb)
1284 if (!(flags & PROC_LARGEMEMORY))
1289 * free THRESHOLD_MARGIN more than real should be freed,
1290 * because launching app is consuming up the memory.
1292 _D("Run threshold based proactive LMK: memory level to reach: %u MB\n",
1293 proactive_leave_mb + THRESHOLD_MARGIN);
1294 lowmem_trigger_reclaim(0, victims, OOM_SCORE_LOW, proactive_leave_mb + THRESHOLD_MARGIN);
/* Accessor: current proactive LMK trigger threshold in MB. */
unsigned int lowmem_get_proactive_thres(void)
{
	return proactive_threshold_mb;
}
1302 static int lowmem_prelaunch_handler(void *data)
1304 struct proc_status *ps = (struct proc_status *)data;
1305 struct proc_app_info *pai = ps->pai;
1307 if (!pai || CHECK_BIT(pai->flags, PROC_SERVICEAPP))
1308 return RESOURCED_ERROR_NONE;
1310 lowmem_proactive_oom_killer(ps->pai->flags, ps->pai->appid);
1311 return RESOURCED_ERROR_NONE;
/**
 * RESOURCED_NOTIFIER_MEM_CONTROL handler.
 *
 * Dispatches memory-control requests; currently only cgroup moves are
 * supported.
 *
 * @param data  struct lowmem_control_data describing the request
 * @return always RESOURCED_ERROR_NONE
 */
int lowmem_control_handler(void *data)
{
	struct lowmem_control_data *lowmem_data;

	lowmem_data = (struct lowmem_control_data *)data;
	switch (lowmem_data->control_type) {
	case LOWMEM_MOVE_CGROUP:
		lowmem_move_memcgroup((pid_t)lowmem_data->pid,
			lowmem_data->oom_score_adj, lowmem_data->pai);
		break;
	default:
		break;
	}
	return RESOURCED_ERROR_NONE;
}
1330 static inline int calculate_threshold_size(double ratio)
1332 unsigned long long size_bytes = (double)totalram_bytes * ratio / 100.0;
1333 return BYTE_TO_MBYTE(size_bytes);
/**
 * Load memory-cgroup configuration and apply it.
 *
 * Reads the parsed memcg configuration and applies: per-cgroup limits
 * (MemoryGroupLimit), per-level thresholds (MemoryLevelThreshold, either
 * percent-of-RAM or absolute MB), the OOM popup flag, and per-app-type
 * memory limits/actions.
 *
 * NOTE(review): if get_memcg_conf() returns heap-allocated data, a
 * matching free/cleanup call may be required at the end — confirm
 * against memory-cgroup.h.
 */
static void load_configs(void)
{
	struct memcg_conf *memcg_conf = get_memcg_conf();

	/* set MemoryGroupLimit section */
	for (int cgroup = MEMCG_THROTTLING; cgroup < MEMCG_END; cgroup++) {
		if (memcg_conf->cgroup_limit[cgroup] > 0.0)
			memcg_info_set_limit(get_memcg_info(cgroup),
				memcg_conf->cgroup_limit[cgroup]/100.0, totalram_bytes);
	}

	/* set MemoryLevelThreshold section */
	for (int lvl = MEM_LEVEL_MEDIUM; lvl < MEM_LEVEL_MAX; lvl++) {
		/* percent=true: threshold is % of total RAM */
		if (memcg_conf->threshold[lvl].percent &&
			memcg_conf->threshold[lvl].threshold > 0) {
			memcg_set_threshold(MEMCG_ROOT, lvl,
				calculate_threshold_size(memcg_conf->threshold[lvl].threshold));

			/* OOM level also derives leave/proactive thresholds (x1.5). */
			if (lvl == MEM_LEVEL_OOM) {
				memcg_set_leave_threshold(MEMCG_ROOT,
					get_memcg_info(MEMCG_ROOT)->threshold_mb[lvl] * 1.5);
				proactive_threshold_mb = get_memcg_info(MEMCG_ROOT)->threshold_leave_mb;
				proactive_leave_mb = proactive_threshold_mb * 1.5;
			}
		}
		/* percent=false: threshold is an absolute value in MB */
		else if (memcg_conf->threshold[lvl].threshold > 0) {
			memcg_set_threshold(MEMCG_ROOT, lvl,
				memcg_conf->threshold[lvl].threshold);

			/* OOM level also derives leave/proactive thresholds (x1.5). */
			if (lvl == MEM_LEVEL_OOM) {
				memcg_set_leave_threshold(MEMCG_ROOT,
					get_memcg_info(MEMCG_ROOT)->threshold_mb[lvl] * 1.5);
				proactive_threshold_mb = get_memcg_info(MEMCG_ROOT)->threshold_leave_mb;
				proactive_leave_mb = proactive_threshold_mb * 1.5;
			}
		}
	}
	oom_popup_enable = memcg_conf->oom_popup;

	/* set MemoryAppTypeLimit and MemoryAppStatusLimit section */
	lowmem_memory_init(memcg_conf->service.memory_bytes, memcg_conf->widget.memory_bytes,
		memcg_conf->guiapp.memory_bytes, memcg_conf->background.memory_bytes);
	lowmem_action_init(memcg_conf->service.action, memcg_conf->widget.action,
		memcg_conf->guiapp.action, memcg_conf->background.action);
}
/* Log the effective memory configuration: per-cgroup limits, per-level
 * thresholds, LMK victim caps, and the OOM popup setting. */
static void print_mem_configs(void)
{
	/* print info of Memory section */
	for (int cgroup = MEMCG_THROTTLING; cgroup < MEMCG_END; cgroup++) {
		_I("[MEMORY-CGROUP] set memory for cgroup '%s' to %llu bytes",
			lowmem_convert_cgroup_type_to_str(cgroup), get_memcg_info(cgroup)->limit_bytes);
	}

	for (int cgroup = MEMCG_ROOT; cgroup < MEMCG_END; cgroup++) {
		for (int mem_lvl = 0; mem_lvl < MEM_LEVEL_MAX; mem_lvl++) {
			_I("[MEMORY-LEVEL] set threshold of %s for memory level '%s' to %u MB", lowmem_convert_cgroup_type_to_str(cgroup),
				convert_memstate_to_str(mem_lvl), get_memcg_info(cgroup)->threshold_mb[mem_lvl]);
		}
	}

	_I("[LMK] set number of max victims as %d", num_max_victims);
	_I("[LMK] set threshold leave to %u MB", get_root_memcg_info()->threshold_leave_mb);
	_I("[LMK] set proactive threshold to %u MB", proactive_threshold_mb);
	_I("[LMK] set proactive low memory killer leave to %u MB", proactive_leave_mb);

	/* print info of POPUP section */
	_I("[POPUP] oom popup is %s", oom_popup_enable == true ? "enabled" : "disabled");
}
1409 /* To Do: should we need lowmem_fd_start, lowmem_fd_stop ?? */
/**
 * Module init: set up memory cgroups, load configuration, start the LMK
 * worker, hook memory-pressure events, and register notifiers.
 *
 * @return RESOURCED_ERROR_NONE on success, a negative/error value when
 *         cgroup setup, the worker thread, or pressure monitoring fails.
 */
static int lowmem_init(void)
{
	int ret = RESOURCED_ERROR_NONE;

	_D("resourced memory init start");

	/* init memcg */
	ret = memcg_make_full_subdir(MEMCG_PATH);
	ret_value_msg_if(ret < 0, ret, "memory cgroup init failed\n");
	memcg_params_init();

	setup_memcg_params();

	/* default configuration */
	load_configs();

	/* this function should be called after parsing configurations */
	memcg_write_limiter_params();
	print_mem_configs();

	/* make a worker thread called low memory killer */
	ret = lowmem_activate_worker();
	if (ret) {
		_E("[LMK] oom thread create failed\n");
		return ret;
	}

	/* register threshold and event fd */
	ret = lowmem_monitor_pressure_initialize(
		lowmem_press_root_cgroup_handler);
	if (ret) {
		_E("[MEMORY-LIMIT] eventfd setup failed");
		return ret;
	}

	lowmem_limit_init();
	lowmem_system_init();

	register_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
	register_notifier(RESOURCED_NOTIFIER_MEM_CONTROL, lowmem_control_handler);

	return ret;
}
/**
 * Module teardown: stop the LMK worker, shut down the limit and system
 * submodules, and unregister the notifiers added in lowmem_init().
 *
 * @return always RESOURCED_ERROR_NONE
 */
static int lowmem_exit(void)
{
	lowmem_deactivate_worker();
	lowmem_limit_exit();
	lowmem_system_exit();

	unregister_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
	unregister_notifier(RESOURCED_NOTIFIER_MEM_CONTROL, lowmem_control_handler);

	return RESOURCED_ERROR_NONE;
}
/* module_ops .init hook; @data is unused. */
static int resourced_memory_init(void *data)
{
	return lowmem_init();
}
/* module_ops .exit hook; @data is unused. */
static int resourced_memory_finalize(void *data)
{
	return lowmem_exit();
}
/**
 * Apply a memory-level state action.
 *
 * @param state  level to apply when @force is non-zero
 * @param force  non-zero: use @state as-is; zero: ignore @state and
 *               derive the level from currently available memory
 */
void lowmem_change_memory_state(int state, int force)
{
	int mem_state;

	if (force) {
		mem_state = state;
	} else {
		unsigned int available_mb = proc_get_mem_available();
		mem_state = check_mem_state(available_mb);
	}

	lowmem_trigger_memory_state_action(mem_state);
}
/* Accessor for total RAM in KB (body elided in this listing; presumably
 * returns the file-scope ktotalram counterpart of totalram_bytes — confirm). */
unsigned long lowmem_get_ktotalram(void)
{
	return ktotalram;
}
/* Accessor: total RAM in bytes. */
unsigned long long lowmem_get_totalram(void)
{
	return totalram_bytes;
}
/**
 * Rebuild an app's cached memcg state from its live cgroup path.
 *
 * Used after resourced restarts: reads /proc/<pid>/cgroup for the app's
 * main pid and scans the cgroup tree (highest index first) for the group
 * whose hashname appears in the path.  Also re-detects a private per-app
 * limit cgroup by looking for the appid in the path.
 */
void lowmem_restore_memcg(struct proc_app_info *pai)
{
	char *cgpath;
	int index, ret;
	struct cgroup *cgroup = NULL;
	struct memcg_info *mi = NULL;
	pid_t pid = pai->main_pid;

	ret = cgroup_pid_get_path("memory", pid, &cgpath);
	if (ret < 0)
		return;

	/* Scan from the most specific cgroup down to the root. */
	for (index = MEMCG_END-1; index >= MEMCG_ROOT; index--) {
		cgroup = get_cgroup_tree(index);
		if (!cgroup)
			continue;

		mi = cgroup->memcg_info;
		if (!mi)
			continue;

		/* Groups without a hashname cannot be matched by path. */
		if (!strcmp(cgroup->hashname, ""))
			continue;
		if (strstr(cgpath, cgroup->hashname))
			break;
	}
	pai->memory.memcg_idx = index;
	pai->memory.memcg_info = mi;
	/* Path containing the appid means a private per-app limit cgroup. */
	if(strstr(cgpath, pai->appid))
		pai->memory.use_mem_limit = true;

	/* cgroup_pid_get_path allocates the path — presumably caller-freed;
	 * confirm against its declaration. */
	free(cgpath);
}
/* Module registration: early priority so memory handling is set up
 * before modules that depend on it. */
static struct module_ops memory_modules_ops = {
	.priority = MODULE_PRIORITY_EARLY,
	.init = resourced_memory_init,
	.exit = resourced_memory_finalize,
};

MODULE_REGISTER(&memory_modules_ops)