#define CGROUP_ROOT_3072_THRES_LEAVE 500 /* MB */
#define CGROUP_ROOT_3072_NUM_VICTIMS 10
-static unsigned proactive_threshold;
-static unsigned proactive_leave;
-static unsigned lmk_start_threshold;
+static unsigned proactive_threshold_mb;
+static unsigned proactive_leave_mb;
+static unsigned lmk_start_threshold_mb;
static char *event_level = MEMCG_DEFAULT_EVENT_LEVEL;
enum cgroup_type type;
/* Desired size to be restored - level to be reached (MB)*/
- unsigned int size;
+ unsigned int size_mb;
/* Max number of processes to be considered */
unsigned int count;
/* Memory reclaim status */
#define LOWMEM_SET_REQUEST(c, __flags, __type, __size, __count, __cb) \
{ \
	(c)->flags = __flags; (c)->type = __type; \
-	(c)->size = __size; (c)->count = __count; \
+	(c)->size_mb = __size; (c)->count = __count; \
	(c)->callback = __cb; \
}
static int num_max_victims = MAX_MEMORY_CGROUP_VICTIMS;
static int num_vict_between_check = MAX_VICTIMS_BETWEEN_CHECK;
-static unsigned long totalram;
-static unsigned long ktotalram;
+static unsigned long long totalram_bytes;
+static unsigned long totalram_kb;
static struct module_ops memory_modules_ops;
static const struct module_ops *lowmem_ops;
static inline void get_total_memory(void)
{
struct sysinfo si;
- if (totalram)
+ if (totalram_bytes)
return;
if (!sysinfo(&si)) {
- totalram = si.totalram;
- ktotalram = BYTE_TO_KBYTE(totalram);
+ totalram_bytes = (unsigned long long)si.totalram * si.mem_unit;
+ totalram_kb = BYTE_TO_KBYTE(totalram_bytes);
+
+ register_totalram_bytes(totalram_bytes);
+ }
+ else {
+ _E("Failed to get total ramsize from the kernel");
}
}
unsigned int lowmem_get_task_mem_usage_rss(const struct task_info *tsk)
{
- unsigned int size = 0, total_size = 0;
+ unsigned int size_kb = 0, total_size_kb = 0;
int index, ret;
pid_t pid;
* is used.
*/
if (tsk->pids == NULL) {
- ret = proc_get_ram_usage(tsk->pid, &size);
+ ret = proc_get_ram_usage(tsk->pid, &size_kb);
/* If there is no proc entry for given pid the process
* should be abandoned during further processing
if (ret < 0)
_D("failed to get rss memory usage of %d", tsk->pid);
- return size;
+ return size_kb;
}
for (index = 0; index < tsk->pids->len; index++) {
pid = g_array_index(tsk->pids, pid_t, index);
- ret = proc_get_ram_usage(pid, &size);
+ ret = proc_get_ram_usage(pid, &size_kb);
if (ret != RESOURCED_ERROR_NONE)
continue;
- total_size += size;
+ total_size_kb += size_kb;
}
- return total_size;
+ return total_size_kb;
}
static int lowmem_kill_victim(const struct task_info *tsk,
else
safe_kill(pid, SIGKILL);
- _D("[LMK] we killed, force(%d), %d (%s) score = %d, size: rss = %u, sigterm = %d\n",
+ _D("[LMK] we killed, force(%d), %d (%s) score = %d, size: rss = %u KB, sigterm = %d\n",
flags & OOM_FORCE, pid, appname, tsk->oom_score_adj,
tsk->size, sigterm);
*victim_size = tsk->size;
/* return LOWMEM_RECLAIM_CONT when killing should be continued */
static int lowmem_check_kill_continued(struct task_info *tsk, int flags)
{
- unsigned int available;
+ unsigned int available_mb;
/*
* Processes with the priority higher than perceptible are killed
tsk->pid, flags);
return LOWMEM_RECLAIM_DROP;
}
- available = proc_get_mem_available();
- if (available > lmk_start_threshold) {
+ available_mb = proc_get_mem_available();
+ if (available_mb > lmk_start_threshold_mb) {
_I("[LMK] available=%d MB, larger than %u MB, do not kill foreground",
- available, lmk_start_threshold);
+ available_mb, lmk_start_threshold_mb);
return LOWMEM_RECLAIM_RETRY;
}
return LOWMEM_RECLAIM_CONT;
* followed by kernel badness point calculation using heuristic.
* oom_score_adj is normalized by its unit, which varies -1000 ~ 1000.
*/
- pa = ta->oom_score_lru * (ktotalram / 2000) + ta->size;
- pb = tb->oom_score_lru * (ktotalram / 2000) + tb->size;
+ pa = ta->oom_score_lru * (totalram_kb / 2000) + ta->size;
+ pb = tb->oom_score_lru * (totalram_kb / 2000) + tb->size;
return pb - pa;
}
static unsigned int is_memory_recovered(unsigned int *avail, unsigned int thres)
{
unsigned int available = proc_get_mem_available();
- unsigned int should_be_freed = 0;
+ unsigned int should_be_freed_mb = 0;
if (available < thres)
- should_be_freed = thres - available;
+ should_be_freed_mb = thres - available;
/*
* free THRESHOLD_MARGIN more than real should be freed,
* because launching app is consuming up the memory.
*/
- if (should_be_freed > 0)
- should_be_freed += THRESHOLD_MARGIN;
+ if (should_be_freed_mb > 0)
+ should_be_freed_mb += THRESHOLD_MARGIN;
*avail = available;
- return should_be_freed;
+ return should_be_freed_mb;
}
static int lowmem_get_pids_proc(GArray *pids)
*/
static int lowmem_kill_victims(int max_victims,
int start_oom, int end_oom, unsigned should_be_freed, int flags,
- unsigned int *total_size, int *completed, int threshold)
+ unsigned int *total_size, int *completed, unsigned int threshold)
{
int total_count = 0;
GSList *proc_app_list = NULL;
struct task_info *tsk;
tsk = &g_array_index(candidates, struct task_info, i);
- tsk->size = lowmem_get_task_mem_usage_rss(tsk);
+ tsk->size = lowmem_get_task_mem_usage_rss(tsk); /* KB */
}
/*
int count = 0, victim_cnt = 0;
int max_victim_cnt = ctl->count;
int status = LOWMEM_RECLAIM_NONE;
- unsigned int available = 0;
- unsigned int total_size = 0;
+ unsigned int available_mb = 0;
+ unsigned int total_size_mb = 0;
unsigned int current_size = 0;
- unsigned int reclaim_size, shortfall = 0;
+ unsigned int reclaim_size_mb, shortfall_mb = 0;
enum cgroup_type cgroup_type = ctl->type;
- available = proc_get_mem_available();
- reclaim_size = ctl->size > available
- ? ctl->size - available : 0;
+ available_mb = proc_get_mem_available();
+ reclaim_size_mb = ctl->size_mb > available_mb /* MB */
+ ? ctl->size_mb - available_mb : 0;
- if (!reclaim_size) {
+ if (!reclaim_size_mb) {
status = LOWMEM_RECLAIM_DONE;
goto done;
}
if (calculate_range_of_oom(cgroup_type, &start_oom, &end_oom))
goto done;
- lmk_start_threshold = get_root_memcg_info()->threshold[MEM_LEVEL_OOM];
- shortfall = is_memory_recovered(&available, ctl->size);
+ lmk_start_threshold_mb = get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM];
+ shortfall_mb = is_memory_recovered(&available_mb, ctl->size_mb);
- if (!shortfall || !reclaim_size) {
+ if (!shortfall_mb || !reclaim_size_mb) {
status = LOWMEM_RECLAIM_DONE;
goto done;
}
/* precaution */
current_size = 0;
victim_cnt = lowmem_kill_victims(max_victim_cnt, start_oom, end_oom,
- reclaim_size, ctl->flags, ¤t_size, &status, ctl->size);
+ reclaim_size_mb, ctl->flags, ¤t_size, &status, ctl->size_mb);
if (victim_cnt) {
current_size = KBYTE_TO_MBYTE(current_size);
- reclaim_size -= reclaim_size > current_size
- ? current_size : reclaim_size;
- total_size += current_size;
+ reclaim_size_mb -= reclaim_size_mb > current_size
+ ? current_size : reclaim_size_mb;
+ total_size_mb += current_size;
count += victim_cnt;
- _I("[LMK] current: kill %d victims, reclaim_size=%uMB from %d to %d status=%s",
+ _I("[LMK] current: kill %d victims, reclaim_size=%uMB from %d to %d status=%s",
victim_cnt, current_size,
start_oom, end_oom, convert_status_to_str(status));
}
}
done:
_I("[LMK] Done: killed %d processes reclaimed=%uMB remaining=%uMB shortfall=%uMB status=%s",
- count, total_size, reclaim_size, shortfall, convert_status_to_str(status));
+ count, total_size_mb, reclaim_size_mb, shortfall_mb, convert_status_to_str(status));
/* After we finish reclaiming it's worth to remove oldest memps logs */
ctl->status = status;
*/
if (ctl->status == LOWMEM_RECLAIM_RETRY &&
!(ctl->flags & OOM_SINGLE_SHOT)) {
- unsigned int available = proc_get_mem_available();
+ unsigned int available_mb = proc_get_mem_available();
- if (available >= ctl->size) {
+ if (available_mb >= ctl->size_mb) {
_I("[LMK] Memory restored: requested=%uMB available=%uMB\n",
- ctl->size, available);
+ ctl->size_mb, available_mb);
ctl->status = LOWMEM_RECLAIM_DONE;
if (ctl->callback)
ctl->callback(ctl);
static void change_lowmem_state(unsigned int mem_state)
{
cur_mem_state = mem_state;
- lmk_start_threshold = get_root_memcg_info()->threshold[MEM_LEVEL_OOM];
+ lmk_start_threshold_mb = get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM];
resourced_notify(RESOURCED_NOTIFIER_MEM_LEVEL_CHANGED,
(void *)&cur_mem_state);
*/
static void lowmem_swap_memory(char *path)
{
- unsigned int available;
+ unsigned int available_mb;
if (cur_mem_state == MEM_LEVEL_HIGH)
return;
if (swap_get_state() != SWAP_ON)
return;
- available = proc_get_mem_available();
+ available_mb = proc_get_mem_available();
if (cur_mem_state != MEM_LEVEL_LOW &&
- available <= get_root_memcg_info()->threshold[MEM_LEVEL_LOW])
+ available_mb <= get_root_memcg_info()->threshold_mb[MEM_LEVEL_LOW])
swap_activate_act();
resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
static void lmk_act(void)
{
- unsigned int available;
+ unsigned int available_mb;
int ret;
int status = VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL;
vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING);
}
- available = proc_get_mem_available();
+ available_mb = proc_get_mem_available();
change_lowmem_state(MEM_LEVEL_OOM);
- if (available < get_root_memcg_info()->threshold_leave) {
+ if (available_mb < get_root_memcg_info()->threshold_leave_mb) {
struct lowmem_control *ctl;
ctl = LOWMEM_NEW_REQUEST();
if (ctl) {
LOWMEM_SET_REQUEST(ctl, OOM_IN_DEPTH,
- CGROUP_LOW, get_root_memcg_info()->threshold_leave,
+ CGROUP_LOW, get_root_memcg_info()->threshold_leave_mb,
num_max_victims, medium_cb);
lowmem_queue_request(&lmw, ctl);
}
}
}
-static unsigned int check_mem_state(unsigned int available)
+static unsigned int check_mem_state(unsigned int available_mb)
{
int mem_state;
for (mem_state = MEM_LEVEL_MAX - 1; mem_state > MEM_LEVEL_HIGH; mem_state--) {
- if (mem_state != MEM_LEVEL_OOM && available <= get_root_memcg_info()->threshold[mem_state])
+ if (mem_state != MEM_LEVEL_OOM &&
+ available_mb <= get_root_memcg_info()->threshold_mb[mem_state])
break;
- else if (mem_state == MEM_LEVEL_OOM && available <= lmk_start_threshold)
+ else if (mem_state == MEM_LEVEL_OOM && available_mb <= lmk_start_threshold_mb)
break;
}
/* setup memcg parameters depending on total ram size. */
static void setup_memcg_params(void)
{
- unsigned long long total_ramsize;
+ unsigned long total_ramsize_mb;
get_total_memory();
- total_ramsize = BYTE_TO_MBYTE(totalram);
+ total_ramsize_mb = BYTE_TO_MBYTE(totalram_bytes);
- _D("Total: %llu MB", total_ramsize);
- if (total_ramsize <= MEM_SIZE_64) {
+ _D("Total: %lu MB", total_ramsize_mb);
+ if (total_ramsize_mb <= MEM_SIZE_64) {
/* set thresholds for ram size 64M */
- proactive_threshold = PROACTIVE_64_THRES;
- proactive_leave = PROACTIVE_64_LEAVE;
+ proactive_threshold_mb = PROACTIVE_64_THRES;
+ proactive_leave_mb = PROACTIVE_64_LEAVE;
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_64_THRES_DEDUP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_64_THRES_SWAP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_64_THRES_LOW);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_64_THRES_MEDIUM);
memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_64_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_64_NUM_VICTIMS;
- } else if (total_ramsize <= MEM_SIZE_256) {
+ } else if (total_ramsize_mb <= MEM_SIZE_256) {
/* set thresholds for ram size 256M */
- proactive_threshold = PROACTIVE_256_THRES;
- proactive_leave = PROACTIVE_256_LEAVE;
+ proactive_threshold_mb = PROACTIVE_256_THRES;
+ proactive_leave_mb = PROACTIVE_256_LEAVE;
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_256_THRES_DEDUP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_256_THRES_SWAP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_256_THRES_LOW);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_256_THRES_MEDIUM);
memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_256_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_256_NUM_VICTIMS;
- } else if (total_ramsize <= MEM_SIZE_448) {
+ } else if (total_ramsize_mb <= MEM_SIZE_448) {
/* set thresholds for ram size 448M */
- proactive_threshold = PROACTIVE_448_THRES;
- proactive_leave = PROACTIVE_448_LEAVE;
+ proactive_threshold_mb = PROACTIVE_448_THRES;
+ proactive_leave_mb = PROACTIVE_448_LEAVE;
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_448_THRES_DEDUP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_448_THRES_SWAP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_448_THRES_LOW);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_448_THRES_MEDIUM);
memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_448_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_448_NUM_VICTIMS;
- } else if (total_ramsize <= MEM_SIZE_512) {
+ } else if (total_ramsize_mb <= MEM_SIZE_512) {
/* set thresholds for ram size 512M */
- proactive_threshold = PROACTIVE_512_THRES;
- proactive_leave = PROACTIVE_512_LEAVE;
+ proactive_threshold_mb = PROACTIVE_512_THRES;
+ proactive_leave_mb = PROACTIVE_512_LEAVE;
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_512_THRES_DEDUP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_512_THRES_SWAP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_512_THRES_LOW);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_512_THRES_MEDIUM);
memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_512_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_512_NUM_VICTIMS;
- } else if (total_ramsize <= MEM_SIZE_768) {
+ } else if (total_ramsize_mb <= MEM_SIZE_768) {
/* set thresholds for ram size 512M */
- proactive_threshold = PROACTIVE_768_THRES;
- proactive_leave = PROACTIVE_768_LEAVE;
+ proactive_threshold_mb = PROACTIVE_768_THRES;
+ proactive_leave_mb = PROACTIVE_768_LEAVE;
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_768_THRES_DEDUP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_768_THRES_SWAP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_768_THRES_LOW);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_768_THRES_MEDIUM);
memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_768_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_768_NUM_VICTIMS;
- } else if (total_ramsize <= MEM_SIZE_1024) {
+ } else if (total_ramsize_mb <= MEM_SIZE_1024) {
/* set thresholds for ram size more than 1G */
- proactive_threshold = PROACTIVE_1024_THRES;
- proactive_leave = PROACTIVE_1024_LEAVE;
+ proactive_threshold_mb = PROACTIVE_1024_THRES;
+ proactive_leave_mb = PROACTIVE_1024_LEAVE;
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_1024_THRES_DEDUP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_1024_THRES_SWAP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_1024_THRES_LOW);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_1024_THRES_MEDIUM);
memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_1024_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_1024_NUM_VICTIMS;
- } else if (total_ramsize <= MEM_SIZE_2048) {
- proactive_threshold = PROACTIVE_2048_THRES;
- proactive_leave = PROACTIVE_2048_LEAVE;
+ } else if (total_ramsize_mb <= MEM_SIZE_2048) {
+ proactive_threshold_mb = PROACTIVE_2048_THRES;
+ proactive_leave_mb = PROACTIVE_2048_LEAVE;
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_2048_THRES_DEDUP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_2048_THRES_SWAP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_2048_THRES_LOW);
memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_2048_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_2048_NUM_VICTIMS;
} else {
- proactive_threshold = PROACTIVE_3072_THRES;
- proactive_leave = PROACTIVE_3072_LEAVE;
+ proactive_threshold_mb = PROACTIVE_3072_THRES;
+ proactive_leave_mb = PROACTIVE_3072_LEAVE;
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_3072_THRES_DEDUP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_3072_THRES_SWAP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_3072_THRES_LOW);
cur_oom_score_adj = pai->memory.oom_score_adj;
cur_memcg_idx = cgroup_get_type(cur_oom_score_adj);
- /* -1 means that this pid is not yet registered at the memory cgroup
+ /* This pid is not yet registered at the memory cgroup.
* plz, reference proc_create_app_info function
*/
if (cur_oom_score_adj != OOMADJ_APP_MAX + 10) {
static int lowmem_press_eventfd_read(int fd)
{
- uint64_t dummy_state;
+ unsigned long long dummy_state;
return read(fd, &dummy_state, sizeof(dummy_state));
}
static void lowmem_press_root_cgroup_handler(void)
{
- static unsigned int prev_available;
- unsigned int available;
+ static unsigned int prev_available_mb;
+ unsigned int available_mb;
int mem_state;
- available = proc_get_mem_available();
- if (prev_available == available)
+ available_mb = proc_get_mem_available();
+ if (prev_available_mb == available_mb)
return;
- mem_state = check_mem_state(available);
+ mem_state = check_mem_state(available_mb);
lowmem_trigger_memory_state_action(mem_state);
-
- prev_available = available;
+ prev_available_mb = available_mb;
}
static bool lowmem_press_eventfd_handler(int fd, void *data)
}
}
- return true;
+ return false;
}
static int lowmem_press_register_eventfd(struct memcg_info *mi)
const char *name = mi->name;
static fd_handler_h handler;
- if (mi->threshold[MEM_LEVEL_OOM] == LOWMEM_THRES_INIT)
+ if (mi->threshold_mb[MEM_LEVEL_OOM] == LOWMEM_THRES_INIT)
return 0;
evfd = memcg_set_eventfd(name, MEMCG_EVENTFD_MEMORY_PRESSURE,
mi->evfd = evfd;
- _I("Register event fd success for %s cgroup", name);
add_fd_read_handler(evfd, lowmem_press_eventfd_handler, NULL, NULL, &handler);
return 0;
}
lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
}
-int lowmem_trigger_reclaim(int flags, int victims, enum cgroup_type type, int threshold)
+int lowmem_trigger_reclaim(int flags, int victims, enum cgroup_type type, int threshold_mb)
{
struct lowmem_control *ctl = LOWMEM_NEW_REQUEST();
flags |= OOM_FORCE | OOM_IN_DEPTH | OOM_SINGLE_SHOT;
victims = victims > 0 ? victims : MAX_MEMORY_CGROUP_VICTIMS;
type = type > 0 ? type : CGROUP_LOW;
- threshold = threshold > 0 ? threshold : get_root_memcg_info()->threshold_leave;
+ threshold_mb = threshold_mb > 0 ? threshold_mb : get_root_memcg_info()->threshold_leave_mb;
lowmem_change_memory_state(MEM_LEVEL_CRITICAL, 1);
LOWMEM_SET_REQUEST(ctl, flags,
- type, threshold, victims,
+ type, threshold_mb, victims,
lowmem_force_reclaim_cb);
lowmem_queue_request(&lmw, ctl);
return 0;
}
-void lowmem_trigger_swap_reclaim(enum cgroup_type type, int swap_size)
+void lowmem_trigger_swap_reclaim(enum cgroup_type type, unsigned long long swap_size_bytes)
{
- int size, victims;
+ int size_mb, victims;
victims = num_max_victims > MAX_PROACTIVE_HIGH_VICTIMS
? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
- size = get_root_memcg_info()->threshold_leave + BYTE_TO_MBYTE(swap_size);
- _I("reclaim from swap module, type : %d, size : %d, victims: %d", type, size, victims);
- lowmem_trigger_reclaim(0, victims, type, size);
+ size_mb = get_root_memcg_info()->threshold_leave_mb + BYTE_TO_MBYTE(swap_size_bytes);
+ lowmem_trigger_reclaim(0, victims, type, size_mb);
}
bool lowmem_fragmentated(void)
static void lowmem_proactive_oom_killer(int flags, char *appid)
{
- unsigned int before;
+ unsigned int before_mb;
int victims;
- before = proc_get_mem_available();
+ before_mb = proc_get_mem_available();
/* If memory state is medium or normal, just return and kill in oom killer */
- if (before < get_root_memcg_info()->threshold[MEM_LEVEL_OOM] || before > proactive_leave)
+ if (before_mb < get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM] ||
+ before_mb > proactive_leave_mb)
return;
victims = num_max_victims > MAX_PROACTIVE_HIGH_VICTIMS
*/
struct heart_memory_data *md = heart_memory_get_memdata(appid, DATA_LATEST);
if (md) {
- unsigned int rss, after, size;
+ unsigned int rss_mb, after_mb, size_mb;
- rss = KBYTE_TO_MBYTE(md->avg_rss);
+ rss_mb = KBYTE_TO_MBYTE(md->avg_rss);
free(md);
- after = before - rss;
+ after_mb = before_mb - rss_mb;
/*
* after launching app, ensure that available memory is
* above threshold_leave
*/
-		if (after >= get_root_memcg_info()->threshold[MEM_LEVEL_OOM])
+		if (after_mb >= get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM])
			return;
-		if (proactive_threshold - rss >= get_root_memcg_info()->threshold[MEM_LEVEL_OOM])
-			size = proactive_threshold;
+		if (proactive_threshold_mb - rss_mb >= get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM])
+			size_mb = proactive_threshold_mb;
		else
-			size = rss + get_root_memcg_info()->threshold[MEM_LEVEL_OOM] + THRESHOLD_MARGIN;
+			size_mb = rss_mb + get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM] + THRESHOLD_MARGIN;
_D("history based proactive LMK : avg rss %u, available %u required = %u MB",
- rss, before, size);
- lowmem_trigger_reclaim(0, victims, CGROUP_LOW, size);
+ rss_mb, before_mb, size_mb);
+ lowmem_trigger_reclaim(0, victims, CGROUP_LOW, size_mb);
return;
}
* run proactive oom killer only when available is larger than
* dynamic process threshold
*/
- if (!proactive_threshold || before >= proactive_threshold)
+ if (!proactive_threshold_mb || before_mb >= proactive_threshold_mb)
return;
if (!(flags & PROC_LARGEMEMORY))
* free THRESHOLD_MARGIN more than real should be freed,
* because launching app is consuming up the memory.
*/
- _D("Run threshold based proactive LMK: memory level to reach: %u\n",
- proactive_leave + THRESHOLD_MARGIN);
- lowmem_trigger_reclaim(0, victims, CGROUP_LOW, proactive_leave + THRESHOLD_MARGIN);
+ _D("Run threshold based proactive LMK: memory level to reach: %u MB\n",
+ proactive_leave_mb + THRESHOLD_MARGIN);
+ lowmem_trigger_reclaim(0, victims, CGROUP_LOW, proactive_leave_mb + THRESHOLD_MARGIN);
}
unsigned int lowmem_get_proactive_thres(void)
{
- return proactive_threshold;
+ return proactive_threshold_mb;
}
static int lowmem_prelaunch_handler(void *data)
static inline int calculate_threshold_size(double ratio)
{
- int size = (double)totalram * ratio / 100.0;
- return BYTE_TO_MBYTE(size);
+ unsigned long long size_bytes = (double)totalram_bytes * ratio / 100.0;
+ return BYTE_TO_MBYTE(size_bytes);
}
static void load_configs(const char *path)
for (int cgroup = CGROUP_VIP; cgroup < CGROUP_END; cgroup++) {
if (memcg_conf->cgroup_limit[cgroup] > 0.0)
memcg_info_set_limit(get_memcg_info(cgroup),
- memcg_conf->cgroup_limit[cgroup]/100.0, totalram);
+ memcg_conf->cgroup_limit[cgroup]/100.0, totalram_bytes);
}
/* set MemoryLevelThreshold section */
if (lvl == MEM_LEVEL_OOM)
memcg_set_leave_threshold(CGROUP_ROOT,
- get_memcg_info(CGROUP_ROOT)->threshold[lvl] * 2);
+ get_memcg_info(CGROUP_ROOT)->threshold_mb[lvl] * 2);
}
else if (memcg_conf->threshold[lvl].threshold > 0) {
memcg_set_threshold(CGROUP_ROOT, lvl,
if (lvl == MEM_LEVEL_OOM)
memcg_set_leave_threshold(CGROUP_ROOT,
- get_memcg_info(CGROUP_ROOT)->threshold[lvl] * 2);
+ get_memcg_info(CGROUP_ROOT)->threshold_mb[lvl] * 2);
}
}
oom_popup_enable = memcg_conf->oom_popup;
/* set MemoryAppTypeLimit and MemoryAppStatusLimit section */
- lowmem_memory_init(memcg_conf->service.memory, memcg_conf->widget.memory,
- memcg_conf->guiapp.memory, memcg_conf->background.memory);
+ lowmem_memory_init(memcg_conf->service.memory_bytes, memcg_conf->widget.memory_bytes,
+ memcg_conf->guiapp.memory_bytes, memcg_conf->background.memory_bytes);
lowmem_action_init(memcg_conf->service.action, memcg_conf->widget.action,
memcg_conf->guiapp.action, memcg_conf->background.action);
{
/* print info of Memory section */
for (int cgroup = CGROUP_VIP; cgroup < CGROUP_END; cgroup++) {
- _I("[MEMORY-CGROUP] set memory for cgroup '%s' to %u bytes",
- convert_cgroup_type_to_str(cgroup), get_memcg_info(cgroup)->limit);
+ _I("[MEMORY-CGROUP] set memory for cgroup '%s' to %llu bytes",
+ convert_cgroup_type_to_str(cgroup), get_memcg_info(cgroup)->limit_bytes);
}
for (int cgroup = CGROUP_ROOT; cgroup < CGROUP_END; cgroup++) {
for (int mem_lvl = 0; mem_lvl < MEM_LEVEL_MAX; mem_lvl++) {
_I("[MEMORY-LEVEL] set threshold of %s for memory level '%s' to %u MB", convert_cgroup_type_to_str(cgroup),
- convert_memstate_to_str(mem_lvl), get_memcg_info(cgroup)->threshold[mem_lvl]);
+ convert_memstate_to_str(mem_lvl), get_memcg_info(cgroup)->threshold_mb[mem_lvl]);
}
}
_I("[LMK] set number of max victims as %d", num_max_victims);
- _I("[LMK] set threshold leave to %u MB", get_root_memcg_info()->threshold_leave);
- _I("[LMK] set proactive threshold to %u MB", proactive_threshold);
- _I("[LMK] set proactive low memory killer leave to %u MB", proactive_leave);
+ _I("[LMK] set threshold leave to %u MB", get_root_memcg_info()->threshold_leave_mb);
+ _I("[LMK] set proactive threshold to %u MB", proactive_threshold_mb);
+ _I("[LMK] set proactive low memory killer leave to %u MB", proactive_leave_mb);
/* print info of POPUP section */
_I("[POPUP] oom popup is %s", oom_popup_enable == true ? "enabled" : "disabled");
if (force) {
mem_state = state;
} else {
- unsigned int available = proc_get_mem_available();
- mem_state = check_mem_state(available);
+ unsigned int available_mb = proc_get_mem_available();
+ mem_state = check_mem_state(available_mb);
}
lowmem_trigger_memory_state_action(mem_state);
unsigned long lowmem_get_ktotalram(void)
{
- return ktotalram;
+ return totalram_kb;
}
-unsigned long lowmem_get_totalram(void)
+unsigned long long lowmem_get_totalram(void)
{
- return totalram;
+ return totalram_bytes;
}
void lowmem_restore_memcg(struct proc_app_info *pai)