return MEMCG_ROOT;
}
-int cgroup_get_lowest_oom_score_adj(int score)
-{
- if (score < OOM_SCORE_HIGH || score > OOM_SCORE_MAX) {
- _E("oom score should be located between OOM_SCORE_HIGH and OOM_SCORE_MAX");
- }
-
- if (score == OOM_SCORE_HIGH)
- return OOMADJ_SU;
- else if (score == OOM_SCORE_MEDIUM)
- return OOMADJ_BACKGRD_PERCEPTIBLE;
- else if (score == OOM_SCORE_LOW)
- return OOMADJ_BACKGRD_UNLOCKED + OOMADJ_APP_INCREASE;
- else
- return OOMADJ_SU;
-}
-
-int cgroup_get_highest_oom_score_adj(int score)
-{
- if (score < OOM_SCORE_HIGH || score > OOM_SCORE_MAX) {
- _E("oom score should be located between OOM_SCORE_HIGH and OOM_SCORE_MAX");
- }
-
- if (score == OOM_SCORE_HIGH)
- return OOMADJ_FOREGRD_UNLOCKED;
- else if (score == OOM_SCORE_MEDIUM)
- return OOMADJ_BACKGRD_UNLOCKED;
- else
- return OOMADJ_APP_MAX;
+int cgroup_get_lowest_oom_score_adj(enum oom_level oom_level)
+{
+ switch (oom_level) {
+ case OOM_LEVEL_BACKGROUND_LEAST_RECENTLY_USED:
+ return OOMADJ_BACKGRD_UNLOCKED + OOMADJ_APP_INCREASE;
+ case OOM_LEVEL_BACKGROUND_MOST_RECENTLY_USED:
+ return OOMADJ_BACKGRD_PERCEPTIBLE;
+ case OOM_LEVEL_FOREGROUND_AND_PROC:
+ return OOMADJ_SU;
+ default:
+ if (oom_level < OOM_LEVEL_BACKGROUND_LEAST_RECENTLY_USED
+ || oom_level > OOM_LEVEL_ALL)
+ _E("oom level is out of range");
+
+ return OOMADJ_SU;
+ }
+}
+
+int cgroup_get_highest_oom_score_adj(enum oom_level oom_level)
+{
+ switch (oom_level) {
+ case OOM_LEVEL_BACKGROUND_LEAST_RECENTLY_USED:
+ return OOMADJ_APP_MAX;
+ case OOM_LEVEL_BACKGROUND_MOST_RECENTLY_USED:
+ return OOMADJ_BACKGRD_UNLOCKED;
+ case OOM_LEVEL_FOREGROUND_AND_PROC:
+ return OOMADJ_FOREGRD_UNLOCKED;
+ default:
+ if (oom_level < OOM_LEVEL_BACKGROUND_LEAST_RECENTLY_USED
+ || oom_level > OOM_LEVEL_ALL)
+ _E("oom level is out of range");
+
+ return OOMADJ_APP_MAX;
+ }
}
struct cgroup *get_cgroup_tree(int idx)
#define LOWMEM_DESTROY_REQUEST(_ctl) \
g_slice_free(typeof(*(_ctl)), _ctl); \
-#define LOWMEM_SET_REQUEST(c, __flags, __score, __size, __count, __cb) \
+#define LOWMEM_SET_REQUEST(c, __flags, __oom_level, __size, __count, __cb) \
{ \
- (c)->flags = __flags; (c)->score = __score; \
- (c)->size_mb= __size; (c)->count = __count; \
+ (c)->flags = __flags; (c)->oom_level = __oom_level; \
+ (c)->size_mb= __size; (c)->count = __count; \
(c)->callback = __cb; \
}
return victim_cnt;
}
-static int calculate_range_of_oom(enum oom_score score, int *min, int *max)
+static int calculate_range_of_oom(enum oom_level oom_level, int *min, int *max)
{
- if (score > OOM_SCORE_MAX || score < OOM_SCORE_HIGH) {
- _E("[LMK] oom score (%d) is out of scope", score);
+ if (oom_level < OOM_LEVEL_BACKGROUND_LEAST_RECENTLY_USED
+ || oom_level > OOM_LEVEL_ALL) {
+ _E("[LMK] oom level (%d) is out of scope", oom_level);
return RESOURCED_ERROR_FAIL;
}
- *max = cgroup_get_highest_oom_score_adj(score);
- *min = cgroup_get_lowest_oom_score_adj(score);
+ *max = cgroup_get_highest_oom_score_adj(oom_level);
+ *min = cgroup_get_lowest_oom_score_adj(oom_level);
return RESOURCED_ERROR_NONE;
}
unsigned int total_size_mb = 0;
unsigned int current_size = 0;
unsigned int reclaim_size_mb, shortfall_mb = 0;
- enum oom_score oom_score = ctl->score;
+ enum oom_level oom_level = ctl->oom_level;
available_mb = proc_get_mem_available();
reclaim_size_mb = ctl->size_mb > available_mb /* MB */
retry:
/* Prepare LMK to start doing it's job. Check preconditions. */
- if (calculate_range_of_oom(oom_score, &start_oom, &end_oom))
+ if (calculate_range_of_oom(oom_level, &start_oom, &end_oom))
goto done;
lmk_start_threshold_mb = get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM];
(Make sluggish or kill same victims continuously)
Thus, otherwise, just return in first operation and wait some period.
*/
- if (oom_score == OOM_SCORE_LOW) {
- oom_score = OOM_SCORE_MEDIUM;
+ if (oom_level == OOM_LEVEL_BACKGROUND_LEAST_RECENTLY_USED) {
+ oom_level = OOM_LEVEL_BACKGROUND_MOST_RECENTLY_USED;
goto retry;
- } else if ((oom_score == OOM_SCORE_MEDIUM) && (ctl->flags & OOM_IN_DEPTH)) {
- oom_score = OOM_SCORE_HIGH;
+ } else if ((oom_level == OOM_LEVEL_BACKGROUND_MOST_RECENTLY_USED)
+ && (ctl->flags & OOM_IN_DEPTH)) {
+ oom_level = OOM_LEVEL_FOREGROUND_AND_PROC;
if(ctl->flags & OOM_FORCE)
max_victim_cnt = FOREGROUND_VICTIMS;
goto retry;
- } else if ((oom_score == OOM_SCORE_HIGH) && (ctl->flags & OOM_IN_DEPTH)) {
+ } else if ((oom_level == OOM_LEVEL_FOREGROUND_AND_PROC)
+ && (ctl->flags & OOM_IN_DEPTH)) {
status = LOWMEM_RECLAIM_RETRY;
- ctl->score = OOM_SCORE_MAX;
+ ctl->oom_level = OOM_LEVEL_ALL;
}
- else if (oom_score == OOM_SCORE_MAX) {
+ else if (oom_level == OOM_LEVEL_ALL) {
status = LOWMEM_RECLAIM_RETRY;
}
done:
return;
}
- lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(OOM_SCORE_LOW);
+ lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(
+ OOM_LEVEL_BACKGROUND_LEAST_RECENTLY_USED);
if (oom_score_adj < lowest_oom_score_adj) {
oom_score_adj = lowest_oom_score_adj;
lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
}
-int lowmem_trigger_reclaim(int flags, int victims, enum oom_score score, int threshold_mb)
+int lowmem_trigger_reclaim(int flags, int victims, enum oom_level oom_level, int threshold_mb)
{
struct lowmem_control *ctl = LOWMEM_NEW_REQUEST();
flags |= OOM_FORCE | OOM_IN_DEPTH | OOM_SINGLE_SHOT;
victims = victims > 0 ? victims : MAX_MEMORY_CGROUP_VICTIMS;
- score = score > 0 ? score : OOM_SCORE_LOW;
+ oom_level = oom_level > OOM_LEVEL_BACKGROUND_LEAST_RECENTLY_USED ?
+ oom_level : OOM_LEVEL_BACKGROUND_LEAST_RECENTLY_USED;
threshold_mb = threshold_mb > 0 ? threshold_mb : get_root_memcg_info()->threshold_leave_mb;
lowmem_change_memory_state(MEM_LEVEL_CRITICAL, 1);
LOWMEM_SET_REQUEST(ctl, flags,
- score, threshold_mb, victims,
+ oom_level, threshold_mb, victims,
lowmem_force_reclaim_cb);
lowmem_queue_request(&lmw, ctl);
return 0;
}
-int lowmem_queue_new_request(unsigned int flags, enum oom_score score,
+int lowmem_queue_new_request(unsigned int flags, enum oom_level oom_level,
unsigned int size_mb, unsigned int count,
void (*callback)(struct lowmem_control *))
{
return RESOURCED_ERROR_OUT_OF_MEMORY;
ctl->flags = flags;
- ctl->score = score;
+ ctl->oom_level = oom_level;
ctl->size_mb = size_mb;
ctl->count = count;
ctl->callback = callback;
return RESOURCED_ERROR_NONE;
}
-void lowmem_trigger_swap_reclaim(enum oom_score score, unsigned long long swap_size_bytes)
+void lowmem_trigger_swap_reclaim(enum oom_level oom_level, unsigned long long swap_size_bytes)
{
int size_mb, victims;
? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
size_mb = get_root_memcg_info()->threshold_leave_mb + BYTE_TO_MBYTE(swap_size_bytes);
- lowmem_trigger_reclaim(0, victims, score, size_mb);
+ lowmem_trigger_reclaim(0, victims, oom_level, size_mb);
}
bool lowmem_fragmentated(void)
_D("history based proactive LMK : avg rss %u, available %u required = %u MB",
rss_mb, before_mb, size_mb);
- lowmem_trigger_reclaim(0, victims, OOM_SCORE_LOW, size_mb);
+ lowmem_trigger_reclaim(0, victims,
+ OOM_LEVEL_BACKGROUND_LEAST_RECENTLY_USED, size_mb);
return;
}
*/
_D("Run threshold based proactive LMK: memory level to reach: %u MB\n",
proactive_leave_mb + THRESHOLD_MARGIN);
- lowmem_trigger_reclaim(0, victims, OOM_SCORE_LOW, proactive_leave_mb + THRESHOLD_MARGIN);
+ lowmem_trigger_reclaim(0,
+ victims, OOM_LEVEL_BACKGROUND_LEAST_RECENTLY_USED,
+ proactive_leave_mb + THRESHOLD_MARGIN);
}
unsigned int lowmem_get_proactive_thres(void)
unsigned int lowmem_get_task_mem_usage_rss(const struct task_info *tsk);
void lowmem_trigger_swap(pid_t pid, char *path, bool move);
-int lowmem_trigger_reclaim(int flags, int victims, enum oom_score score, int threshold);
-void lowmem_trigger_swap_reclaim(enum oom_score score, unsigned long long swap_size_bytes);
+int lowmem_trigger_reclaim(int flags, int victims, enum oom_level oom_level, int threshold);
+void lowmem_trigger_swap_reclaim(enum oom_level oom_level, unsigned long long swap_size_bytes);
bool lowmem_fragmentated(void);
unsigned int lowmem_get_proactive_thres(void);
void lowmem_change_memory_state(int state, int force);
*/
/* Processing flags*/
unsigned int flags;
- /* Indictator for OOM score of targeted processes */
- enum oom_score score;
+	/* Indicator for OOM level of targeted processes */
+ enum oom_level oom_level;
/* Desired size to be restored - level to be reached (MB)*/
unsigned int size_mb;
};
int lowmem_worker_is_running();
-int lowmem_queue_new_request(unsigned int flags, enum oom_score score,
+int lowmem_queue_new_request(unsigned int flags, enum oom_level oom_level,
unsigned int size_mb, unsigned int count,
void (*callback) (struct lowmem_control *));
void lowmem_set_oom_popup(bool popup);