-[MemoryGroupLimit]
-VipGroupLimit= 100%
-HighGroupLimit= 100%
-MediumGroupLimit= 100%
-LowestGroupLimit= 100%
[MemoryLevelThreshold]
#MediumLevel= 20%
[MemoryAppStatusLimit]
#BackgroundPerAppLimitAction=768MB,kill
+[MemoryThrottling]
+ThrottlingLimit= 80%
+
[CpuThrottling]
-CpuSched=idle
+#CpuSched=idle
#CpuNice=19
-CpuShare=64
-CpuCFSRuntime=50ms
-CpuCFSPeriod=1s
+#CpuShare=64
+#CpuCFSRuntime=50ms
+#CpuCFSPeriod=1s
[MemorySwap]
ReclaimAtBoot=yes
SwapType=zram
-VipGroupSwappiness=0
-HighGroupSwappiness=0
-MediumGroupSwappiness=0
-LowestGroupSwappiness=0
+ThrottlingSwappiness=80
[MemoryZram]
CompAlgorithm=zstd
PoolType=z3fold
[MemoryDedup]
-DedupAtBoot=yes
-ScanOnLowmem=yes
+#DedupAtBoot=yes
+#ScanOnLowmem=yes
[MemoryKsm]
-KsmMode=oneshot
+#KsmMode=oneshot
#KsmMode=periodic
-PagesToScan=100
-PagesToScanWithBoost=1000
+#PagesToScan=100
+#PagesToScanWithBoost=1000
[MemoryCompaction]
-FragLevel=800
+#FragLevel=800
[CpuSched]
#CpuSchedFeature=no_rt_runtime_share
#define CGROUP_PATH "/sys/fs/cgroup"
-#define CGROUP_PER_PROCESS_NAME ""
-#define CGROUP_GROUP_NAME ""
-
#define CGROUP_DEFAULT_USE_HIERARCHY false
bool use_hierarchy;
/* memory cgroup information */
struct memcg_info *memcg_info;
- /* list of child cgroups when using multi groups */
- GSList *child_cgroups;
};
#include "procfs.h"
#include "proc-common.h"
-#define BUF_MAX 1023
-#define MEMCG_NO_LIMIT 0
-
static int default_swappiness = 0;
static unsigned long long totalram_bytes = 0;
* This structure has full hierarchy of cgroups on running system.
**/
static struct cgroup cgroup_tree[MEMCG_END] = {
- {"/", MEMCG_TOP, CGROUP_DEFAULT_USE_HIERARCHY, NULL, NULL},
- {MEMCG_VIP_NAME, MEMCG_ROOT, CGROUP_DEFAULT_USE_HIERARCHY, NULL, NULL},
- {MEMCG_HIGH_NAME, MEMCG_VIP, CGROUP_DEFAULT_USE_HIERARCHY, NULL, NULL},
- {MEMCG_MEDIUM_NAME, MEMCG_HIGH, CGROUP_DEFAULT_USE_HIERARCHY, NULL, NULL},
- {MEMCG_LOW_NAME, MEMCG_MEDIUM, CGROUP_DEFAULT_USE_HIERARCHY, NULL, NULL},
+ {"/", MEMCG_TOP, CGROUP_DEFAULT_USE_HIERARCHY, NULL},
+ {MEMCG_THROTTLING_NAME, MEMCG_ROOT, CGROUP_DEFAULT_USE_HIERARCHY, NULL},
};
static struct memcg_info gmemcg_info[MEMCG_END] = {
{MEMCG_PATH,},
- {MEMCG_VIP_PATH,},
- {MEMCG_HIGH_PATH,},
- {MEMCG_MEDIUM_PATH,},
- {MEMCG_LOW_PATH,},
+ {MEMCG_THROTTLING_PATH,},
};
int cgroup_get_type(int oom_score_adj)
{
- if (oom_score_adj == OOMADJ_SERVICE_MIN)
- return MEMCG_VIP;
- else if (oom_score_adj >= OOMADJ_SU &&
- oom_score_adj < OOMADJ_BACKGRD_PERCEPTIBLE)
- return MEMCG_HIGH;
- else if (oom_score_adj >= OOMADJ_BACKGRD_PERCEPTIBLE &&
- oom_score_adj < OOMADJ_BACKGRD_UNLOCKED + OOMADJ_APP_INCREASE)
- return MEMCG_MEDIUM;
- else if (oom_score_adj >= OOMADJ_BACKGRD_UNLOCKED + OOMADJ_APP_INCREASE &&
+ if (oom_score_adj >= OOMADJ_BACKGRD_UNLOCKED + OOMADJ_APP_INCREASE &&
oom_score_adj <= OOMADJ_APP_MAX)
- return MEMCG_LOW;
+ return MEMCG_THROTTLING;
else
return MEMCG_ROOT;
}
-int cgroup_get_lowest_oom_score_adj(int type)
+int cgroup_get_lowest_oom_score_adj(int score)
{
- if (type < MEMCG_ROOT || type > MEMCG_LOW) {
- _E("cgroup type should be located between MEMCG_ROOT and MEMCG_LOW");
+ if (score < OOM_SCORE_HIGH || score > OOM_SCORE_MAX) {
+ _E("oom score should be located between OOM_SCORE_HIGH and OOM_SCORE_MAX");
}
- if (type == MEMCG_VIP)
- return OOMADJ_SERVICE_MIN;
- else if (type == MEMCG_HIGH)
+ if (score == OOM_SCORE_HIGH)
return OOMADJ_SU;
- else if (type == MEMCG_MEDIUM)
+ else if (score == OOM_SCORE_MEDIUM)
return OOMADJ_BACKGRD_PERCEPTIBLE;
- else if (type == MEMCG_LOW)
+ else if (score == OOM_SCORE_LOW)
return OOMADJ_BACKGRD_UNLOCKED + OOMADJ_APP_INCREASE;
else
return OOMADJ_SU;
}
-int cgroup_get_highest_oom_score_adj(int type)
+int cgroup_get_highest_oom_score_adj(int score)
{
- if (type == MEMCG_VIP)
- return OOMADJ_SERVICE_MIN;
- else if (type == MEMCG_HIGH)
+ if (score < OOM_SCORE_HIGH || score > OOM_SCORE_MAX) {
+ _E("oom score should be located between OOM_SCORE_HIGH and OOM_SCORE_MAX");
+ }
+
+ if (score == OOM_SCORE_HIGH)
return OOMADJ_FOREGRD_UNLOCKED;
- else if (type == MEMCG_MEDIUM)
+ else if (score == OOM_SCORE_MEDIUM)
return OOMADJ_BACKGRD_UNLOCKED;
else
return OOMADJ_APP_MAX;
return cgroup_tree[idx].memcg_info;
}
-static GSList *get_child_cgroups(int idx)
-{
- if(idx < MEMCG_ROOT || idx >= MEMCG_END) {
- _E("[CGROUP] (%d) cgroup tree's child is NULL", idx);
- return NULL;
- }
- else
- return cgroup_tree[idx].child_cgroups;
-}
-
static int get_parent_cgroup(int idx)
{
if(idx < MEMCG_ROOT || idx >= MEMCG_END) {
unsigned int i;
unsigned long long lower_group_limit_bytes = 0;
- for (i = MEMCG_LOW; i > MEMCG_ROOT; i--) {
+ for (i = MEMCG_THROTTLING; i > MEMCG_ROOT; i--) {
struct memcg_info *mi = get_memcg_info(i);
if (mi->limit_bytes < lower_group_limit_bytes)
{
unsigned int i;
- for (i = MEMCG_VIP; i < MEMCG_END; i++) {
+ for (i = MEMCG_THROTTLING; i < MEMCG_END; i++) {
struct memcg_info *mi = get_memcg_info(i);
memcg_write_optimizer_info(mi);
}
void memcg_params_init(void)
{
int idx = 0;
- GSList *child_cgroups;
for (idx = MEMCG_ROOT; idx < MEMCG_END; idx++) {
struct memcg_info *mi = &gmemcg_info[idx];
memcg_root = mi;
else {
int parent_idx = get_parent_cgroup(idx);
- child_cgroups = get_child_cgroups(parent_idx);
- child_cgroups = g_slist_prepend(child_cgroups, get_cgroup_tree(idx));
set_use_hierarchy(parent_idx, true);
}
int memcg_make_full_subdir(const char* parentdir)
{
int result;
- char path[MAX_PATH_LENGTH] = {0, };
- result = str_name_cpy(path, parentdir, sizeof(path), strlen(parentdir));
- ret_value_if(result < 0, result);
-
- for(int i = MEMCG_VIP; i < MEMCG_END; i++) {
- char name[MAX_NAME_LENGTH] = {0, };
-
- if(i == MEMCG_VIP) {
- result = str_name_cpy(name, MEMCG_MAKE_NAME(VIP), sizeof(name), strlen(MEMCG_MAKE_NAME(VIP)));
- ret_value_if(result < 0, result);
- }
- else if(i == MEMCG_HIGH) {
- result = str_name_cpy(name, MEMCG_MAKE_NAME(HIGH), sizeof(name), strlen(MEMCG_MAKE_NAME(HIGH)));
- ret_value_if(result < 0, result);
- }
- else if(i == MEMCG_MEDIUM) {
- result = str_name_cpy(name, MEMCG_MAKE_NAME(MEDIUM), sizeof(name), strlen(MEMCG_MAKE_NAME(MEDIUM)));
- ret_value_if(result < 0, result);
- }
- else if(i == MEMCG_LOW) {
- result = str_name_cpy(name, MEMCG_MAKE_NAME(LOW), sizeof(name), strlen(MEMCG_MAKE_NAME(LOW)));
- ret_value_if(result < 0, result);
- }
-
- result = cgroup_make_subdir(path, name, NULL);
- ret_value_msg_if(result < 0, result, "%s/%s init failed\n", path, name);
-
- result = str_name_cat(path, "/", sizeof(path), strlen(path), 1);
- ret_value_if(result < 0, result);
- result = str_name_cat(path, name, sizeof(path), strlen(path), strlen(name));
- ret_value_if(result < 0, result);
-
- // ../../perprocess
-/* result = cgroup_make_subdir(path, MEMCG_MAKE_NAME(PER_PROCESS), NULL);
- ret_value_msg_if(result < 0, result, "%s/%s init failed\n",
- path, MEMCG_MAKE_NAME(PER_PROCESS));
- // ../../group
- result = cgroup_make_subdir(path, MEMCG_MAKE_NAME(GROUP), NULL);
- ret_value_msg_if(result < 0, result, "%s/%s init failed\n",
- path, MEMCG_MAKE_NAME(GROUP));*/
- }
+ result = cgroup_make_subdir(parentdir, MEMCG_MAKE_NAME(THROTTLING), NULL);
+ ret_value_msg_if(result < 0, result, "%s/%s init failed\n",
+ parentdir, MEMCG_MAKE_NAME(THROTTLING));
+ result = cgroup_make_subdir(parentdir, MEMCG_MAKE_NAME(PRIVATE), NULL);
+ ret_value_msg_if(result < 0, result, "%s/%s init failed\n",
+ parentdir, MEMCG_MAKE_NAME(PRIVATE));
return RESOURCED_ERROR_NONE;
}
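For reference, with the per-level loop gone a single call now creates both remaining subtrees. A minimal sketch, assuming memcg_make_full_subdir() is still invoked with MEMCG_PATH as its parent directory and that MEMCG_MAKE_NAME() expands to the *_NAME strings defined below; the wrapper function is hypothetical:

/* Illustrative only: creates /sys/fs/cgroup/memory/Throttling and
 * /sys/fs/cgroup/memory/Private via the function above. */
static int setup_memcg_subdirs_example(void)
{
	return memcg_make_full_subdir(MEMCG_PATH);
}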
/* number of memory cgroups */
#define MEMCG_DEFAULT_EVENT_LEVEL "low"
-#define MEMCG_LOW_RATIO 0.8
-#define MEMCG_MEDIUM_RATIO 0.96
-#define MEMCG_FOREGROUND_LEAVE_RATIO 0.25
-
-#define MEMCG_VIP_NAME ""
-#define MEMCG_HIGH_NAME "High"
-#define MEMCG_MEDIUM_NAME "Medium"
-#define MEMCG_LOW_NAME "Lowest"
+#define MEMCG_THROTTLING_NAME "Throttling"
+#define MEMCG_PRIVATE_NAME "Private"
#define MEMCG_NAME "memory"
#define MEMCG_PATH CGROUP_PATH "/" MEMCG_NAME
-#define MEMCG_VIP_PATH MEMCG_PATH "/" MEMCG_VIP_NAME
-#define MEMCG_HIGH_PATH MEMCG_PATH "/" MEMCG_VIP_NAME "/" MEMCG_HIGH_NAME
-#define MEMCG_MEDIUM_PATH MEMCG_PATH "/" MEMCG_VIP_NAME "/" MEMCG_HIGH_NAME "/" MEMCG_MEDIUM_NAME
-#define MEMCG_LOW_PATH MEMCG_PATH "/" MEMCG_VIP_NAME "/" MEMCG_HIGH_NAME "/" MEMCG_MEDIUM_NAME "/" MEMCG_LOW_NAME
-
-#define MEMCG_VIP_PP_PATH MEMCG_VIP_PATH "/" CGROUP_PER_PROCESS_NAME
-#define MEMCG_VIP_GROUP_PATH MEMCG_VIP_PATH "/" CGROUP_GROUP_NAME
-
-#define MEMCG_HIGH_PP_PATH MEMCG_HIGH_PATH "/" CGROUP_PER_PROCESS_NAME
-#define MEMCG_HIGH_GROUP_PATH MEMCG_HIGH_PATH "/" CGROUP_GROUP_NAME
-
-#define MEMCG_MEDIUM_PP_PATH MEMCG_MEDIUM_PATH "/" CGROUP_PER_PROCESS_NAME
-#define MEMCG_MEDIUM_GROUP_PATH MEMCG_MEDIUM_PATH "/" CGROUP_GROUP_NAME
-
-#define MEMCG_LOW_PP_PATH MEMCG_LOW_PATH "/" CGROUP_PER_PROCESS_NAME
-#define MEMCG_LOW_GROUP_PATH MEMCG_LOW_PATH "/" CGROUP_GROUP_NAME
+#define MEMCG_THROTTLING_PATH MEMCG_PATH "/" MEMCG_THROTTLING_NAME
+#define MEMCG_PRIVATE_PATH MEMCG_PATH "/" MEMCG_PRIVATE_NAME
#define MEMCG_OOM_CONTROL "memory.oom_control"
#define MEMCG_EVENTFD_CONTROL "cgroup.event_control"
#define DEFAULT_MEMLOG_PATH "/var/log"
#define DEFAULT_MEMLOG_NR_MAX 50
+#define MEMCG_NO_LIMIT 0
+
enum {
MEM_LEVEL_HIGH,
MEM_LEVEL_MEDIUM,
MEM_LEVEL_MAX,
};
+enum oom_score {
+ OOM_SCORE_HIGH,
+ OOM_SCORE_MEDIUM,
+ OOM_SCORE_LOW,
+ OOM_SCORE_MAX,
+};
+
enum lowmem_control_type {
LOWMEM_MOVE_CGROUP,
LOWMEM_MANAGE_FOREGROUND,
/*
* [memory cgroup information]
- * MEMCG_ROOT : memory cgroup for root dir
- * MEMCG_VIP : memory cgroup for vip apps(or daemons)
- * MEMCG_HIGH : memory cgroup for foreground apps
- * MEMCG_MEDIUM : memory cgroup for background apps
- * MEMCG_LOW : memory cgroup for apps of the lowest privilege
+ * MEMCG_ROOT : memory cgroup for root dir
+ * MEMCG_THROTTLING : memory cgroup for throttling
*
* [memory cgroup hierarchy]
* (normal mode)
* root
- * ├─high─(tizendocker)
- * │ └─medium
- * │ └─low
- * └─system.slice/user.slice
- *
- * (vip mode)
- * root
- * │
- * vip
- * ├─high─(tizendocker)
- * │ └─medium
- * │ └─low
+ * └─Throttling
* └─system.slice/user.slice
*/
enum cgroup_type {
MEMCG_TOP = -1,
MEMCG_ROOT,
- MEMCG_VIP,
- MEMCG_HIGH,
- MEMCG_MEDIUM,
- MEMCG_LOW,
+ MEMCG_THROTTLING,
MEMCG_END,
};
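A minimal usage sketch of the simplified hierarchy, using only helpers that appear elsewhere in this change (cgroup_get_type(), cgroup_write_pid_fullpath(), MEMCG_THROTTLING_PATH); the wrapper below is illustrative, not part of the patch:

/* Illustrative only: with VIP/High/Medium/Lowest removed, placement reduces to
 * one decision: low-priority background work goes to the Throttling cgroup,
 * everything else stays under the root memory cgroup. */
static void throttle_if_low_priority_example(pid_t pid, int oom_score_adj)
{
	if (cgroup_get_type(oom_score_adj) == MEMCG_THROTTLING)
		cgroup_write_pid_fullpath(MEMCG_THROTTLING_PATH, pid);
}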
swap_conf->swap_type = SWAP_TYPE_ZRAM;
}
}
- else if (!strncmp(result->name, VIP_GROUP_SWAPPINESS_NAME_CONF,
- strlen(VIP_GROUP_SWAPPINESS_NAME_CONF)+1)) {
- swap_conf->swappiness[MEMCG_VIP] = atoi(result->value);
- }
- else if (!strncmp(result->name, HIGH_GROUP_SWAPPINESS_NAME_CONF,
- strlen(HIGH_GROUP_SWAPPINESS_NAME_CONF)+1)) {
- swap_conf->swappiness[MEMCG_HIGH] = atoi(result->value);
- }
- else if (!strncmp(result->name, MEDIUM_GROUP_SWAPPINESS_NAME_CONF,
- strlen(MEDIUM_GROUP_SWAPPINESS_NAME_CONF)+1)) {
- swap_conf->swappiness[MEMCG_MEDIUM] = atoi(result->value);
- }
- else if (!strncmp(result->name, LOWEST_GROUP_SWAPPINESS_NAME_CONF,
- strlen(LOWEST_GROUP_SWAPPINESS_NAME_CONF)+1)) {
- swap_conf->swappiness[MEMCG_LOW] = atoi(result->value);
+ else if (!strncmp(result->name, THROTTLING_SWAPPINESS_NAME_CONF,
+ strlen(THROTTLING_SWAPPINESS_NAME_CONF)+1)) {
+ swap_conf->swappiness[MEMCG_THROTTLING] = atoi(result->value);
}
else {
_E("[CONFIG] Unknown configuration name (%s) and value (%s) on section (%s)",
return RESOURCED_ERROR_FAIL;
}
- if (!strncmp(result->section, MEMORY_GROUP_LIMIT_SECTION,
- strlen(MEMORY_GROUP_LIMIT_SECTION)+1)) {
+ if (!strncmp(result->section, MEMORY_THROTTLING_SECTION,
+ strlen(MEMORY_THROTTLING_SECTION)+1)) {
char *ptr = strchr(result->value, '%');
if (ptr == NULL) {
_E("[CONFIG] Cannot find '%%' in the string (%s)", result->value);
else
*ptr = '\0';
- if (!strncmp(result->name, VIP_GROUP_LIMIT_NAME_CONF,
- strlen(VIP_GROUP_LIMIT_NAME_CONF) + 1)) {
- memcg_conf->cgroup_limit[MEMCG_VIP] = atof(result->value);
- }
- else if (!strncmp(result->name, HIGH_GROUP_LIMIT_NAME_CONF,
- strlen(HIGH_GROUP_LIMIT_NAME_CONF) + 1)) {
- memcg_conf->cgroup_limit[MEMCG_HIGH] = atof(result->value);
- }
- else if (!strncmp(result->name, MEDIUM_GROUP_LIMIT_NAME_CONF,
- strlen(MEDIUM_GROUP_LIMIT_NAME_CONF) + 1)) {
- memcg_conf->cgroup_limit[MEMCG_MEDIUM] = atof(result->value);
- }
- else if (!strncmp(result->name, LOWEST_GROUP_LIMIT_NAME_CONF,
- strlen(LOWEST_GROUP_LIMIT_NAME_CONF) + 1)) {
- memcg_conf->cgroup_limit[MEMCG_LOW] = atof(result->value);
+ if (!strncmp(result->name, THROTTLING_LIMIT_NAME_CONF,
+ strlen(THROTTLING_LIMIT_NAME_CONF) + 1)) {
+ memcg_conf->cgroup_limit[MEMCG_THROTTLING] = atof(result->value);
}
else {
_E("[CONFIG] Unknown configuration name (%s) and value (%s) on section (%s)",
if (!result || !user_data)
return RESOURCED_ERROR_INVALID_PARAMETER;
- if (strncmp(result->section, PER_PROCESS_SECTION, strlen(PER_PROCESS_SECTION)+1))
+ if (strncmp(result->section, PRIVATE_SECTION, strlen(PRIVATE_SECTION)+1))
return RESOURCED_ERROR_NONE;
/* common(App or Service) */
_E("Failed to allocate memory during parsing vendor configurations");
return RESOURCED_ERROR_OUT_OF_MEMORY;
}
- pci->mem_type = MEMCG_TOP;
pci->cpu_sched_info.cpu_sched_type = CPU_SCHED_NONE;
pci->cpu_sched_info.cpu_rt_priority = CPU_INIT_PRIO;
pci->cpu_sched_info.cpu_nice = CPU_INIT_NICE;
}
}
/* limiter.conf.d */
-/* else if (!strncmp(result->name, CPU_CGROUP_NAME_CONF, strlen(CPU_CGROUP_NAME_CONF)+1) &&
- *config_type == LIMITER_CONFIG) {
- if (!pci) {
- _E("process configuration information pointer should not be NULL");
- return RESOURCED_ERROR_FAIL;
- }
-
- if (!strncmp(result->value, CGROUP_VIP_VALUE_CONF,
- strlen(CGROUP_VIP_VALUE_CONF) +1)) {
- pci->cpu_type = CGROUP_VIP;
- }
- else if (!strncmp(result->value, CGROUP_HIGH_VALUE_CONF,
- strlen(CGROUP_HIGH_VALUE_CONF) +1)) {
- pci->cpu_type = CGROUP_HIGH;
- }
- else if (!strncmp(result->value, CGROUP_MEDIUM_VALUE_CONF,
- strlen(CGROUP_MEDIUM_VALUE_CONF) +1)) {
- pci->cpu_type = CGROUP_MEDIUM;
- }
- else if (!strncmp(result->value, CGROUP_LOW_VALUE_CONF,
- strlen(CGROUP_LOW_VALUE_CONF) +1)) {
- pci->cpu_type = CGROUP_LOW;
- }
- else {
- _E("invalid parameter (%s)", result->value);
- return RESOURCED_ERROR_INVALID_PARAMETER;
- }
- }*/
- else if (!strncmp(result->name, MEM_CGROUP_NAME_CONF, strlen(MEM_CGROUP_NAME_CONF)+1) &&
+/* else if (!strncmp(result->name, MEM_CGROUP_NAME_CONF, strlen(MEM_CGROUP_NAME_CONF)+1) &&
*config_type == LIMITER_CONFIG) {
if (!pci) {
_E("process configuration information pointer should not be NULL");
return RESOURCED_ERROR_FAIL;
}
- if (!strncmp(result->value, CGROUP_VIP_VALUE_CONF,
- strlen(CGROUP_VIP_VALUE_CONF) +1)) {
- pci->mem_type = MEMCG_VIP;
- }
- else if (!strncmp(result->value, CGROUP_HIGH_VALUE_CONF,
+ if (!strncmp(result->value, CGROUP_HIGH_VALUE_CONF,
strlen(CGROUP_HIGH_VALUE_CONF) +1)) {
pci->mem_type = MEMCG_HIGH;
}
_E("invalid parameter (%s)", result->value);
return RESOURCED_ERROR_INVALID_PARAMETER;
}
- }
+ }*/
else if (!strncmp(result->name, MEM_LIMIT_ACTION_NAME_CONF,
strlen(MEM_LIMIT_ACTION_NAME_CONF)+1) && *config_type == LIMITER_CONFIG) {
int error;
error = set_mem_action_conf(&pci->mem_action, result->value);
return error;
}
+ else if (!strncmp(result->name, MEMORY_THROTTLING_NAME_CONF,
+ strlen(MEMORY_THROTTLING_NAME_CONF)+1) && *config_type == LIMITER_CONFIG) {
+
+ if (!pci) {
+ _E("process configuration information pointer should not be NULL");
+ return RESOURCED_ERROR_FAIL;
+ }
+
+		/* Enable memory throttling of a service */
+ pci->memory_throttling_enable = config_parse_bool(result->value);
+ }
else if (!strncmp(result->name, CPU_THROTTLING_NAME_CONF,
strlen(CPU_THROTTLING_NAME_CONF)+1) && *config_type == LIMITER_CONFIG) {
/* section name */
/* limiter.conf */
-#define PER_PROCESS_SECTION "PerProcess"
-#define MEMORY_GROUP_LIMIT_SECTION "MemoryGroupLimit"
+#define PRIVATE_SECTION "Private"
#define MEMORY_LEVEL_THRESHOLD_SECTION "MemoryLevelThreshold"
#define MEMORY_APP_TYPE_LIMIT_SECTION "MemoryAppTypeLimit"
#define MEMORY_APP_STATUS_LIMIT_SECTION "MemoryAppStatusLimit"
+#define MEMORY_THROTTLING_SECTION "MemoryThrottling"
#define CPU_THROTTLING_SECTION "CpuThrottling"
/* optimizer.conf */
#define MEM_LIMIT_ACTION_NAME_CONF "MemLimitAction"
#define ACTION_ON_FAILURE_NAME_CONF "ActionOnFailure"
#define WATCHDOG_ACTION_NAME_CONF "WatchdogAction"
-#define VIP_GROUP_LIMIT_NAME_CONF "VipGroupLimit"
-#define HIGH_GROUP_LIMIT_NAME_CONF "HighGroupLimit"
-#define MEDIUM_GROUP_LIMIT_NAME_CONF "MediumGroupLimit"
-#define LOWEST_GROUP_LIMIT_NAME_CONF "LowestGroupLimit"
+#define THROTTLING_LIMIT_NAME_CONF "ThrottlingLimit"
#define MEDIUM_LEVEL_NAME_CONF "MediumLevel"
#define LOW_LEVEL_NAME_CONF "LowLevel"
#define CRITICAL_LEVEL_NAME_CONF "CriticalLevel"
#define CPU_CFS_RUN_TIME_NAME_CONF "CpuCFSRuntime"
#define CPU_CFS_PERIOD_NAME_CONF "CpuCFSPeriod"
#define CPU_BOOSTING_LEVEL_NAME_CONF "CpuBoostingLevel"
+#define MEMORY_THROTTLING_NAME_CONF "MemoryThrottling"
#define CPU_THROTTLING_NAME_CONF "CpuThrottling"
/* optimizer.conf */
#define SWAP_ENABLE_NAME_CONF "SwapEnable"
#define RECLAIM_AT_BOOT_NAME_CONF "ReclaimAtBoot"
#define SWAP_TYPE_NAME_CONF "SwapType"
-#define VIP_GROUP_SWAPPINESS_NAME_CONF "VipGroupSwappiness"
-#define HIGH_GROUP_SWAPPINESS_NAME_CONF "HighGroupSwappiness"
-#define MEDIUM_GROUP_SWAPPINESS_NAME_CONF "MediumGroupSwappiness"
-#define LOWEST_GROUP_SWAPPINESS_NAME_CONF "LowestGroupSwappiness"
+#define THROTTLING_SWAPPINESS_NAME_CONF "ThrottlingSwappiness"
#define COMP_ALGORITHM_NAME_CONF "CompAlgorithm"
#define ZRAM_RATIO_NAME_CONF "ZramRatio"
#define POOL_RATIO_NAME_CONF "PoolRatio"
#define FOREGROUND_APPS_NAME_CONF "ForegroundApps"
/* configuration value */
-#define CGROUP_VIP_VALUE_CONF "vip"
-#define CGROUP_HIGH_VALUE_CONF "high"
-#define CGROUP_MEDIUM_VALUE_CONF "medium"
#define CGROUP_LOW_VALUE_CONF "lowest"
#define ACTION_BROADCAST_VALUE_CONF "broadcast"
#define ACTION_RECLAIM_VALUE_CONF "reclaim"
RESOURCED_NOTIFIER_MEM_LEVEL_CHANGED,
RESOURCED_NOTIFIER_MEM_CONTROL,
RESOURCED_NOTIFIER_LIMIT_APP,
+ RESOURCED_NOTIFIER_THROTTLING_RESOURCE,
/*
* control system service (*.service)
*/
RESOURCED_NOTIFIER_SYSTEM_SERVICE,
RESOURCED_NOTIFIER_LIMIT_SYSTEM_SERVICE,
- RESOURCED_NOTIFIER_THROTTLING_SYSTEM_SERVICE,
/*
* real time scheduler
struct proc_conf_info {
char name[MAX_NAME_LENGTH];
- enum cgroup_type mem_type; /* fixed memory cgroup */
- enum cgroup_type cpu_type; /* fixed cpu cgroup */
enum proc_action watchdog_action; /* watchdog action */
enum proc_action fail_action; /* release action */
struct mem_action mem_action;
struct cpu_sched_info cpu_sched_info;
+ bool memory_throttling_enable;
bool cpu_throttling_enable;
pid_t pid;
};
pid_t main_pid;
pid_list childs;
bool app_watchdog_exclude;
- bool app_memcg_update_exclude;
bool app_cpu_nice_update_exclude;
int runtime_exclude;
int flags;
struct lowmem_control_data lowmem_data;
char buf[sizeof(PROC_OOM_SCORE_ADJ_PATH) + MAX_DEC_SIZE(int)] = {0};
- /* Don't touch OOM-fixed process' score */
- if (pai && pai->app_memcg_update_exclude)
- return RESOURCED_ERROR_NONE;
-
snprintf(buf, sizeof(buf), PROC_OOM_SCORE_ADJ_PATH, pid);
fp = fopen(buf, "r+");
if (fp == NULL)
#include "proc-common.h"
-#define OOMADJ_SERVICE_MIN (-900)
#define OOMADJ_SU (0)
#define OOMADJ_INIT (100)
#define OOMADJ_FOREGRD_LOCKED (150)
swap_conf->enable = false;
swap_conf->boot_reclaim_enable = false;
swap_conf->swap_type = SWAP_TYPE_NONE;
+ swap_conf->swappiness[MEMCG_THROTTLING] = MEMORY_INIT_SWAPPINESS;
}
}
#define SWAP_FILE_NAME "/opt/usr/.swapfile"
#define SWAP_CONF_FILE RD_CONFIG_FILE(optimizer)
+#define MEMORY_INIT_SWAPPINESS 200
+
enum swap_state {
SWAP_ARG_START = -1,
SWAP_OFF,
return NULL;
}
- if (proc_type == APP_TYPE)
+ if (proc_type == APP_TYPE) {
+ if (g_hash_table_size(fixed_app_list) == 0)
+ return NULL;
+
return (struct proc_conf_info *)g_hash_table_lookup(fixed_app_list, name);
- else
+ }
+ else {
+ if (g_hash_table_size(fixed_service_list) == 0)
+ return NULL;
+
return (struct proc_conf_info *)g_hash_table_lookup(fixed_service_list, name);
+ }
}
enum proc_action fixed_app_and_service_watchdog_action(const char *name, enum proc_type proc_type)
pci = fixed_app_and_service_exist_check(appid, APP_TYPE);
if (pci) {
- if (pci->mem_type != MEMCG_TOP) {
- proc_set_oom_score_adj(pid, cgroup_get_lowest_oom_score_adj(pci->mem_type), pai);
- pai->app_memcg_update_exclude = true;
- }
-
memset(&attr, 0, sizeof(struct sched_attr));
attr.size = sizeof(struct sched_attr);
if (pci->cpu_sched_info.cpu_nice >= CPU_MIN_NICE &&
default:
_E("[PROCESS] Unknown CPU sched type");
}
-
error = sched_setattr_of_all_tasks(pid, &attr, 0);
if (error)
_E("[PROCESS] Failed to set sched attributes");
register_notifier(RESOURCED_NOTIFIER_APP_FOREGRD, cpu_foreground_state);
register_notifier(RESOURCED_NOTIFIER_WIDGET_FOREGRD, cpu_foreground_state);
register_notifier(RESOURCED_NOTIFIER_APP_BACKGRD, cpu_background_state);
- register_notifier(RESOURCED_NOTIFIER_THROTTLING_SYSTEM_SERVICE, cpu_background_state);
+ register_notifier(RESOURCED_NOTIFIER_THROTTLING_RESOURCE, cpu_background_state);
return RESOURCED_ERROR_NONE;
}
unregister_notifier(RESOURCED_NOTIFIER_APP_FOREGRD, cpu_foreground_state);
unregister_notifier(RESOURCED_NOTIFIER_WIDGET_FOREGRD, cpu_foreground_state);
unregister_notifier(RESOURCED_NOTIFIER_APP_BACKGRD, cpu_background_state);
- unregister_notifier(RESOURCED_NOTIFIER_THROTTLING_SYSTEM_SERVICE, cpu_background_state);
+ unregister_notifier(RESOURCED_NOTIFIER_THROTTLING_RESOURCE, cpu_background_state);
return RESOURCED_ERROR_NONE;
}
static void lowmem_dbus_oom_trigger(GVariant *params)
{
lowmem_trigger_reclaim(OOM_NOMEMORY_CHECK,
- MAX_MEMORY_CGROUP_VICTIMS, MEMCG_LOW, 0);
+ MAX_MEMORY_CGROUP_VICTIMS, OOM_SCORE_LOW, 0);
}
static void lowmem_dbus_set_perceptible(GVariant *params)
g_variant_get(params, gtype, &pid);
ret_unless(pid > 0);
- lowmem_trigger_swap(pid, get_memcg_info(MEMCG_LOW)->name, true);
+ lowmem_trigger_swap(pid, get_memcg_info(MEMCG_THROTTLING)->name, true);
}
static void lowmem_dbus_set_memlimit(GVariant *params)
int lowmem_limit_set_system_service(pid_t pid, unsigned long long limit_bytes,
const char *name, enum proc_action action);
void lowmem_dbus_init(void);
-int lowmem_trigger_reclaim(int flags, int victims, enum cgroup_type type, int threshold);
-void lowmem_trigger_swap_reclaim(enum cgroup_type type, unsigned long long swap_size_bytes);
+int lowmem_trigger_reclaim(int flags, int victims, enum oom_score score, int threshold);
+void lowmem_trigger_swap_reclaim(enum oom_score score, unsigned long long swap_size_bytes);
void lowmem_change_memory_state(int state, int force);
unsigned long lowmem_get_ktotalram(void);
unsigned long long lowmem_get_totalram(void);
if (!pai->memory.use_mem_limit)
return RESOURCED_ERROR_NO_DATA;
- ret = asprintf(&path, "%s/%s", MEMCG_HIGH_PP_PATH, pai->appid);
+ ret = asprintf(&path, "%s/%s", MEMCG_PRIVATE_PATH, pai->appid);
if (ret < 0) {
_E("[MEMORY-LIMIT] not enough memory");
return RESOURCED_ERROR_OUT_OF_MEMORY;
return RESOURCED_ERROR_FAIL;
}
- result = asprintf(&path, "%s/%s", MEMCG_HIGH_PP_PATH, name);
+ result = asprintf(&path, "%s/%s", MEMCG_PRIVATE_PATH, name);
if (result < 0) {
_E("[MEMORY-LIMIT] not enough memory");
return RESOURCED_ERROR_OUT_OF_MEMORY;
}
- result = cgroup_make_subdir(MEMCG_HIGH_PP_PATH, name, NULL);
+ result = cgroup_make_subdir(MEMCG_PRIVATE_PATH, name, NULL);
if (result < 0) {
_E("[MEMORY-LIMIT] Failed to create cgroup subdir '%s/%s'",
- MEMCG_HIGH_PP_PATH, name);
+ MEMCG_PRIVATE_PATH, name);
return result;
}
return RESOURCED_ERROR_INVALID_PARAMETER;
}
- result = asprintf(&path, "%s/%s", MEMCG_HIGH_PP_PATH, pai->appid);
+ result = asprintf(&path, "%s/%s", MEMCG_PRIVATE_PATH, pai->appid);
if (result < 0) {
_E("[MEMORY-LIMIT] not enough memory");
return RESOURCED_ERROR_OUT_OF_MEMORY;
}
- result = cgroup_make_subdir(MEMCG_HIGH_PP_PATH, pai->appid, NULL);
+ result = cgroup_make_subdir(MEMCG_PRIVATE_PATH, pai->appid, NULL);
if (result < 0) {
_E("[MEMORY-LIMIT] Failed to create cgroup subdir '%s/%s'",
- MEMCG_HIGH_PP_PATH, pai->appid);
+ MEMCG_PRIVATE_PATH, pai->appid);
return result;
}
#include "safe-kill.h"
#include "dedup-common.h"
-#define LOWMEM_THRES_INIT 0
-
-#define MEMPS_EXEC_PATH "usr/bin/memps"
-#define MEM_CONF_FILE RD_CONFIG_FILE(limiter)
-#define MEM_SECTION "Memory"
-#define MEM_VIP_SECTION "VIP_PROCESS"
-#define MEM_VIP_PREDEFINE "PREDEFINE"
-#define MEM_POPUP_SECTION "POPUP"
-#define MEM_POPUP_STRING "oom_popup"
-#define MEM_BG_RECLAIM_SECTION "BackgroundReclaim"
-#define MEM_BG_RECLAIM_STRING "AfterScreenDim"
-#define MEM_LOGGING_SECTION "Logging"
-
-#define BUF_MAX 1024
-#define MAX_VICTIMS_BETWEEN_CHECK 3
-#define MAX_PROACTIVE_LOW_VICTIMS 2
-#define MAX_PROACTIVE_HIGH_VICTIMS 4
-#define FOREGROUND_VICTIMS 1
-#define OOM_TIMER_INTERVAL 2
-#define OOM_KILLER_PRIORITY -20
-#define THRESHOLD_MARGIN 10 /* MB */
-
-#define MEM_SIZE_64 64 /* MB */
-#define MEM_SIZE_256 256 /* MB */
-#define MEM_SIZE_448 448 /* MB */
-#define MEM_SIZE_512 512 /* MB */
-#define MEM_SIZE_768 768 /* MB */
-#define MEM_SIZE_1024 1024 /* MB */
-#define MEM_SIZE_2048 2048 /* MB */
+#define LOWMEM_THRES_INIT 0
+
+#define MAX_VICTIMS_BETWEEN_CHECK 3
+#define MAX_PROACTIVE_HIGH_VICTIMS 4
+#define FOREGROUND_VICTIMS 1
+#define OOM_KILLER_PRIORITY -20
+#define THRESHOLD_MARGIN 10 /* MB */
+
+#define MEM_SIZE_64 64 /* MB */
+#define MEM_SIZE_256 256 /* MB */
+#define MEM_SIZE_448 448 /* MB */
+#define MEM_SIZE_512 512 /* MB */
+#define MEM_SIZE_768 768 /* MB */
+#define MEM_SIZE_1024 1024 /* MB */
+#define MEM_SIZE_2048 2048 /* MB */
/* thresholds for 64M RAM*/
-#define PROACTIVE_64_THRES 10 /* MB */
-#define PROACTIVE_64_LEAVE 30 /* MB */
-#define CGROUP_ROOT_64_THRES_DEDUP 16 /* MB */
-#define CGROUP_ROOT_64_THRES_SWAP 15 /* MB */
-#define CGROUP_ROOT_64_THRES_LOW 8 /* MB */
-#define CGROUP_ROOT_64_THRES_MEDIUM 5 /* MB */
-#define CGROUP_ROOT_64_THRES_LEAVE 8 /* MB */
-#define CGROUP_ROOT_64_NUM_VICTIMS 1
+#define PROACTIVE_64_THRES 10 /* MB */
+#define PROACTIVE_64_LEAVE 30 /* MB */
+#define CGROUP_ROOT_64_THRES_DEDUP 16 /* MB */
+#define CGROUP_ROOT_64_THRES_SWAP 15 /* MB */
+#define CGROUP_ROOT_64_THRES_LOW 8 /* MB */
+#define CGROUP_ROOT_64_THRES_MEDIUM 5 /* MB */
+#define CGROUP_ROOT_64_THRES_LEAVE 8 /* MB */
+#define CGROUP_ROOT_64_NUM_VICTIMS 1
/* thresholds for 256M RAM */
-#define PROACTIVE_256_THRES 50 /* MB */
-#define PROACTIVE_256_LEAVE 80 /* MB */
-#define CGROUP_ROOT_256_THRES_DEDUP 60 /* MB */
-#define CGROUP_ROOT_256_THRES_SWAP 40 /* MB */
-#define CGROUP_ROOT_256_THRES_LOW 20 /* MB */
-#define CGROUP_ROOT_256_THRES_MEDIUM 10 /* MB */
-#define CGROUP_ROOT_256_THRES_LEAVE 20 /* MB */
-#define CGROUP_ROOT_256_NUM_VICTIMS 2
+#define PROACTIVE_256_THRES 50 /* MB */
+#define PROACTIVE_256_LEAVE 80 /* MB */
+#define CGROUP_ROOT_256_THRES_DEDUP 60 /* MB */
+#define CGROUP_ROOT_256_THRES_SWAP 40 /* MB */
+#define CGROUP_ROOT_256_THRES_LOW 20 /* MB */
+#define CGROUP_ROOT_256_THRES_MEDIUM 10 /* MB */
+#define CGROUP_ROOT_256_THRES_LEAVE 20 /* MB */
+#define CGROUP_ROOT_256_NUM_VICTIMS 2
/* threshold for 448M RAM */
-#define PROACTIVE_448_THRES 80 /* MB */
-#define PROACTIVE_448_LEAVE 100 /* MB */
-#define CGROUP_ROOT_448_THRES_DEDUP 120 /* MB */
-#define CGROUP_ROOT_448_THRES_SWAP 100 /* MB */
-#define CGROUP_ROOT_448_THRES_LOW 60 /* MB */
-#define CGROUP_ROOT_448_THRES_MEDIUM 50 /* MB */
-#define CGROUP_ROOT_448_THRES_LEAVE 70 /* MB */
-#define CGROUP_ROOT_448_NUM_VICTIMS 5
+#define PROACTIVE_448_THRES 80 /* MB */
+#define PROACTIVE_448_LEAVE 100 /* MB */
+#define CGROUP_ROOT_448_THRES_DEDUP 120 /* MB */
+#define CGROUP_ROOT_448_THRES_SWAP 100 /* MB */
+#define CGROUP_ROOT_448_THRES_LOW 60 /* MB */
+#define CGROUP_ROOT_448_THRES_MEDIUM 50 /* MB */
+#define CGROUP_ROOT_448_THRES_LEAVE 70 /* MB */
+#define CGROUP_ROOT_448_NUM_VICTIMS 5
/* threshold for 512M RAM */
-#define PROACTIVE_512_THRES 100 /* MB */
-#define PROACTIVE_512_LEAVE 80 /* MB */
-#define CGROUP_ROOT_512_THRES_DEDUP 140 /* MB */
-#define CGROUP_ROOT_512_THRES_SWAP 100 /* MB */
-#define CGROUP_ROOT_512_THRES_LOW 70 /* MB */
-#define CGROUP_ROOT_512_THRES_MEDIUM 60 /* MB */
-#define CGROUP_ROOT_512_THRES_LEAVE 80 /* MB */
-#define CGROUP_ROOT_512_NUM_VICTIMS 5
+#define PROACTIVE_512_THRES 80 /* MB */
+#define PROACTIVE_512_LEAVE 100 /* MB */
+#define CGROUP_ROOT_512_THRES_DEDUP 140 /* MB */
+#define CGROUP_ROOT_512_THRES_SWAP 100 /* MB */
+#define CGROUP_ROOT_512_THRES_LOW 70 /* MB */
+#define CGROUP_ROOT_512_THRES_MEDIUM 60 /* MB */
+#define CGROUP_ROOT_512_THRES_LEAVE 80 /* MB */
+#define CGROUP_ROOT_512_NUM_VICTIMS 5
/* threshold for 768 RAM */
-#define PROACTIVE_768_THRES 100 /* MB */
-#define PROACTIVE_768_LEAVE 130 /* MB */
-#define CGROUP_ROOT_768_THRES_DEDUP 180 /* MB */
-#define CGROUP_ROOT_768_THRES_SWAP 150 /* MB */
-#define CGROUP_ROOT_768_THRES_LOW 90 /* MB */
-#define CGROUP_ROOT_768_THRES_MEDIUM 80 /* MB */
-#define CGROUP_ROOT_768_THRES_LEAVE 100 /* MB */
-#define CGROUP_ROOT_768_NUM_VICTIMS 5
+#define PROACTIVE_768_THRES 100 /* MB */
+#define PROACTIVE_768_LEAVE 130 /* MB */
+#define CGROUP_ROOT_768_THRES_DEDUP 180 /* MB */
+#define CGROUP_ROOT_768_THRES_SWAP 150 /* MB */
+#define CGROUP_ROOT_768_THRES_LOW 90 /* MB */
+#define CGROUP_ROOT_768_THRES_MEDIUM 80 /* MB */
+#define CGROUP_ROOT_768_THRES_LEAVE 100 /* MB */
+#define CGROUP_ROOT_768_NUM_VICTIMS 5
/* threshold for more than 1024M RAM */
-#define PROACTIVE_1024_THRES 230 /* MB */
-#define PROACTIVE_1024_LEAVE 150 /* MB */
-#define CGROUP_ROOT_1024_THRES_DEDUP 400 /* MB */
-#define CGROUP_ROOT_1024_THRES_SWAP 300 /* MB */
-#define CGROUP_ROOT_1024_THRES_LOW 120 /* MB */
-#define CGROUP_ROOT_1024_THRES_MEDIUM 100 /* MB */
-#define CGROUP_ROOT_1024_THRES_LEAVE 150 /* MB */
-#define CGROUP_ROOT_1024_NUM_VICTIMS 5
+#define PROACTIVE_1024_THRES 150 /* MB */
+#define PROACTIVE_1024_LEAVE 230 /* MB */
+#define CGROUP_ROOT_1024_THRES_DEDUP 400 /* MB */
+#define CGROUP_ROOT_1024_THRES_SWAP 300 /* MB */
+#define CGROUP_ROOT_1024_THRES_LOW 120 /* MB */
+#define CGROUP_ROOT_1024_THRES_MEDIUM 100 /* MB */
+#define CGROUP_ROOT_1024_THRES_LEAVE 150 /* MB */
+#define CGROUP_ROOT_1024_NUM_VICTIMS 5
/* threshold for more than 2048M RAM */
-#define PROACTIVE_2048_THRES 200 /* MB */
-#define PROACTIVE_2048_LEAVE 500 /* MB */
-#define CGROUP_ROOT_2048_THRES_DEDUP 400 /* MB */
-#define CGROUP_ROOT_2048_THRES_SWAP 300 /* MB */
-#define CGROUP_ROOT_2048_THRES_LOW 200 /* MB */
-#define CGROUP_ROOT_2048_THRES_MEDIUM 160 /* MB */
-#define CGROUP_ROOT_2048_THRES_LEAVE 300 /* MB */
-#define CGROUP_ROOT_2048_NUM_VICTIMS 10
+#define PROACTIVE_2048_THRES 200 /* MB */
+#define PROACTIVE_2048_LEAVE 500 /* MB */
+#define CGROUP_ROOT_2048_THRES_DEDUP 400 /* MB */
+#define CGROUP_ROOT_2048_THRES_SWAP 300 /* MB */
+#define CGROUP_ROOT_2048_THRES_LOW 200 /* MB */
+#define CGROUP_ROOT_2048_THRES_MEDIUM 160 /* MB */
+#define CGROUP_ROOT_2048_THRES_LEAVE 300 /* MB */
+#define CGROUP_ROOT_2048_NUM_VICTIMS 10
/* threshold for more than 3072M RAM */
-#define PROACTIVE_3072_THRES 300 /* MB */
-#define PROACTIVE_3072_LEAVE 700 /* MB */
-#define CGROUP_ROOT_3072_THRES_DEDUP 600 /* MB */
-#define CGROUP_ROOT_3072_THRES_SWAP 500 /* MB */
-#define CGROUP_ROOT_3072_THRES_LOW 400 /* MB */
-#define CGROUP_ROOT_3072_THRES_MEDIUM 250 /* MB */
-#define CGROUP_ROOT_3072_THRES_LEAVE 500 /* MB */
-#define CGROUP_ROOT_3072_NUM_VICTIMS 10
+#define PROACTIVE_3072_THRES 300 /* MB */
+#define PROACTIVE_3072_LEAVE 700 /* MB */
+#define CGROUP_ROOT_3072_THRES_DEDUP 600 /* MB */
+#define CGROUP_ROOT_3072_THRES_SWAP 500 /* MB */
+#define CGROUP_ROOT_3072_THRES_LOW 400 /* MB */
+#define CGROUP_ROOT_3072_THRES_MEDIUM 250 /* MB */
+#define CGROUP_ROOT_3072_THRES_LEAVE 500 /* MB */
+#define CGROUP_ROOT_3072_NUM_VICTIMS 10
static unsigned proactive_threshold_mb;
static unsigned proactive_leave_mb;
/* Processing flags*/
unsigned int flags;
	/* Indicator for OOM score of targeted processes */
- enum cgroup_type type;
+ enum oom_score score;
/* Desired size to be restored - level to be reached (MB)*/
unsigned int size_mb;
#define LOWMEM_DESTROY_REQUEST(_ctl) \
g_slice_free(typeof(*(_ctl)), _ctl); \
-#define LOWMEM_SET_REQUEST(c, __flags, __type, __size, __count, __cb) \
+#define LOWMEM_SET_REQUEST(c, __flags, __score, __size, __count, __cb) \
{ \
- (c)->flags = __flags; (c)->type = __type; \
+ (c)->flags = __flags; (c)->score = __score; \
(c)->size_mb= __size; (c)->count = __count; \
(c)->callback = __cb; \
}
static const char *convert_cgroup_type_to_str(int type)
{
static const char *type_table[] =
- {"/", "VIP", "High", "Medium", "Lowest"};
- if (type >= MEMCG_ROOT && type <= MEMCG_LOW)
+ {"/", "Throttling"};
+ if (type >= MEMCG_ROOT && type <= MEMCG_THROTTLING)
return type_table[type];
else
return "Error";
continue;
}
- /* VIP pids should be excluded from the LMK list */
- if (cgroup_get_type(oom) == MEMCG_VIP)
- continue;
-
/*
* Check whether this array includes applications or not.
* If it doesn't require to get applications
return victim;
}
-static int calculate_range_of_oom(enum cgroup_type type, int *min, int *max)
+static int calculate_range_of_oom(enum oom_score score, int *min, int *max)
{
- if (type == MEMCG_VIP || type >= MEMCG_END || type <= MEMCG_TOP) {
- _E("cgroup type (%d) is out of scope", type);
+ if (score > OOM_SCORE_MAX || score < OOM_SCORE_HIGH) {
+ _E("[LMK] oom score (%d) is out of scope", score);
return RESOURCED_ERROR_FAIL;
}
- *max = cgroup_get_highest_oom_score_adj(type);
- *min = cgroup_get_lowest_oom_score_adj(type);
+ *max = cgroup_get_highest_oom_score_adj(score);
+ *min = cgroup_get_lowest_oom_score_adj(score);
return RESOURCED_ERROR_NONE;
}
unsigned int total_size_mb = 0;
unsigned int current_size = 0;
unsigned int reclaim_size_mb, shortfall_mb = 0;
- enum cgroup_type cgroup_type = ctl->type;
+ enum oom_score oom_score = ctl->score;
available_mb = proc_get_mem_available();
reclaim_size_mb = ctl->size_mb > available_mb /* MB */
retry:
/* Prepare LMK to start doing it's job. Check preconditions. */
- if (calculate_range_of_oom(cgroup_type, &start_oom, &end_oom))
+ if (calculate_range_of_oom(oom_score, &start_oom, &end_oom))
goto done;
lmk_start_threshold_mb = get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM];
(Make sluggish or kill same victims continuously)
Thus, otherwise, just return in first operation and wait some period.
*/
- if (cgroup_type == MEMCG_LOW) {
- cgroup_type = MEMCG_MEDIUM;
+ if (oom_score == OOM_SCORE_LOW) {
+ oom_score = OOM_SCORE_MEDIUM;
goto retry;
- } else if ((cgroup_type == MEMCG_MEDIUM) && (ctl->flags & OOM_IN_DEPTH)) {
- cgroup_type = MEMCG_HIGH;
+ } else if ((oom_score == OOM_SCORE_MEDIUM) && (ctl->flags & OOM_IN_DEPTH)) {
+ oom_score = OOM_SCORE_HIGH;
if(ctl->flags & OOM_FORCE)
max_victim_cnt = FOREGROUND_VICTIMS;
goto retry;
- } else if ((cgroup_type == MEMCG_HIGH) && (ctl->flags & OOM_IN_DEPTH)) {
+ } else if ((oom_score == OOM_SCORE_HIGH) && (ctl->flags & OOM_IN_DEPTH)) {
status = LOWMEM_RECLAIM_RETRY;
- ctl->type = MEMCG_ROOT;
+ ctl->score = OOM_SCORE_MAX;
}
- else if (cgroup_type == MEMCG_ROOT) {
+ else if (oom_score == OOM_SCORE_MAX) {
status = LOWMEM_RECLAIM_RETRY;
}
done:
return;
}
- /* In this case, corresponding process will be moved to memory MEMCG_LOW.
+	/* In this case, the corresponding process will be moved to the MEMCG_THROTTLING memory cgroup.
*/
if (move) {
error = proc_get_oom_score_adj(pid, &oom_score_adj);
return;
}
- lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(MEMCG_LOW);
+ lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(OOM_SCORE_LOW);
if (oom_score_adj < lowest_oom_score_adj) {
oom_score_adj = lowest_oom_score_adj;
}
	/* Corresponding process is already managed per app or service.
- * In addition, if some process is already located in the MEMCG_LOW, then just do swap
+ * In addition, if some process is already located in the MEMCG_THROTTLING, then just do swap
*/
resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
}
change_lowmem_state(MEM_LEVEL_HIGH);
if (swap_get_state() == SWAP_ON && memcg_swap_status) {
- resourced_notify(RESOURCED_NOTIFIER_SWAP_UNSET_LIMIT, get_memcg_info(MEMCG_LOW));
+ resourced_notify(RESOURCED_NOTIFIER_SWAP_UNSET_LIMIT, get_memcg_info(MEMCG_THROTTLING));
memcg_swap_status = false;
}
if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
ctl = LOWMEM_NEW_REQUEST();
if (ctl) {
LOWMEM_SET_REQUEST(ctl, OOM_IN_DEPTH,
- MEMCG_LOW, get_root_memcg_info()->threshold_leave_mb,
+ OOM_SCORE_LOW, get_root_memcg_info()->threshold_leave_mb,
num_max_victims, medium_cb);
lowmem_queue_request(&lmw, ctl);
}
memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_512_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_512_NUM_VICTIMS;
} else if (total_ramsize_mb <= MEM_SIZE_768) {
- /* set thresholds for ram size 512M */
+ /* set thresholds for ram size 768M */
proactive_threshold_mb = PROACTIVE_768_THRES;
proactive_leave_mb = PROACTIVE_768_LEAVE;
memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_768_THRES_DEDUP);
struct memcg_info *mi;
int next_memcg_idx = cgroup_get_type(next_oom_score_adj);
- if(next_memcg_idx < MEMCG_VIP || next_memcg_idx > MEMCG_LOW) {
- _E("cgroup type (%d) should not be called", next_memcg_idx);
- return;
- }
mi = get_memcg_info(next_memcg_idx);
if (!mi) {
cur_oom_score_adj = pai->memory.oom_score_adj;
cur_memcg_idx = cgroup_get_type(cur_oom_score_adj);
- /* This pid is not yet registered at the memory cgroup.
- * plz, reference proc_create_app_info function
- */
- if (cur_oom_score_adj != OOMADJ_APP_MAX + 10) {
- /* VIP processes should not be asked to move. */
- if (cur_memcg_idx <= MEMCG_VIP) {
- _E("[MEMORY-CGROUP] current cgroup (%s) cannot be VIP or Root", convert_cgroup_type_to_str(cur_memcg_idx));
- return;
- }
- }
-
- _I("app (%s) memory cgroup move from %s to %s", pai->appid, convert_cgroup_type_to_str(cur_memcg_idx), convert_cgroup_type_to_str(next_memcg_idx));
-
if (cur_oom_score_adj == next_oom_score_adj) {
_D("next oom_score_adj (%d) is same with current one", next_oom_score_adj);
return;
if(cur_memcg_idx == next_memcg_idx)
return;
+ _I("app (%s) memory cgroup move from %s to %s", pai->appid, convert_cgroup_type_to_str(cur_memcg_idx), convert_cgroup_type_to_str(next_memcg_idx));
cgroup_write_pid_fullpath(mi->name, pid);
- if (next_memcg_idx == MEMCG_LOW)
- lowmem_swap_memory(get_memcg_info(MEMCG_LOW)->name);
+ if (next_memcg_idx == MEMCG_THROTTLING)
+ lowmem_swap_memory(get_memcg_info(MEMCG_THROTTLING)->name);
}
/* child pid */
else {
lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
}
-int lowmem_trigger_reclaim(int flags, int victims, enum cgroup_type type, int threshold_mb)
+int lowmem_trigger_reclaim(int flags, int victims, enum oom_score score, int threshold_mb)
{
struct lowmem_control *ctl = LOWMEM_NEW_REQUEST();
flags |= OOM_FORCE | OOM_IN_DEPTH | OOM_SINGLE_SHOT;
victims = victims > 0 ? victims : MAX_MEMORY_CGROUP_VICTIMS;
- type = type > 0 ? type : MEMCG_LOW;
+ score = score > 0 ? score : OOM_SCORE_LOW;
threshold_mb = threshold_mb > 0 ? threshold_mb : get_root_memcg_info()->threshold_leave_mb;
lowmem_change_memory_state(MEM_LEVEL_CRITICAL, 1);
LOWMEM_SET_REQUEST(ctl, flags,
- type, threshold_mb, victims,
+ score, threshold_mb, victims,
lowmem_force_reclaim_cb);
lowmem_queue_request(&lmw, ctl);
return 0;
}
-void lowmem_trigger_swap_reclaim(enum cgroup_type type, unsigned long long swap_size_bytes)
+void lowmem_trigger_swap_reclaim(enum oom_score score, unsigned long long swap_size_bytes)
{
int size_mb, victims;
? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
size_mb = get_root_memcg_info()->threshold_leave_mb + BYTE_TO_MBYTE(swap_size_bytes);
- lowmem_trigger_reclaim(0, victims, type, size_mb);
+ lowmem_trigger_reclaim(0, victims, score, size_mb);
}
bool lowmem_fragmentated(void)
_D("history based proactive LMK : avg rss %u, available %u required = %u MB",
rss_mb, before_mb, size_mb);
- lowmem_trigger_reclaim(0, victims, MEMCG_LOW, size_mb);
+ lowmem_trigger_reclaim(0, victims, OOM_SCORE_LOW, size_mb);
return;
}
*/
_D("Run threshold based proactive LMK: memory level to reach: %u MB\n",
proactive_leave_mb + THRESHOLD_MARGIN);
- lowmem_trigger_reclaim(0, victims, MEMCG_LOW, proactive_leave_mb + THRESHOLD_MARGIN);
+ lowmem_trigger_reclaim(0, victims, OOM_SCORE_LOW, proactive_leave_mb + THRESHOLD_MARGIN);
}
unsigned int lowmem_get_proactive_thres(void)
return BYTE_TO_MBYTE(size_bytes);
}
-static void load_configs(const char *path)
+static void load_configs(void)
{
struct memcg_conf *memcg_conf = get_memcg_conf();
-	/* set MemoryGroupLimit section */
+	/* set MemoryThrottling section */
- for (int cgroup = MEMCG_VIP; cgroup < MEMCG_END; cgroup++) {
+ for (int cgroup = MEMCG_THROTTLING; cgroup < MEMCG_END; cgroup++) {
if (memcg_conf->cgroup_limit[cgroup] > 0.0)
memcg_info_set_limit(get_memcg_info(cgroup),
memcg_conf->cgroup_limit[cgroup]/100.0, totalram_bytes);
static void print_mem_configs(void)
{
/* print info of Memory section */
- for (int cgroup = MEMCG_VIP; cgroup < MEMCG_END; cgroup++) {
+ for (int cgroup = MEMCG_THROTTLING; cgroup < MEMCG_END; cgroup++) {
_I("[MEMORY-CGROUP] set memory for cgroup '%s' to %llu bytes",
convert_cgroup_type_to_str(cgroup), get_memcg_info(cgroup)->limit_bytes);
}
setup_memcg_params();
/* default configuration */
- load_configs(MEM_CONF_FILE);
+ load_configs();
/* this function should be called after parsing configurations */
memcg_write_limiter_params();
* It means that there are many background processes or
* some process makes memory leak.
* So, it requires to trigger proactive oom killer
- * with MEMCG_ROOT type.
+ * with OOM_SCORE_MAX.
*/
- lowmem_trigger_swap_reclaim(MEMCG_ROOT, swap_size);
+ lowmem_trigger_swap_reclaim(OOM_SCORE_MAX, swap_size);
return -ENOSPC;
}
pthread_mutex_unlock(&swap_thread_queue.lock);
}
-static int swap_move_to_cgroup_by_pid(enum cgroup_type type, pid_t pid)
+static int swap_move_to_cgroup_by_pid(enum oom_score score, pid_t pid)
{
int error;
int oom_score_adj;
return RESOURCED_ERROR_FAIL;
}
- lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(type);
+ lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(score);
if (oom_score_adj < lowest_oom_score_adj)
oom_score_adj = lowest_oom_score_adj;
if (swap_node == SWAP_NODE_FORCE_RECLAIM)
return RESOURCED_ERROR_NONE;
- ret = check_oom_and_set_limit(mi->name, mi->limit_bytes);
+ if (mi->limit_ratio == MEMCG_NO_LIMIT) {
+ ret = cgroup_write_node_int32(mi->name, MEMCG_SWAP_LIMIT_BYTE, -1);
+ ret = cgroup_write_node_int32(mi->name, MEMCG_LIMIT_BYTE, -1);
+ }
+ else
+ ret = check_oom_and_set_limit(mi->name, mi->limit_bytes);
if (ret != RESOURCED_ERROR_NONE)
_E("[SWAP] Failed to change hard limit of %s cgroup to -1", mi->name);
else
return;
}
- cgroup_swap = get_cgroup_tree(MEMCG_LOW);
+ cgroup_swap = get_cgroup_tree(MEMCG_THROTTLING);
if (!cgroup_swap)
return;
- swap_move_to_cgroup_by_pid(MEMCG_LOW, pid);
+ swap_move_to_cgroup_by_pid(OOM_SCORE_LOW, pid);
swap_start_handler(cgroup_swap->memcg_info->name);
_I("[SWAP] swap cgroup entered : pid : %d", (int)pid);
}
_I("[SWAP] swap %s", arg_swap_enable == true ? "enable" : "disable");
_I("[SWAP] swap at boot %s", arg_swap_at_boot == true ? "enable" : "disable");
_I("[SWAP] swap type = %d", arg_swap_type);
-
- for(int cgroup = MEMCG_VIP; cgroup < MEMCG_END; cgroup++) {
- _I("[SWAP] cgroup (%s) swapiness = %d", cgroup == MEMCG_VIP ? "vip" :
- cgroup == MEMCG_HIGH ? "high" :
- cgroup == MEMCG_MEDIUM ? "medium" : "lowest", get_memcg_info(cgroup)->swappiness);
- }
}
static int swap_parse_config_file(void)
arg_swap_at_boot = swap_conf->boot_reclaim_enable;
- for(int cgroup = MEMCG_VIP; cgroup < MEMCG_END; cgroup++) {
- if (swap_conf->swappiness[cgroup] >= 0 &&
- swap_conf->swappiness[cgroup] <= 100)
- memcg_info_set_swappiness(get_memcg_info(cgroup),
- swap_conf->swappiness[cgroup]);
+ if (swap_conf->swappiness[MEMCG_THROTTLING] >= 0 &&
+ swap_conf->swappiness[MEMCG_THROTTLING] <= 100) {
+ memcg_info_set_swappiness(get_memcg_info(MEMCG_THROTTLING),
+ swap_conf->swappiness[MEMCG_THROTTLING]);
+ memcg_write_optimizer_params();
+		_I("[SWAP] cgroup (%s) swappiness = %d", "Throttling", get_memcg_info(MEMCG_THROTTLING)->swappiness);
}
gslist_for_each_item(iter, swap_module) {
if (!arg_swap_enable)
return -ENODEV;
- memcg_write_optimizer_params();
gslist_for_each_safe(swap_module, iter, next, swaps) {
swaps = (struct swap_module_ops *)iter->data;
_D("[SWAP] resourced swap init start");
- resourced_swap_change_memcg_settings(MEMCG_LOW);
+ resourced_swap_change_memcg_settings(MEMCG_THROTTLING);
swap_set_state(SWAP_OFF);
ret = swap_init();
static int swap_zram_reclaim(void *data)
{
- int r, type;
+ int r, score;
static unsigned long long swap_total_bytes = 0;
static bool zram_compact;
unsigned long long swap_available_bytes;
*/
if (lowmem_fragmentated()) {
if (zram_compact) {
- lowmem_trigger_swap_reclaim(MEMCG_ROOT, zram_control.zram_reclaim_bytes);
+ lowmem_trigger_swap_reclaim(OOM_SCORE_MAX, zram_control.zram_reclaim_bytes);
zram_compact = false;
} else {
swap_zram_compact();
if (!swap_total_bytes)
swap_total_bytes = KBYTE_TO_BYTE(proc_get_swap_total());
- r = memcg_get_swap_usage(MEMCG_LOW_GROUP_PATH, &swap_usage_bytes);
+ r = memcg_get_swap_usage(MEMCG_THROTTLING_PATH, &swap_usage_bytes);
if (r)
return r;
swapcg_usage_ratio = (float)(swap_usage_bytes / (swap_total_bytes - swap_available_bytes) *100);
if (swapcg_usage_ratio > SWAPCG_CHECK_RATIO)
- type = MEMCG_LOW;
+ score = OOM_SCORE_LOW;
else
- type = MEMCG_ROOT;
+ score = OOM_SCORE_MAX;
- lowmem_trigger_swap_reclaim(type, zram_control.zram_reclaim_bytes);
+ lowmem_trigger_swap_reclaim(score, zram_control.zram_reclaim_bytes);
zram_compact = false;
return -ENOSPC;
}
* So, it requires to trigger proactive oom killer.
*/
- lowmem_trigger_swap_reclaim(MEMCG_ROOT, swap_size_bytes);
+ lowmem_trigger_swap_reclaim(OOM_SCORE_MAX, swap_size_bytes);
return -ENOSPC;
}
pci->pid = pid;
- /* fixed memory cgroup */
- if (pci->mem_type != MEMCG_TOP) {
- proc_set_oom_score_adj(pid, cgroup_get_lowest_oom_score_adj(pci->mem_type), NULL);
- }
-
memset(&attr, 0, sizeof(struct sched_attr));
attr.size = sizeof(struct sched_attr);
if (pci->cpu_sched_info.cpu_nice >= CPU_MIN_NICE &&
_W("[WATCHDOG] Currently we support only REBOOT when a service is released");
}
- /* Put a system service into throttling group */
+ /* Put a system service into (cpu) throttling group */
if (pci->cpu_throttling_enable) {
struct proc_status ps = {0, };
ps.pid = pid;
ps.pai = NULL;
- resourced_notify(RESOURCED_NOTIFIER_THROTTLING_SYSTEM_SERVICE, &ps);
+ resourced_notify(RESOURCED_NOTIFIER_THROTTLING_RESOURCE, &ps);
}
/* if (pci->cpu_sched_info.cpu_sched_type == CPU_SCHED_FIFO ||
pci->cpu_sched_info.cpu_sched_type == CPU_SCHED_RR ||
}
}*/
+ /* Put a system service into (memory) throttling group */
+ if (pci->memory_throttling_enable)
+ cgroup_write_pid_fullpath(MEMCG_THROTTLING_PATH, pid);
+
/* register a notification when this service memory is over a threshold */
if (pci->mem_action.memory_bytes && pci->mem_action.action) {
struct proc_limit_status pls = {0, };
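For context, an illustrative limiter.conf.d fragment for the renamed [Private] section. Only the section name and the MemoryThrottling/CpuThrottling keys are confirmed by this change; the other per-service keys are assumed to remain as before:

[Private]
#...existing per-service keys (service name, watchdog/limit actions) unchanged...
MemoryThrottling=yes
CpuThrottling=yes

A service configured this way gets memory_throttling_enable set via config_parse_bool() and is written into MEMCG_THROTTLING_PATH by the system-service handling code above, in addition to the RESOURCED_NOTIFIER_THROTTLING_RESOURCE notification used for CPU throttling.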