#include "resourced.h"
#include "trace.h"
#include "file-helper.h"
-#include "procfs.h"
#include <dirent.h>
#include <errno.h>
#include <unistd.h>
#include <sys/mount.h>
-#define MAKE_NAME(name) CGROUP_##name##_NAME
-
-
-/*
- * This structure has full hierarchy of cgroups on running system.
- * It is exported through lowmem-handler.h file.
- **/
-static struct cgroup cgroup_tree[CGROUP_END] = {
- {"/", CGROUP_TOP, CGROUP_DEFAULT_USE_HIERARCHY, NULL, NULL},
- {CGROUP_VIP_NAME, CGROUP_ROOT, CGROUP_DEFAULT_USE_HIERARCHY, NULL, NULL},
- {CGROUP_HIGH_NAME, CGROUP_VIP, CGROUP_DEFAULT_USE_HIERARCHY, NULL, NULL},
- {CGROUP_MEDIUM_NAME, CGROUP_HIGH, CGROUP_DEFAULT_USE_HIERARCHY, NULL, NULL},
- {CGROUP_LOW_NAME, CGROUP_MEDIUM, CGROUP_DEFAULT_USE_HIERARCHY, NULL, NULL},
-};
-
-int cgroup_get_type(int oom_score_adj)
-{
- if (oom_score_adj == OOMADJ_SERVICE_MIN)
- return CGROUP_VIP;
- else if (oom_score_adj >= OOMADJ_SU &&
- oom_score_adj < OOMADJ_BACKGRD_PERCEPTIBLE)
- return CGROUP_HIGH;
- else if (oom_score_adj >= OOMADJ_BACKGRD_PERCEPTIBLE &&
- oom_score_adj < OOMADJ_BACKGRD_UNLOCKED + OOMADJ_APP_INCREASE)
- return CGROUP_MEDIUM;
- else if (oom_score_adj >= OOMADJ_BACKGRD_UNLOCKED + OOMADJ_APP_INCREASE &&
- oom_score_adj <= OOMADJ_APP_MAX)
- return CGROUP_LOW;
- else
- return CGROUP_ROOT;
-}
-
-int cgroup_get_lowest_oom_score_adj(int type)
-{
- if (type < CGROUP_ROOT || type > CGROUP_LOW) {
- _E("cgroup type should be located between CGROUP_ROOT and CGROUP_LOW");
- }
-
- if (type == CGROUP_VIP)
- return OOMADJ_SERVICE_MIN;
- else if (type == CGROUP_HIGH)
- return OOMADJ_SU;
- else if (type == CGROUP_MEDIUM)
- return OOMADJ_BACKGRD_PERCEPTIBLE;
- else if (type == CGROUP_LOW)
- return OOMADJ_BACKGRD_UNLOCKED + OOMADJ_APP_INCREASE;
- else
- return OOMADJ_SU;
-}
-
-int cgroup_get_highest_oom_score_adj(int type)
-{
- if (type == CGROUP_VIP)
- return OOMADJ_SERVICE_MIN;
- else if (type == CGROUP_HIGH)
- return OOMADJ_FOREGRD_UNLOCKED;
- else if (type == CGROUP_MEDIUM)
- return OOMADJ_BACKGRD_UNLOCKED;
- else
- return OOMADJ_APP_MAX;
-}
-
-struct cgroup *get_cgroup_tree(int idx)
-{
- if(idx < CGROUP_ROOT || idx >= CGROUP_END) {
- _E("[CGROUP] (%d) cgroup tree is NULL", idx);
- return NULL;
- }
- else
- return &cgroup_tree[idx];
-}
-
-void set_memcg_info(int idx, struct memcg_info *mi)
-{
- if(idx < CGROUP_ROOT || idx >= CGROUP_END) {
- _E("[CGROUP] (%d) index is out of scope", idx);
- }
- else
- cgroup_tree[idx].memcg_info = mi;
-}
-
-struct memcg_info *get_memcg_info(int idx)
-{
- if(idx < CGROUP_ROOT || idx >= CGROUP_END) {
- _E("[CGROUP] (%d) cgroup tree's memcg info is NULL", idx);
- return NULL;
- }
- else
- return cgroup_tree[idx].memcg_info;
-}
-
-GSList *get_child_cgroups(int idx)
-{
- if(idx < CGROUP_ROOT || idx >= CGROUP_END) {
- _E("[CGROUP] (%d) cgroup tree's child is NULL", idx);
- return NULL;
- }
- else
- return cgroup_tree[idx].child_cgroups;
-}
-
-int get_parent_cgroup(int idx)
-{
- if(idx < CGROUP_ROOT || idx >= CGROUP_END) {
- _E("[CGROUP] (%d) cgroup range is out of scope", idx);
- return CGROUP_TOP;
- }
- else {
- return cgroup_tree[idx].parent_cgroup;
- }
-}
-
-void set_use_hierarchy(int idx, bool use_hierarchy)
-{
- if(idx < CGROUP_ROOT || idx >= CGROUP_END) {
- _E("[CGROUP] (%d) cgroup range is out of scope", idx);
- }
- else {
- cgroup_tree[idx].use_hierarchy = use_hierarchy;
- }
-}
-
-bool get_use_hierarchy(int idx)
-{
- if(idx < CGROUP_ROOT || idx >= CGROUP_END) {
- _E("[CGROUP] (%d) cgroup range is out of scope", idx);
- return CGROUP_DEFAULT_USE_HIERARCHY;
- }
- else {
- return cgroup_tree[idx].use_hierarchy;
- }
-}
-
static bool cgroup_is_exists(const char *cgroup_full_path)
{
struct stat stat_buf;
return ret;
}
-int cgroup_make_full_subdir(const char* parentdir)
-{
- int result;
- char path[MAX_PATH_LENGTH] = {0, };
-
- result = str_name_cpy(path, parentdir, sizeof(path), strlen(parentdir));
- ret_value_if(result < 0, result);
-
- for(int i = CGROUP_VIP; i < CGROUP_END; i++) {
- char name[MAX_NAME_LENGTH] = {0, };
-
- if(i == CGROUP_VIP) {
- result = str_name_cpy(name, MAKE_NAME(VIP), sizeof(name), strlen(MAKE_NAME(VIP)));
- ret_value_if(result < 0, result);
- }
- else if(i == CGROUP_HIGH) {
- result = str_name_cpy(name, MAKE_NAME(HIGH), sizeof(name), strlen(MAKE_NAME(HIGH)));
- ret_value_if(result < 0, result);
- }
- else if(i == CGROUP_MEDIUM) {
- result = str_name_cpy(name, MAKE_NAME(MEDIUM), sizeof(name), strlen(MAKE_NAME(MEDIUM)));
- ret_value_if(result < 0, result);
- }
- else if(i == CGROUP_LOW) {
- result = str_name_cpy(name, MAKE_NAME(LOW), sizeof(name), strlen(MAKE_NAME(LOW)));
- ret_value_if(result < 0, result);
- }
-
- result = cgroup_make_subdir(path, name, NULL);
- ret_value_msg_if(result < 0, result, "%s/%s init failed\n", path, name);
-
- result = str_name_cat(path, "/", sizeof(path), strlen(path), 1);
- ret_value_if(result < 0, result);
- result = str_name_cat(path, name, sizeof(path), strlen(path), strlen(name));
- ret_value_if(result < 0, result);
-
- // ../../perprocess
-/* result = cgroup_make_subdir(path, MAKE_NAME(PER_PROCESS), NULL);
- ret_value_msg_if(result < 0, result, "%s/%s init failed\n",
- path, MAKE_NAME(PER_PROCESS));
- // ../../group
- result = cgroup_make_subdir(path, MAKE_NAME(GROUP), NULL);
- ret_value_msg_if(result < 0, result, "%s/%s init failed\n",
- path, MAKE_NAME(GROUP));*/
- }
-
- return RESOURCED_ERROR_NONE;
-}
-
int cgroup_make_subdir(const char* parentdir, const char* cgroup_name, bool *already)
{
char buf[MAX_PATH_LENGTH];
#define CGROUP_PATH "/sys/fs/cgroup"
-#define CGROUP_VIP_NAME ""
-#define CGROUP_HIGH_NAME "High"
-#define CGROUP_MEDIUM_NAME "Medium"
-#define CGROUP_LOW_NAME "Lowest"
-
#define CGROUP_PER_PROCESS_NAME ""
#define CGROUP_GROUP_NAME ""
#define CGROUP_DEFAULT_USE_HIERARCHY false
-/*
- * [cgroup information]
- * CGROUP_ROOT : root cgroup
- * CGROUP_VIP : cgroup for vip apps(or daemons)
- * CGROUP_HIGH : cgroup for foreground apps
- * CGROUP_MEDIUM : cgroup for background apps
- * CGROUP_LOW : cgroup for apps of the lowest privilege
- *
- * [cgroup hierarchy]
- * (normal mode)
- root(cpu, memory, io)
- * ├─high─(tizendocker)
- * │ └─medium
- * │ └─low
- * └─system.slice/user.slice (not controlled by resourced)
- *
- * (vip mode)
- root(cpu, memory, io)
- * │
- * vip
- * ├─high─(tizendocker)
- * │ └─medium
- * │ └─low
- * └─system.slice/user.slice (not controlled by resourced)
- */
-enum cgroup_type {
- CGROUP_TOP = -1,
- CGROUP_ROOT,
- CGROUP_VIP,
- CGROUP_HIGH,
- CGROUP_MEDIUM,
- CGROUP_LOW,
- CGROUP_END,
-};
+
+
struct cgroup {
/* hashname of memory cgroup for restoring memcg info*/
GSList *child_cgroups;
};
-/**
- * @desc Get cgroup type according to oom_score_adj
- * @param oom_score_adj - oom_score_adj
- * @return cgroup type
- */
-int cgroup_get_type(int oom_score_adj);
-
-/**
- * @desc Get the highest oom_score_adj of the cgroup type
- * @param type - cgroup type
- * @return oom_score_adj
- */
-int cgroup_get_highest_oom_score_adj(int type);
-/**
- * @desc Get the lowest oom_score_adj of the cgroup type
- * @param type - cgroup type
- * @return oom_score_adj
- */
-int cgroup_get_lowest_oom_score_adj(int type);
/**
* @desc Get one unsigned int32 value from cgroup
int cgroup_write_node_str(const char *cgroup_name,
const char *file_name, const char *string);
-/**
- * @desc make full cgroup,
- * @param parentdir - parent cgroup path
- * @return negative value if error
- */
-int cgroup_make_full_subdir(const char* parentdir);
/**
* @desc make cgroup,
int cgroup_get_pids(const char *name, GArray **pids);
-struct cgroup *get_cgroup_tree(int idx);
-void set_memcg_info(int idx, struct memcg_info *mi);
-struct memcg_info *get_memcg_info(int idx);
-GSList *get_child_cgroups(int idx);
-int get_parent_cgroup(int idx);
-void set_use_hierarchy(int idx, bool use_hierarchy);
-bool get_use_hierarchy(int idx);
-
//void cgroup_params_exit(void);
-void cgroup_params_init(void);
+//void cgroup_params_init(void);
#ifdef __cplusplus
}
return RESOURCED_ERROR_NONE;
}
+/**
+ * @desc Create the cpu cgroup subdirectory hierarchy. Unlike the memory
+ *       controller, only a single "Throttling" subgroup is created
+ *       directly under parentdir.
+ * @param parentdir - parent cpu cgroup path (e.g. CPUCG_PATH)
+ * @return result of cgroup_make_subdir(): negative value on error
+ */
+int cpucg_make_full_subdir(const char* parentdir)
+{
+ int result;
+ result = cgroup_make_subdir(parentdir, CPUCG_THROTTLING_NAME, NULL);
+ return result;
+}
extern "C" {
#endif /* __cplusplus */
-#define CPU_CGROUP_PATH(type) type == CGROUP_VIP ? CPUCG_VIP_GROUP_PATH : \
- type == CGROUP_HIGH ? CPUCG_HIGH_GROUP_PATH : \
- type == CGROUP_MEDIUM ? CPUCG_MEDIUM_GROUP_PATH : \
- type == CGROUP_LOW ? CPUCG_LOW_GROUP_PATH : NULL
-
-
#define CPUCG_NAME "cpu"
#define CPUCG_PATH CGROUP_PATH "/" CPUCG_NAME
-#define CPUCG_VIP_PATH CPUCG_PATH "/" CGROUP_VIP_NAME
-#define CPUCG_HIGH_PATH CPUCG_PATH "/" CGROUP_VIP_NAME "/" CGROUP_HIGH_NAME
-#define CPUCG_MEDIUM_PATH CPUCG_PATH "/" CGROUP_VIP_NAME "/" CGROUP_HIGH_NAME "/" CGROUP_MEDIUM_NAME
-#define CPUCG_LOW_PATH CPUCG_PATH "/" CGROUP_VIP_NAME "/" CGROUP_HIGH_NAME "/" CGROUP_MEDIUM_NAME "/" CGROUP_LOW_NAME
+
#define CPUCG_CONTROL_BANDWIDTH "cpu.cfs_quota_us"
#define CPUCG_CONTROL_FULL_BANDWIDTH "cpu.cfs_period_us"
#define CPUCG_RT_CONTROL_BANDWIDTH "cpu.rt_runtime_us"
#define CPUCG_RT_CONTROL_FULL_BANDWIDTH "cpu.rt_period_us"
#define CPUCG_SHARE "cpu.shares"
-#define CPUCG_VIP_PP_PATH CPUCG_VIP_PATH"/"CGROUP_PER_PROCESS_NAME
-#define CPUCG_VIP_GROUP_PATH CPUCG_VIP_PATH"/"CGROUP_GROUP_NAME
-
-#define CPUCG_HIGH_PP_PATH CPUCG_HIGH_PATH"/"CGROUP_PER_PROCESS_NAME
-#define CPUCG_HIGH_GROUP_PATH CPUCG_HIGH_PATH"/"CGROUP_GROUP_NAME
-
-#define CPUCG_MEDIUM_PP_PATH CPUCG_MEDIUM_PATH"/"CGROUP_PER_PROCESS_NAME
-#define CPUCG_MEDIUM_GROUP_PATH CPUCG_MEDIUM_PATH"/"CGROUP_GROUP_NAME
-
-#define CPUCG_LOW_PP_PATH CPUCG_LOW_PATH"/"CGROUP_PER_PROCESS_NAME
-#define CPUCG_LOW_GROUP_PATH CPUCG_LOW_PATH"/"CGROUP_GROUP_NAME
+#define CPUCG_THROTTLING_NAME "Throttling"
struct cpucg_conf {
char name[64];
int set_cpucg_conf(const char *name, const char *value);
void free_cpucg_conf(void);
int cpu_move_cgroup_foreach(pid_t pid, struct proc_app_info *pai, char *path);
+int cpucg_make_full_subdir(const char* parentdir);
#ifdef __cplusplus
}
#include "util.h"
#include "file-helper.h"
#include "config-parser.h"
+#include "procfs.h"
#include "proc-common.h"
#define BUF_MAX 1023
/*
* Special node that point's to /sys/fs/cgroup/memory - root of memcg group.
- * This is the same as memcg_tree[CGROUP_ROOT]->info.
+ * This is the same as memcg_tree[MEMCG_ROOT]->info.
*/
static struct memcg_info *memcg_root;
static struct memcg_conf *memcg_conf = NULL;
-static struct memcg_info gmemcg_info[CGROUP_END] = {
+/*
+ * Full hierarchy of memory cgroups on the running system,
+ * indexed by enum cgroup_type (MEMCG_ROOT..MEMCG_LOW).
+ */
+static struct cgroup cgroup_tree[MEMCG_END] = {
+ {"/", MEMCG_TOP, CGROUP_DEFAULT_USE_HIERARCHY, NULL, NULL},
+ {MEMCG_VIP_NAME, MEMCG_ROOT, CGROUP_DEFAULT_USE_HIERARCHY, NULL, NULL},
+ {MEMCG_HIGH_NAME, MEMCG_VIP, CGROUP_DEFAULT_USE_HIERARCHY, NULL, NULL},
+ {MEMCG_MEDIUM_NAME, MEMCG_HIGH, CGROUP_DEFAULT_USE_HIERARCHY, NULL, NULL},
+ {MEMCG_LOW_NAME, MEMCG_MEDIUM, CGROUP_DEFAULT_USE_HIERARCHY, NULL, NULL},
+};
+
+static struct memcg_info gmemcg_info[MEMCG_END] = {
{MEMCG_PATH,},
{MEMCG_VIP_PATH,},
{MEMCG_HIGH_PATH,},
{MEMCG_LOW_PATH,},
};
+/**
+ * @desc Map an oom_score_adj value to the memory cgroup type that
+ *       processes with that score belong to.
+ * @param oom_score_adj - the process's oom_score_adj value
+ * @return MEMCG_VIP, MEMCG_HIGH, MEMCG_MEDIUM or MEMCG_LOW for values
+ *         inside the per-type ranges; MEMCG_ROOT for anything outside
+ *         them (e.g. below OOMADJ_SU or above OOMADJ_APP_MAX)
+ */
+int cgroup_get_type(int oom_score_adj)
+{
+ /* VIP is matched by exact value, not a range */
+ if (oom_score_adj == OOMADJ_SERVICE_MIN)
+ return MEMCG_VIP;
+ else if (oom_score_adj >= OOMADJ_SU &&
+ oom_score_adj < OOMADJ_BACKGRD_PERCEPTIBLE)
+ return MEMCG_HIGH;
+ else if (oom_score_adj >= OOMADJ_BACKGRD_PERCEPTIBLE &&
+ oom_score_adj < OOMADJ_BACKGRD_UNLOCKED + OOMADJ_APP_INCREASE)
+ return MEMCG_MEDIUM;
+ else if (oom_score_adj >= OOMADJ_BACKGRD_UNLOCKED + OOMADJ_APP_INCREASE &&
+ oom_score_adj <= OOMADJ_APP_MAX)
+ return MEMCG_LOW;
+ else
+ return MEMCG_ROOT;
+}
+
+/**
+ * @desc Get the lowest (inclusive) oom_score_adj of the given memory
+ *       cgroup type, i.e. the bottom of the range used by cgroup_get_type().
+ * @param type - memory cgroup type (MEMCG_ROOT..MEMCG_LOW)
+ * @return lowest oom_score_adj of the type; OOMADJ_SU for MEMCG_ROOT
+ *         and for any other value (out-of-range input is only logged
+ *         below, not rejected — the final else still returns OOMADJ_SU)
+ */
+int cgroup_get_lowest_oom_score_adj(int type)
+{
+ if (type < MEMCG_ROOT || type > MEMCG_LOW) {
+ _E("cgroup type should be located between MEMCG_ROOT and MEMCG_LOW");
+ }
+
+ if (type == MEMCG_VIP)
+ return OOMADJ_SERVICE_MIN;
+ else if (type == MEMCG_HIGH)
+ return OOMADJ_SU;
+ else if (type == MEMCG_MEDIUM)
+ return OOMADJ_BACKGRD_PERCEPTIBLE;
+ else if (type == MEMCG_LOW)
+ return OOMADJ_BACKGRD_UNLOCKED + OOMADJ_APP_INCREASE;
+ else
+ return OOMADJ_SU;
+}
+
+/**
+ * @desc Get the highest (inclusive) oom_score_adj of the given memory
+ *       cgroup type.
+ * @param type - memory cgroup type (MEMCG_VIP..MEMCG_LOW)
+ * @return highest oom_score_adj of the type; OOMADJ_APP_MAX for
+ *         MEMCG_LOW and any unrecognized value (no range check here)
+ */
+int cgroup_get_highest_oom_score_adj(int type)
+{
+ if (type == MEMCG_VIP)
+ return OOMADJ_SERVICE_MIN;
+ else if (type == MEMCG_HIGH)
+ return OOMADJ_FOREGRD_UNLOCKED;
+ else if (type == MEMCG_MEDIUM)
+ return OOMADJ_BACKGRD_UNLOCKED;
+ else
+ return OOMADJ_APP_MAX;
+}
+
+/**
+ * @desc Look up the cgroup_tree entry for a memory cgroup type.
+ * @param idx - memory cgroup type (MEMCG_ROOT..MEMCG_END-1)
+ * @return pointer to the static cgroup_tree entry, or NULL when idx
+ *         is out of range (callers must handle NULL)
+ */
+struct cgroup *get_cgroup_tree(int idx)
+{
+ if(idx < MEMCG_ROOT || idx >= MEMCG_END) {
+ _E("[CGROUP] (%d) cgroup tree is NULL", idx);
+ return NULL;
+ }
+ else
+ return &cgroup_tree[idx];
+}
+
+/**
+ * @desc Attach a memcg_info to the cgroup_tree entry for idx.
+ *       Out-of-range idx is logged and silently ignored.
+ * @param idx - memory cgroup type (MEMCG_ROOT..MEMCG_END-1)
+ * @param mi - memcg_info to store (ownership stays with the caller)
+ */
+static void set_memcg_info(int idx, struct memcg_info *mi)
+{
+ if(idx < MEMCG_ROOT || idx >= MEMCG_END) {
+ _E("[CGROUP] (%d) index is out of scope", idx);
+ }
+ else
+ cgroup_tree[idx].memcg_info = mi;
+}
+
+/**
+ * @desc Get the memcg_info previously attached to the cgroup_tree
+ *       entry for idx (see set_memcg_info()).
+ * @param idx - memory cgroup type (MEMCG_ROOT..MEMCG_END-1)
+ * @return stored memcg_info pointer, or NULL when idx is out of range
+ *         (may also be NULL if nothing was attached yet)
+ */
+struct memcg_info *get_memcg_info(int idx)
+{
+ if(idx < MEMCG_ROOT || idx >= MEMCG_END) {
+ _E("[CGROUP] (%d) cgroup tree's memcg info is NULL", idx);
+ return NULL;
+ }
+ else
+ return cgroup_tree[idx].memcg_info;
+}
+
+/**
+ * @desc Get the list of child cgroups of the cgroup_tree entry for idx.
+ * @param idx - memory cgroup type (MEMCG_ROOT..MEMCG_END-1)
+ * @return GSList of children, or NULL when idx is out of range
+ *         (an empty child list is also NULL — the two cases are not
+ *         distinguishable from the return value alone)
+ */
+static GSList *get_child_cgroups(int idx)
+{
+ if(idx < MEMCG_ROOT || idx >= MEMCG_END) {
+ _E("[CGROUP] (%d) cgroup tree's child is NULL", idx);
+ return NULL;
+ }
+ else
+ return cgroup_tree[idx].child_cgroups;
+}
+
+/**
+ * @desc Get the parent cgroup type of the cgroup_tree entry for idx.
+ * @param idx - memory cgroup type (MEMCG_ROOT..MEMCG_END-1)
+ * @return parent type as stored in cgroup_tree; MEMCG_TOP (-1) when
+ *         idx is out of range (MEMCG_TOP is also the legitimate parent
+ *         of MEMCG_ROOT, so callers cannot treat it purely as an error)
+ */
+static int get_parent_cgroup(int idx)
+{
+ if(idx < MEMCG_ROOT || idx >= MEMCG_END) {
+ _E("[CGROUP] (%d) cgroup range is out of scope", idx);
+ return MEMCG_TOP;
+ }
+ else {
+ return cgroup_tree[idx].parent_cgroup;
+ }
+}
+
+/**
+ * @desc Set the use_hierarchy flag on the cgroup_tree entry for idx.
+ *       Out-of-range idx is logged and silently ignored.
+ * @param idx - memory cgroup type (MEMCG_ROOT..MEMCG_END-1)
+ * @param use_hierarchy - new flag value
+ */
+static void set_use_hierarchy(int idx, bool use_hierarchy)
+{
+ if(idx < MEMCG_ROOT || idx >= MEMCG_END) {
+ _E("[CGROUP] (%d) cgroup range is out of scope", idx);
+ }
+ else {
+ cgroup_tree[idx].use_hierarchy = use_hierarchy;
+ }
+}
+
+/**
+ * @desc Get the use_hierarchy flag of the cgroup_tree entry for idx.
+ * @param idx - memory cgroup type (MEMCG_ROOT..MEMCG_END-1)
+ * @return stored flag, or CGROUP_DEFAULT_USE_HIERARCHY when idx is
+ *         out of range
+ */
+bool get_use_hierarchy(int idx)
+{
+ if(idx < MEMCG_ROOT || idx >= MEMCG_END) {
+ _E("[CGROUP] (%d) cgroup range is out of scope", idx);
+ return CGROUP_DEFAULT_USE_HIERARCHY;
+ }
+ else {
+ return cgroup_tree[idx].use_hierarchy;
+ }
+}
+
void register_totalram_bytes(unsigned long long ram_bytes)
{
totalram_bytes = ram_bytes;
unsigned int i;
unsigned long long lower_group_limit_bytes = 0;
- for (i = CGROUP_LOW; i > CGROUP_ROOT; i--) {
+ for (i = MEMCG_LOW; i > MEMCG_ROOT; i--) {
struct memcg_info *mi = get_memcg_info(i);
if (mi->limit_bytes < lower_group_limit_bytes)
{
unsigned int i;
- for (i = CGROUP_VIP; i < CGROUP_END; i++) {
+ for (i = MEMCG_VIP; i < MEMCG_END; i++) {
struct memcg_info *mi = get_memcg_info(i);
memcg_write_optimizer_info(mi);
}
int idx = 0;
GSList *child_cgroups;
- for (idx = CGROUP_ROOT; idx < CGROUP_END; idx++) {
+ for (idx = MEMCG_ROOT; idx < MEMCG_END; idx++) {
struct memcg_info *mi = &gmemcg_info[idx];
-
+
set_memcg_info(idx, mi);
- if(idx == CGROUP_ROOT)
+ if(idx == MEMCG_ROOT)
memcg_root = mi;
else {
int parent_idx = get_parent_cgroup(idx);
_I("init memory cgroup for %s", mi->name);
}
}
+
+/**
+ * @desc Create the full memory cgroup directory hierarchy under
+ *       parentdir: VIP, then High, Medium and Lowest nested inside
+ *       each other (path accumulates one level per loop iteration).
+ * @param parentdir - parent cgroup path (e.g. MEMCG_PATH)
+ * @return negative value on the first failed copy/concat/mkdir,
+ *         RESOURCED_ERROR_NONE on success
+ */
+int memcg_make_full_subdir(const char* parentdir)
+{
+ int result;
+ char path[MAX_PATH_LENGTH] = {0, };
+
+ result = str_name_cpy(path, parentdir, sizeof(path), strlen(parentdir));
+ ret_value_if(result < 0, result);
+
+ for(int i = MEMCG_VIP; i < MEMCG_END; i++) {
+ char name[MAX_NAME_LENGTH] = {0, };
+
+ if(i == MEMCG_VIP) {
+ result = str_name_cpy(name, MEMCG_MAKE_NAME(VIP), sizeof(name), strlen(MEMCG_MAKE_NAME(VIP)));
+ ret_value_if(result < 0, result);
+ }
+ else if(i == MEMCG_HIGH) {
+ result = str_name_cpy(name, MEMCG_MAKE_NAME(HIGH), sizeof(name), strlen(MEMCG_MAKE_NAME(HIGH)));
+ ret_value_if(result < 0, result);
+ }
+ else if(i == MEMCG_MEDIUM) {
+ result = str_name_cpy(name, MEMCG_MAKE_NAME(MEDIUM), sizeof(name), strlen(MEMCG_MAKE_NAME(MEDIUM)));
+ ret_value_if(result < 0, result);
+ }
+ else if(i == MEMCG_LOW) {
+ result = str_name_cpy(name, MEMCG_MAKE_NAME(LOW), sizeof(name), strlen(MEMCG_MAKE_NAME(LOW)));
+ ret_value_if(result < 0, result);
+ }
+
+ result = cgroup_make_subdir(path, name, NULL);
+ ret_value_msg_if(result < 0, result, "%s/%s init failed\n", path, name);
+
+ /* descend: append the new level so the next iteration nests under it */
+ result = str_name_cat(path, "/", sizeof(path), strlen(path), 1);
+ ret_value_if(result < 0, result);
+ result = str_name_cat(path, name, sizeof(path), strlen(path), strlen(name));
+ ret_value_if(result < 0, result);
+
+ // ../../perprocess
+/* result = cgroup_make_subdir(path, MEMCG_MAKE_NAME(PER_PROCESS), NULL);
+ ret_value_msg_if(result < 0, result, "%s/%s init failed\n",
+ path, MEMCG_MAKE_NAME(PER_PROCESS));
+ // ../../group
+ result = cgroup_make_subdir(path, MEMCG_MAKE_NAME(GROUP), NULL);
+ ret_value_msg_if(result < 0, result, "%s/%s init failed\n",
+ path, MEMCG_MAKE_NAME(GROUP));*/
+ }
+
+ return RESOURCED_ERROR_NONE;
+}
extern "C" {
#endif /* __cplusplus */
+#define MEMCG_MAKE_NAME(name) MEMCG_##name##_NAME
+
/* number of memory cgroups */
#define MEMCG_DEFAULT_EVENT_LEVEL "low"
#define MEMCG_MEDIUM_RATIO 0.96
#define MEMCG_FOREGROUND_LEAVE_RATIO 0.25
+#define MEMCG_VIP_NAME ""
+#define MEMCG_HIGH_NAME "High"
+#define MEMCG_MEDIUM_NAME "Medium"
+#define MEMCG_LOW_NAME "Lowest"
+
#define MEMCG_NAME "memory"
#define MEMCG_PATH CGROUP_PATH "/" MEMCG_NAME
-#define MEMCG_VIP_PATH MEMCG_PATH "/" CGROUP_VIP_NAME
-#define MEMCG_HIGH_PATH MEMCG_PATH "/" CGROUP_VIP_NAME "/" CGROUP_HIGH_NAME
-#define MEMCG_MEDIUM_PATH MEMCG_PATH "/" CGROUP_VIP_NAME "/" CGROUP_HIGH_NAME "/" CGROUP_MEDIUM_NAME
-#define MEMCG_LOW_PATH MEMCG_PATH "/" CGROUP_VIP_NAME "/" CGROUP_HIGH_NAME "/" CGROUP_MEDIUM_NAME "/" CGROUP_LOW_NAME
+#define MEMCG_VIP_PATH MEMCG_PATH "/" MEMCG_VIP_NAME
+#define MEMCG_HIGH_PATH MEMCG_PATH "/" MEMCG_VIP_NAME "/" MEMCG_HIGH_NAME
+#define MEMCG_MEDIUM_PATH MEMCG_PATH "/" MEMCG_VIP_NAME "/" MEMCG_HIGH_NAME "/" MEMCG_MEDIUM_NAME
+#define MEMCG_LOW_PATH MEMCG_PATH "/" MEMCG_VIP_NAME "/" MEMCG_HIGH_NAME "/" MEMCG_MEDIUM_NAME "/" MEMCG_LOW_NAME
#define MEMCG_VIP_PP_PATH MEMCG_VIP_PATH "/" CGROUP_PER_PROCESS_NAME
#define MEMCG_VIP_GROUP_PATH MEMCG_VIP_PATH "/" CGROUP_GROUP_NAME
CGROUP_MEMORY_STAT_INVALID = -1,
};
+/*
+ * [memory cgroup information]
+ * MEMCG_ROOT : memory cgroup for root dir
+ * MEMCG_VIP : memory cgroup for vip apps(or daemons)
+ * MEMCG_HIGH : memory cgroup for foreground apps
+ * MEMCG_MEDIUM : memory cgroup for background apps
+ * MEMCG_LOW : memory cgroup for apps of the lowest privilege
+ *
+ * [memory cgroup hierarchy]
+ * (normal mode)
+ * root
+ * ├─high─(tizendocker)
+ * │ └─medium
+ * │ └─low
+ * └─system.slice/user.slice
+ *
+ * (vip mode)
+ * root
+ * │
+ * vip
+ * ├─high─(tizendocker)
+ * │ └─medium
+ * │ └─low
+ * └─system.slice/user.slice
+ */
+enum cgroup_type {
+ MEMCG_TOP = -1,
+ MEMCG_ROOT,
+ MEMCG_VIP,
+ MEMCG_HIGH,
+ MEMCG_MEDIUM,
+ MEMCG_LOW,
+ MEMCG_END,
+};
+
struct mem_threshold {
bool percent;
int threshold; /* MB or % */
struct mem_action widget;
struct mem_action guiapp;
struct mem_action background;
- float cgroup_limit[CGROUP_END]; /* % */
+ float cgroup_limit[MEMCG_END]; /* % */
bool oom_popup;
};
struct memcg_info {
/* name of memory cgroup */
char name[MAX_PATH_LENGTH];
- /* hashname of memory cgroup for restoring memcg info*/
- /* parent id */
/* limit ratio, if don't want to set limit, use NO_LIMIT*/
float limit_ratio;
unsigned long long limit_bytes;
struct memcg_info *get_root_memcg_info(void);
void memcg_params_init(void);
+/**
+ * @desc Create the full memory cgroup directory hierarchy
+ *       (VIP/High/Medium/Lowest) under the given parent directory.
+ * @param parentdir - parent cgroup path
+ * @return negative value on error, RESOURCED_ERROR_NONE on success
+ */
+int memcg_make_full_subdir(const char* parentdir);
+bool get_use_hierarchy(int idx);
+
+/**
+ * @desc Get cgroup type according to oom_score_adj
+ * @param oom_score_adj - oom_score_adj
+ * @return cgroup type
+ */
+int cgroup_get_type(int oom_score_adj);
+
+/**
+ * @desc Get the highest oom_score_adj of the cgroup type
+ * @param type - cgroup type
+ * @return oom_score_adj
+ */
+int cgroup_get_highest_oom_score_adj(int type);
+
+/**
+ * @desc Get the lowest oom_score_adj of the cgroup type
+ * @param type - cgroup type
+ * @return oom_score_adj
+ */
+int cgroup_get_lowest_oom_score_adj(int type);
+struct memcg_info *get_memcg_info(int idx);
+struct cgroup *get_cgroup_tree(int idx);
+
#ifdef __cplusplus
}
#endif /* __cplusplus */
}
else if (!strncmp(result->name, VIP_GROUP_SWAPPINESS_CONF,
strlen(VIP_GROUP_SWAPPINESS_CONF)+1)) {
- swap_conf->swappiness[CGROUP_VIP] = atoi(result->value);
+ swap_conf->swappiness[MEMCG_VIP] = atoi(result->value);
}
else if (!strncmp(result->name, HIGH_GROUP_SWAPPINESS_CONF,
strlen(HIGH_GROUP_SWAPPINESS_CONF)+1)) {
- swap_conf->swappiness[CGROUP_HIGH] = atoi(result->value);
+ swap_conf->swappiness[MEMCG_HIGH] = atoi(result->value);
}
else if (!strncmp(result->name, MEDIUM_GROUP_SWAPPINESS_CONF,
strlen(MEDIUM_GROUP_SWAPPINESS_CONF)+1)) {
- swap_conf->swappiness[CGROUP_MEDIUM] = atoi(result->value);
+ swap_conf->swappiness[MEMCG_MEDIUM] = atoi(result->value);
}
else if (!strncmp(result->name, LOWEST_GROUP_SWAPPINESS_CONF,
strlen(LOWEST_GROUP_SWAPPINESS_CONF)+1)) {
- swap_conf->swappiness[CGROUP_LOW] = atoi(result->value);
+ swap_conf->swappiness[MEMCG_LOW] = atoi(result->value);
}
else {
_E("[CONFIG] Unknown configuration name (%s) and value (%s) on section (%s)",
if (!strncmp(result->name, VIP_GROUP_LIMIT_CONF,
strlen(VIP_GROUP_LIMIT_CONF) + 1)) {
- memcg_conf->cgroup_limit[CGROUP_VIP] = atof(result->value);
+ memcg_conf->cgroup_limit[MEMCG_VIP] = atof(result->value);
}
else if (!strncmp(result->name, HIGH_GROUP_LIMIT_CONF,
strlen(HIGH_GROUP_LIMIT_CONF) + 1)) {
- memcg_conf->cgroup_limit[CGROUP_HIGH] = atof(result->value);
+ memcg_conf->cgroup_limit[MEMCG_HIGH] = atof(result->value);
}
else if (!strncmp(result->name, MEDIUM_GROUP_LIMIT_CONF,
strlen(MEDIUM_GROUP_LIMIT_CONF) + 1)) {
- memcg_conf->cgroup_limit[CGROUP_MEDIUM] = atof(result->value);
+ memcg_conf->cgroup_limit[MEMCG_MEDIUM] = atof(result->value);
}
else if (!strncmp(result->name, LOWEST_GROUP_LIMIT_CONF,
strlen(LOWEST_GROUP_LIMIT_CONF) + 1)) {
- memcg_conf->cgroup_limit[CGROUP_LOW] = atof(result->value);
+ memcg_conf->cgroup_limit[MEMCG_LOW] = atof(result->value);
}
else {
_E("[CONFIG] Unknown configuration name (%s) and value (%s) on section (%s)",
_E("Failed to allocate memory during parsing vendor configurations");
return RESOURCED_ERROR_OUT_OF_MEMORY;
}
- pci->mem_type = CGROUP_TOP;
- pci->cpu_type = CGROUP_TOP;
+ pci->mem_type = MEMCG_TOP;
pci->cpu_sched_info.cpu_sched_type = CPU_SCHED_NONE;
pci->cpu_sched_info.cpu_rt_priority = CPU_INIT_PRIO;
pci->cpu_sched_info.cpu_nice = CPU_INIT_NICE;
}
}
/* limiter.conf.d */
- else if (!strncmp(result->name, CPU_CGROUP_NAME_CONF, strlen(CPU_CGROUP_NAME_CONF)+1) &&
+/* else if (!strncmp(result->name, CPU_CGROUP_NAME_CONF, strlen(CPU_CGROUP_NAME_CONF)+1) &&
*config_type == LIMITER_CONFIG) {
if (!pci) {
_E("process configuration information pointer should not be NULL");
_E("invalid parameter (%s)", result->value);
return RESOURCED_ERROR_INVALID_PARAMETER;
}
- }
+ }*/
else if (!strncmp(result->name, MEM_CGROUP_NAME_CONF, strlen(MEM_CGROUP_NAME_CONF)+1) &&
*config_type == LIMITER_CONFIG) {
if (!pci) {
if (!strncmp(result->value, CGROUP_VIP_VALUE_CONF,
strlen(CGROUP_VIP_VALUE_CONF) +1)) {
- pci->mem_type = CGROUP_VIP;
+ pci->mem_type = MEMCG_VIP;
}
else if (!strncmp(result->value, CGROUP_HIGH_VALUE_CONF,
strlen(CGROUP_HIGH_VALUE_CONF) +1)) {
- pci->mem_type = CGROUP_HIGH;
+ pci->mem_type = MEMCG_HIGH;
}
else if (!strncmp(result->value, CGROUP_MEDIUM_VALUE_CONF,
strlen(CGROUP_MEDIUM_VALUE_CONF) +1)) {
- pci->mem_type = CGROUP_MEDIUM;
+ pci->mem_type = MEMCG_MEDIUM;
}
else if (!strncmp(result->value, CGROUP_LOW_VALUE_CONF,
strlen(CGROUP_LOW_VALUE_CONF) +1)) {
- pci->mem_type = CGROUP_LOW;
+ pci->mem_type = MEMCG_LOW;
}
else {
_E("invalid parameter (%s)", result->value);
bool enable;
bool boot_reclaim_enable;
enum swap_type swap_type;
- int swappiness[CGROUP_END];
+ int swappiness[MEMCG_END];
struct zram_conf zram;
struct zswap_conf zswap;
};
else
pai->starttime = uptime;
}
- proc_set_process_memory_state(pai, CGROUP_TOP, NULL, OOMADJ_APP_MAX + 10);
+ proc_set_process_memory_state(pai, MEMCG_TOP, NULL, OOMADJ_APP_MAX + 10);
pai->memory.use_mem_limit = false;
pai->memory.oom_killed = false;
pai->memory.memlimit_update_exclude = false;
pci = fixed_app_and_service_exist_check(appid, APP_TYPE);
if (pci) {
- if (pci->mem_type != CGROUP_TOP) {
+ if (pci->mem_type != MEMCG_TOP) {
proc_set_oom_score_adj(pid, cgroup_get_lowest_oom_score_adj(pci->mem_type), pai);
pai->app_memcg_update_exclude = true;
}
- if (pci->cpu_type != CGROUP_TOP) {
+/* if (pci->cpu_type != CGROUP_TOP) {
cpu_move_cgroup_foreach(pid, pai, CPU_CGROUP_PATH(pci->cpu_type));
pai->app_cpucg_update_exclude = true;
- }
+ }*/
memset(&attr, 0, sizeof(struct sched_attr));
attr.size = sizeof(struct sched_attr);
#define CPU_BACKGROUND_PRI 1
#define CPU_CONTROL_PRI 10
-//static GSource *cpu_predefined_timer;
-static bool bCPUQuota;
-
-static inline int ioprio_set(int which, int who, int ioprio)
-{
- return syscall(__NR_ioprio_set, which, who, ioprio);
-}
-
-enum {
- IOPRIO_CLASS_NONE,
- IOPRIO_CLASS_RT,
- IOPRIO_CLASS_BE,
- IOPRIO_CLASS_IDLE,
-};
-
-enum {
- IOPRIO_WHO_PROCESS = 1,
- IOPRIO_WHO_PGRP,
- IOPRIO_WHO_USER,
-};
-
-#define IOPRIO_CLASS_SHIFT 13
-
static void cpu_priority_update(int which, pid_t pid, int priority, struct proc_app_info *pai)
{
if (pai && pai->app_cpu_nice_update_exclude)
setpriority(which, pid, priority);
}
-static bool cpu_quota_enabled(void)
-{
- return bCPUQuota;
-}
-
-static void cpu_check_cpuquota(void)
-{
- int ret, node = 0;
- char buf[MAX_PATH_LENGTH];
-
- snprintf(buf, sizeof(buf), "%s/%s", CPUCG_PATH, CPUCG_CONTROL_BANDWIDTH);
- ret = fread_int(buf, &node);
- if (!ret)
- bCPUQuota = true;
-}
-
-static int cpu_service_state(void *data)
-{
- struct proc_status *ps = (struct proc_status *)data;
- assert(ps && ps->pai);
- if (!ps->pai)
- return RESOURCED_ERROR_NO_DATA;
-
- _D("service launch: pid = %d, appname = %s", ps->pid, ps->pai->appid);
- if (CHECK_BIT(ps->pai->categories, PROC_BG_SYSTEM) ||
- CHECK_BIT(ps->pai->categories, PROC_BG_MEDIA))
- return RESOURCED_ERROR_NONE;
-
- cpu_move_cgroup_foreach(ps->pid, ps->pai, CPUCG_MEDIUM_GROUP_PATH);
- return RESOURCED_ERROR_NONE;
-}
-
-static int cpu_widget_state(void *data)
-{
- struct proc_status *ps = (struct proc_status *)data;
- assert(ps);
- assert(ps->pai);
-
- _D("widget background: pid = %d, appname = %s", ps->pid, ps->pai->appid);
- if (CHECK_BIT(ps->pai->flags, PROC_DOWNLOADAPP))
- cpu_move_cgroup_foreach(ps->pid, ps->pai, CPUCG_MEDIUM_GROUP_PATH);
- return RESOURCED_ERROR_NONE;
-}
-
static int cpu_foreground_state(void *data)
{
struct proc_status *ps = (struct proc_status *)data;
pri = getpriority(PRIO_PROCESS, ps->pid);
if (pri == -1 || pri > CPU_DEFAULT_PRI)
cpu_priority_update(PRIO_PGRP, ps->pid, CPU_DEFAULT_PRI, ps->pai);
- cpu_move_cgroup_foreach(ps->pid, ps->pai, CPUCG_HIGH_GROUP_PATH);
return RESOURCED_ERROR_NONE;
}
_D("app background: pid = %d", ps->pid);
cpu_priority_update(PRIO_PGRP, ps->pid, CPU_BACKGROUND_PRI, ps->pai);
- cpu_move_cgroup_foreach(ps->pid, ps->pai, CPUCG_MEDIUM_GROUP_PATH);
- return RESOURCED_ERROR_NONE;
-}
-
-static int cpu_restrict_state(void *data)
-{
- struct proc_status *ps = (struct proc_status *)data;
- assert(ps && ps->pai);
-
- if (!ps->pai)
- return RESOURCED_ERROR_NO_DATA;
-
- if (CHECK_BIT(ps->pai->categories, PROC_BG_SYSTEM) ||
- CHECK_BIT(ps->pai->categories, PROC_BG_MEDIA))
- return RESOURCED_ERROR_NONE;
-
- _D("app suspend: pid = %d, appname = %s", ps->pid, ps->pai->appid);
- cpu_move_cgroup_foreach(ps->pid, ps->pai, CPUCG_LOW_GROUP_PATH);
- return RESOURCED_ERROR_NONE;
-}
-
-static int cpu_active_state(void *data)
-{
- struct proc_status *ps = (struct proc_status *)data;
- int oom_score_adj = 0, ret;
- assert(ps);
-
- _D("app active : pid = %d", ps->pid);
- ret = proc_get_oom_score_adj(ps->pid, &oom_score_adj);
- if (ret || oom_score_adj < OOMADJ_PREVIOUS_DEFAULT)
- return RESOURCED_ERROR_NONE;
- cpu_move_cgroup_foreach(ps->pid, ps->pai, CPUCG_HIGH_GROUP_PATH);
- return RESOURCED_ERROR_NONE;
-}
-
-static int cpu_system_state(void *data)
-{
- struct proc_status *ps = (struct proc_status *)data;
- assert(ps);
-
- _D("system service : pid = %d", ps->pid);
- cpu_move_cgroup_foreach(ps->pid, ps->pai, CPUCG_MEDIUM_GROUP_PATH);
- return RESOURCED_ERROR_NONE;
-}
-
-static int cpu_terminatestart_state(void *data)
-{
- struct proc_status *ps = (struct proc_status *)data;
- assert(ps);
-
- cpu_move_cgroup_foreach(ps->pid, ps->pai, CPUCG_HIGH_GROUP_PATH);
- return RESOURCED_ERROR_NONE;
-}
-
-static int cpu_exclude_state(void *data)
-{
- struct proc_exclude *pe = (struct proc_exclude *)data;
- if (pe->type == PROC_INCLUDE)
- cpu_move_cgroup_foreach(pe->pid, find_app_info(pe->pid), CPUCG_MEDIUM_GROUP_PATH);
- else
- cpu_move_cgroup_foreach(pe->pid, find_app_info(pe->pid), CPUCG_HIGH_GROUP_PATH);
-
return RESOURCED_ERROR_NONE;
}
int ret_code;
_D("resourced cpu init start");
- ret_code = cgroup_make_full_subdir(CPUCG_PATH);
+ ret_code = cpucg_make_full_subdir(CPUCG_PATH);
ret_value_msg_if(ret_code < 0, ret_code, "cpu cgroup init failed\n");
- cpu_check_cpuquota();
- register_notifier(RESOURCED_NOTIFIER_SERVICE_LAUNCH, cpu_service_state);
register_notifier(RESOURCED_NOTIFIER_APP_RESUME, cpu_foreground_state);
register_notifier(RESOURCED_NOTIFIER_APP_FOREGRD, cpu_foreground_state);
- register_notifier(RESOURCED_NOTIFIER_APP_BACKGRD, cpu_background_state);
- register_notifier(RESOURCED_NOTIFIER_SYSTEM_SERVICE, cpu_system_state);
- register_notifier(RESOURCED_NOTIFIER_APP_TERMINATE_START, cpu_terminatestart_state);
- register_notifier(RESOURCED_NOTIFIER_CONTROL_EXCLUDE, cpu_exclude_state);
register_notifier(RESOURCED_NOTIFIER_WIDGET_FOREGRD, cpu_foreground_state);
- register_notifier(RESOURCED_NOTIFIER_WIDGET_BACKGRD, cpu_widget_state);
- register_notifier(RESOURCED_NOTIFIER_APP_ACTIVE, cpu_active_state);
- if (cpu_quota_enabled())
- register_notifier(RESOURCED_NOTIFIER_APP_SUSPEND_READY,
- cpu_restrict_state);
+ register_notifier(RESOURCED_NOTIFIER_APP_BACKGRD, cpu_background_state);
return RESOURCED_ERROR_NONE;
}
static int resourced_cpu_finalize(void *data)
{
- unregister_notifier(RESOURCED_NOTIFIER_SERVICE_LAUNCH, cpu_service_state);
unregister_notifier(RESOURCED_NOTIFIER_APP_RESUME, cpu_foreground_state);
unregister_notifier(RESOURCED_NOTIFIER_APP_FOREGRD, cpu_foreground_state);
- unregister_notifier(RESOURCED_NOTIFIER_APP_BACKGRD, cpu_background_state);
- unregister_notifier(RESOURCED_NOTIFIER_SYSTEM_SERVICE, cpu_system_state);
- unregister_notifier(RESOURCED_NOTIFIER_APP_TERMINATE_START, cpu_terminatestart_state);
- unregister_notifier(RESOURCED_NOTIFIER_CONTROL_EXCLUDE, cpu_exclude_state);
unregister_notifier(RESOURCED_NOTIFIER_WIDGET_FOREGRD, cpu_foreground_state);
- unregister_notifier(RESOURCED_NOTIFIER_WIDGET_BACKGRD, cpu_widget_state);
- unregister_notifier(RESOURCED_NOTIFIER_APP_ACTIVE, cpu_active_state);
- if (cpu_quota_enabled())
- unregister_notifier(RESOURCED_NOTIFIER_APP_SUSPEND_READY,
- cpu_restrict_state);
+ unregister_notifier(RESOURCED_NOTIFIER_APP_BACKGRD, cpu_background_state);
+
return RESOURCED_ERROR_NONE;
}
ret_unless(level >= 0);
ret_unless(thres_mb >= 0);
- memcg_set_threshold(CGROUP_ROOT, level, thres_mb);
+ memcg_set_threshold(MEMCG_ROOT, level, thres_mb);
}
static void lowmem_dbus_oom_set_leave_threshold(GVariant *params)
g_variant_get(params, gtype, &thres_mb);
ret_unless(thres_mb >= 0);
- memcg_set_leave_threshold(CGROUP_ROOT, thres_mb);
+ memcg_set_leave_threshold(MEMCG_ROOT, thres_mb);
}
static void lowmem_dbus_oom_trigger(GVariant *params)
{
lowmem_trigger_reclaim(OOM_NOMEMORY_CHECK,
- MAX_MEMORY_CGROUP_VICTIMS, CGROUP_LOW, 0);
+ MAX_MEMORY_CGROUP_VICTIMS, MEMCG_LOW, 0);
}
static void lowmem_dbus_set_perceptible(GVariant *params)
g_variant_get(params, gtype, &pid);
ret_unless(pid > 0);
- lowmem_trigger_swap(pid, get_memcg_info(CGROUP_LOW)->name, true);
+ lowmem_trigger_swap(pid, get_memcg_info(MEMCG_LOW)->name, true);
}
static void lowmem_dbus_set_memlimit(GVariant *params)
{
static const char *type_table[] =
{"/", "VIP", "High", "Medium", "Lowest"};
- if (type >= CGROUP_ROOT && type <= CGROUP_LOW)
+ if (type >= MEMCG_ROOT && type <= MEMCG_LOW)
return type_table[type];
else
return "Error";
}
/* VIP pids should be excluded from the LMK list */
- if (cgroup_get_type(oom) == CGROUP_VIP)
+ if (cgroup_get_type(oom) == MEMCG_VIP)
continue;
/*
static int calculate_range_of_oom(enum cgroup_type type, int *min, int *max)
{
- if (type == CGROUP_VIP || type >= CGROUP_END || type <= CGROUP_TOP) {
+ if (type == MEMCG_VIP || type >= MEMCG_END || type <= MEMCG_TOP) {
_E("cgroup type (%d) is out of scope", type);
return RESOURCED_ERROR_FAIL;
}
(Make sluggish or kill same victims continuously)
Thus, otherwise, just return in first operation and wait some period.
*/
- if (cgroup_type == CGROUP_LOW) {
- cgroup_type = CGROUP_MEDIUM;
+ if (cgroup_type == MEMCG_LOW) {
+ cgroup_type = MEMCG_MEDIUM;
goto retry;
- } else if ((cgroup_type == CGROUP_MEDIUM) && (ctl->flags & OOM_IN_DEPTH)) {
- cgroup_type = CGROUP_HIGH;
+ } else if ((cgroup_type == MEMCG_MEDIUM) && (ctl->flags & OOM_IN_DEPTH)) {
+ cgroup_type = MEMCG_HIGH;
if(ctl->flags & OOM_FORCE)
max_victim_cnt = FOREGROUND_VICTIMS;
goto retry;
- } else if ((cgroup_type == CGROUP_HIGH) && (ctl->flags & OOM_IN_DEPTH)) {
+ } else if ((cgroup_type == MEMCG_HIGH) && (ctl->flags & OOM_IN_DEPTH)) {
status = LOWMEM_RECLAIM_RETRY;
- ctl->type = CGROUP_ROOT;
+ ctl->type = MEMCG_ROOT;
}
- else if (cgroup_type == CGROUP_ROOT) {
+ else if (cgroup_type == MEMCG_ROOT) {
status = LOWMEM_RECLAIM_RETRY;
}
done:
return;
}
- /* In this case, corresponding process will be moved to memory CGROUP_LOW.
+ /* In this case, corresponding process will be moved to memory MEMCG_LOW.
*/
if (move) {
error = proc_get_oom_score_adj(pid, &oom_score_adj);
return;
}
- lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(CGROUP_LOW);
+ lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(MEMCG_LOW);
if (oom_score_adj < lowest_oom_score_adj) {
oom_score_adj = lowest_oom_score_adj;
}
/* Correponding process is already managed per app or service.
- * In addition, if some process is already located in the CGROUP_LOW, then just do swap
+ * In addition, if some process is already located in the MEMCG_LOW, then just do swap
*/
resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
}
change_lowmem_state(MEM_LEVEL_HIGH);
if (swap_get_state() == SWAP_ON && memcg_swap_status) {
- resourced_notify(RESOURCED_NOTIFIER_SWAP_UNSET_LIMIT, get_memcg_info(CGROUP_LOW));
+ resourced_notify(RESOURCED_NOTIFIER_SWAP_UNSET_LIMIT, get_memcg_info(MEMCG_LOW));
memcg_swap_status = false;
}
if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
ctl = LOWMEM_NEW_REQUEST();
if (ctl) {
LOWMEM_SET_REQUEST(ctl, OOM_IN_DEPTH,
- CGROUP_LOW, get_root_memcg_info()->threshold_leave_mb,
+ MEMCG_LOW, get_root_memcg_info()->threshold_leave_mb,
num_max_victims, medium_cb);
lowmem_queue_request(&lmw, ctl);
}
/* set thresholds for ram size 64M */
proactive_threshold_mb = PROACTIVE_64_THRES;
proactive_leave_mb = PROACTIVE_64_LEAVE;
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_64_THRES_DEDUP);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_64_THRES_SWAP);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_64_THRES_LOW);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_64_THRES_MEDIUM);
- memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_64_THRES_LEAVE);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_64_THRES_DEDUP);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_64_THRES_SWAP);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_64_THRES_LOW);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_64_THRES_MEDIUM);
+ memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_64_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_64_NUM_VICTIMS;
} else if (total_ramsize_mb <= MEM_SIZE_256) {
/* set thresholds for ram size 256M */
proactive_threshold_mb = PROACTIVE_256_THRES;
proactive_leave_mb = PROACTIVE_256_LEAVE;
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_256_THRES_DEDUP);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_256_THRES_SWAP);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_256_THRES_LOW);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_256_THRES_MEDIUM);
- memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_256_THRES_LEAVE);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_256_THRES_DEDUP);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_256_THRES_SWAP);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_256_THRES_LOW);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_256_THRES_MEDIUM);
+ memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_256_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_256_NUM_VICTIMS;
} else if (total_ramsize_mb <= MEM_SIZE_448) {
/* set thresholds for ram size 448M */
proactive_threshold_mb = PROACTIVE_448_THRES;
proactive_leave_mb = PROACTIVE_448_LEAVE;
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_448_THRES_DEDUP);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_448_THRES_SWAP);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_448_THRES_LOW);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_448_THRES_MEDIUM);
- memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_448_THRES_LEAVE);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_448_THRES_DEDUP);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_448_THRES_SWAP);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_448_THRES_LOW);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_448_THRES_MEDIUM);
+ memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_448_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_448_NUM_VICTIMS;
} else if (total_ramsize_mb <= MEM_SIZE_512) {
/* set thresholds for ram size 512M */
proactive_threshold_mb = PROACTIVE_512_THRES;
proactive_leave_mb = PROACTIVE_512_LEAVE;
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_512_THRES_DEDUP);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_512_THRES_SWAP);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_512_THRES_LOW);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_512_THRES_MEDIUM);
- memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_512_THRES_LEAVE);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_512_THRES_DEDUP);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_512_THRES_SWAP);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_512_THRES_LOW);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_512_THRES_MEDIUM);
+ memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_512_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_512_NUM_VICTIMS;
} else if (total_ramsize_mb <= MEM_SIZE_768) {
/* set thresholds for ram size 512M */
proactive_threshold_mb = PROACTIVE_768_THRES;
proactive_leave_mb = PROACTIVE_768_LEAVE;
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_768_THRES_DEDUP);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_768_THRES_SWAP);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_768_THRES_LOW);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_768_THRES_MEDIUM);
- memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_768_THRES_LEAVE);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_768_THRES_DEDUP);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_768_THRES_SWAP);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_768_THRES_LOW);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_768_THRES_MEDIUM);
+ memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_768_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_768_NUM_VICTIMS;
} else if (total_ramsize_mb <= MEM_SIZE_1024) {
/* set thresholds for ram size more than 1G */
proactive_threshold_mb = PROACTIVE_1024_THRES;
proactive_leave_mb = PROACTIVE_1024_LEAVE;
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_1024_THRES_DEDUP);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_1024_THRES_SWAP);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_1024_THRES_LOW);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_1024_THRES_MEDIUM);
- memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_1024_THRES_LEAVE);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_1024_THRES_DEDUP);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_1024_THRES_SWAP);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_1024_THRES_LOW);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_1024_THRES_MEDIUM);
+ memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_1024_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_1024_NUM_VICTIMS;
} else if (total_ramsize_mb <= MEM_SIZE_2048) {
proactive_threshold_mb = PROACTIVE_2048_THRES;
proactive_leave_mb = PROACTIVE_2048_LEAVE;
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_2048_THRES_DEDUP);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_2048_THRES_SWAP);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_2048_THRES_LOW);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_2048_THRES_MEDIUM);
- memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_2048_THRES_LEAVE);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_2048_THRES_DEDUP);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_2048_THRES_SWAP);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_2048_THRES_LOW);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_2048_THRES_MEDIUM);
+ memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_2048_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_2048_NUM_VICTIMS;
} else {
proactive_threshold_mb = PROACTIVE_3072_THRES;
proactive_leave_mb = PROACTIVE_3072_LEAVE;
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_3072_THRES_DEDUP);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_3072_THRES_SWAP);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_3072_THRES_LOW);
- memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_3072_THRES_MEDIUM);
- memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_3072_THRES_LEAVE);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_3072_THRES_DEDUP);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_3072_THRES_SWAP);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_3072_THRES_LOW);
+ memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_3072_THRES_MEDIUM);
+ memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_3072_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_3072_NUM_VICTIMS;
}
}
struct memcg_info *mi;
int next_memcg_idx = cgroup_get_type(next_oom_score_adj);
- if(next_memcg_idx < CGROUP_VIP || next_memcg_idx > CGROUP_LOW) {
+ if(next_memcg_idx < MEMCG_VIP || next_memcg_idx > MEMCG_LOW) {
_E("cgroup type (%d) should not be called", next_memcg_idx);
return;
}
*/
if (cur_oom_score_adj != OOMADJ_APP_MAX + 10) {
/* VIP processes should not be asked to move. */
- if (cur_memcg_idx <= CGROUP_VIP) {
+ if (cur_memcg_idx <= MEMCG_VIP) {
_E("[MEMORY-CGROUP] current cgroup (%s) cannot be VIP or Root", convert_cgroup_type_to_str(cur_memcg_idx));
return;
}
return;
cgroup_write_pid_fullpath(mi->name, pid);
- if (next_memcg_idx == CGROUP_LOW)
- lowmem_swap_memory(get_memcg_info(CGROUP_LOW)->name);
+ if (next_memcg_idx == MEMCG_LOW)
+ lowmem_swap_memory(get_memcg_info(MEMCG_LOW)->name);
}
/* child pid */
else {
static bool lowmem_press_eventfd_handler(int fd, void *data)
{
struct memcg_info *mi;
- enum cgroup_type type = CGROUP_ROOT;
+ enum cgroup_type type = MEMCG_ROOT;
// FIXME: probably shouldn't get ignored
if (lowmem_press_eventfd_read(fd) < 0)
_E("Failed to read lowmem press event, %m\n");
- for (type = CGROUP_ROOT; type < CGROUP_END; type++) {
+ for (type = MEMCG_ROOT; type < MEMCG_END; type++) {
if (!get_cgroup_tree(type) || !get_memcg_info(type))
continue;
mi = get_memcg_info(type);
if (fd == mi->evfd) {
/* call low memory handler for this memcg */
- if (type == CGROUP_ROOT) {
+ if (type == MEMCG_ROOT) {
lowmem_press_root_cgroup_handler();
return true;
}
{
unsigned int i;
- for (i = CGROUP_ROOT; i < CGROUP_END; i++) {
+ for (i = MEMCG_ROOT; i < MEMCG_END; i++) {
if (!get_use_hierarchy(i))
continue;
flags |= OOM_FORCE | OOM_IN_DEPTH | OOM_SINGLE_SHOT;
victims = victims > 0 ? victims : MAX_MEMORY_CGROUP_VICTIMS;
- type = type > 0 ? type : CGROUP_LOW;
+ type = type > 0 ? type : MEMCG_LOW;
threshold_mb = threshold_mb > 0 ? threshold_mb : get_root_memcg_info()->threshold_leave_mb;
lowmem_change_memory_state(MEM_LEVEL_CRITICAL, 1);
_D("history based proactive LMK : avg rss %u, available %u required = %u MB",
rss_mb, before_mb, size_mb);
- lowmem_trigger_reclaim(0, victims, CGROUP_LOW, size_mb);
+ lowmem_trigger_reclaim(0, victims, MEMCG_LOW, size_mb);
return;
}
*/
_D("Run threshold based proactive LMK: memory level to reach: %u MB\n",
proactive_leave_mb + THRESHOLD_MARGIN);
- lowmem_trigger_reclaim(0, victims, CGROUP_LOW, proactive_leave_mb + THRESHOLD_MARGIN);
+ lowmem_trigger_reclaim(0, victims, MEMCG_LOW, proactive_leave_mb + THRESHOLD_MARGIN);
}
unsigned int lowmem_get_proactive_thres(void)
struct memcg_conf *memcg_conf = get_memcg_conf();
/* set MemoryGroupLimit section */
- for (int cgroup = CGROUP_VIP; cgroup < CGROUP_END; cgroup++) {
+ for (int cgroup = MEMCG_VIP; cgroup < MEMCG_END; cgroup++) {
if (memcg_conf->cgroup_limit[cgroup] > 0.0)
memcg_info_set_limit(get_memcg_info(cgroup),
memcg_conf->cgroup_limit[cgroup]/100.0, totalram_bytes);
for (int lvl = MEM_LEVEL_MEDIUM; lvl < MEM_LEVEL_MAX; lvl++) {
if (memcg_conf->threshold[lvl].percent &&
memcg_conf->threshold[lvl].threshold > 0) {
- memcg_set_threshold(CGROUP_ROOT, lvl,
+ memcg_set_threshold(MEMCG_ROOT, lvl,
calculate_threshold_size(memcg_conf->threshold[lvl].threshold));
if (lvl == MEM_LEVEL_OOM)
- memcg_set_leave_threshold(CGROUP_ROOT,
- get_memcg_info(CGROUP_ROOT)->threshold_mb[lvl] * 2);
+ memcg_set_leave_threshold(MEMCG_ROOT,
+ get_memcg_info(MEMCG_ROOT)->threshold_mb[lvl] * 2);
}
else if (memcg_conf->threshold[lvl].threshold > 0) {
- memcg_set_threshold(CGROUP_ROOT, lvl,
+ memcg_set_threshold(MEMCG_ROOT, lvl,
memcg_conf->threshold[lvl].threshold);
if (lvl == MEM_LEVEL_OOM)
- memcg_set_leave_threshold(CGROUP_ROOT,
- get_memcg_info(CGROUP_ROOT)->threshold_mb[lvl] * 2);
+ memcg_set_leave_threshold(MEMCG_ROOT,
+ get_memcg_info(MEMCG_ROOT)->threshold_mb[lvl] * 2);
}
}
oom_popup_enable = memcg_conf->oom_popup;
static void print_mem_configs(void)
{
/* print info of Memory section */
- for (int cgroup = CGROUP_VIP; cgroup < CGROUP_END; cgroup++) {
+ for (int cgroup = MEMCG_VIP; cgroup < MEMCG_END; cgroup++) {
_I("[MEMORY-CGROUP] set memory for cgroup '%s' to %llu bytes",
convert_cgroup_type_to_str(cgroup), get_memcg_info(cgroup)->limit_bytes);
}
- for (int cgroup = CGROUP_ROOT; cgroup < CGROUP_END; cgroup++) {
+ for (int cgroup = MEMCG_ROOT; cgroup < MEMCG_END; cgroup++) {
for (int mem_lvl = 0; mem_lvl < MEM_LEVEL_MAX; mem_lvl++) {
_I("[MEMORY-LEVEL] set threshold of %s for memory level '%s' to %u MB", convert_cgroup_type_to_str(cgroup),
convert_memstate_to_str(mem_lvl), get_memcg_info(cgroup)->threshold_mb[mem_lvl]);
_D("resourced memory init start");
/* init memcg */
- ret = cgroup_make_full_subdir(MEMCG_PATH);
+ ret = memcg_make_full_subdir(MEMCG_PATH);
ret_value_msg_if(ret < 0, ret, "memory cgroup init failed\n");
memcg_params_init();
if (ret < 0)
return;
- for (index = CGROUP_END-1; index >= CGROUP_ROOT; index--) {
+ for (index = MEMCG_END-1; index >= MEMCG_ROOT; index--) {
cgroup = get_cgroup_tree(index);
if (!cgroup)
continue;
return RESOURCED_ERROR_FAIL;
}
- cgroup_write_node_int32(CPUCG_VIP_PP_PATH, CPUCG_RT_CONTROL_BANDWIDTH, runtime);
- cgroup_write_node_int32(CPUCG_HIGH_PP_PATH, CPUCG_RT_CONTROL_BANDWIDTH, runtime);
-
- result = asprintf(&path, "%s/%s", CPUCG_HIGH_PP_PATH, ps->pci->name);
+ result = asprintf(&path, "%s/%s", CPUCG_PATH, ps->pci->name);
if (result < 0) {
_E("[CPU-SCHED] not enough memory");
return RESOURCED_ERROR_OUT_OF_MEMORY;
}
- result = cgroup_make_subdir(CPUCG_HIGH_PP_PATH, ps->pci->name, NULL);
+ result = cgroup_make_subdir(CPUCG_PATH, ps->pci->name, NULL);
if (result < 0) {
_E("[CPU-SCHED] Failed to create cgroup subdir '%s/%s'",
- CPUCG_HIGH_PP_PATH, ps->pci->name);
+ CPUCG_PATH, ps->pci->name);
return RESOURCED_ERROR_FAIL;
}
* It means that there are many background processes or
* some process makes memory leak.
* So, it requires to trigger proactive oom killer
- * with CGROUP_ROOT type.
+ * with MEMCG_ROOT type.
*/
- lowmem_trigger_swap_reclaim(CGROUP_ROOT, swap_size);
+ lowmem_trigger_swap_reclaim(MEMCG_ROOT, swap_size);
return -ENOSPC;
}
return;
}
- cgroup_swap = get_cgroup_tree(CGROUP_LOW);
+ cgroup_swap = get_cgroup_tree(MEMCG_LOW);
if (!cgroup_swap)
return;
- swap_move_to_cgroup_by_pid(CGROUP_LOW, pid);
+ swap_move_to_cgroup_by_pid(MEMCG_LOW, pid);
swap_start_handler(cgroup_swap->memcg_info->name);
_I("swap cgroup entered : pid : %d", (int)pid);
}
_I("[SWAP] swap at boot %s", arg_swap_at_boot == true ? "enable" : "disable");
_I("[SWAP] swap type = %d", arg_swap_type);
- for(int cgroup = CGROUP_VIP; cgroup < CGROUP_END; cgroup++) {
- _I("[SWAP] cgroup (%s) swapiness = %d", cgroup == CGROUP_VIP ? "vip" :
- cgroup == CGROUP_HIGH ? "high" :
- cgroup == CGROUP_MEDIUM ? "medium" : "lowest", get_memcg_info(cgroup)->swappiness);
+ for(int cgroup = MEMCG_VIP; cgroup < MEMCG_END; cgroup++) {
+ _I("[SWAP] cgroup (%s) swapiness = %d", cgroup == MEMCG_VIP ? "vip" :
+ cgroup == MEMCG_HIGH ? "high" :
+ cgroup == MEMCG_MEDIUM ? "medium" : "lowest", get_memcg_info(cgroup)->swappiness);
}
}
arg_swap_at_boot = swap_conf->boot_reclaim_enable;
arg_swap_type = swap_conf->swap_type;
- for(int cgroup = CGROUP_VIP; cgroup < CGROUP_END; cgroup++) {
+ for(int cgroup = MEMCG_VIP; cgroup < MEMCG_END; cgroup++) {
if (swap_conf->swappiness[cgroup] >= 0 &&
swap_conf->swappiness[cgroup] <= 100)
memcg_info_set_swappiness(get_memcg_info(cgroup),
{
int ret;
- resourced_swap_change_memcg_settings(CGROUP_LOW);
+ resourced_swap_change_memcg_settings(MEMCG_LOW);
swap_set_state(SWAP_OFF);
ret = swap_init();
*/
if (lowmem_fragmentated()) {
if (zram_compact) {
- lowmem_trigger_swap_reclaim(CGROUP_ROOT, zram_control.zram_reclaim_bytes);
+ lowmem_trigger_swap_reclaim(MEMCG_ROOT, zram_control.zram_reclaim_bytes);
zram_compact = false;
} else {
swap_zram_compact();
return r;
swapcg_usage_ratio = (float)(swap_usage_bytes / (swap_total_bytes - swap_available_bytes) *100);
if (swapcg_usage_ratio > SWAPCG_CHECK_RATIO)
- type = CGROUP_LOW;
+ type = MEMCG_LOW;
else
- type = CGROUP_ROOT;
+ type = MEMCG_ROOT;
lowmem_trigger_swap_reclaim(type, zram_control.zram_reclaim_bytes);
zram_compact = false;
* So, it requires to trigger proactive oom killer.
*/
- lowmem_trigger_swap_reclaim(CGROUP_ROOT, swap_size_bytes);
+ lowmem_trigger_swap_reclaim(MEMCG_ROOT, swap_size_bytes);
return -ENOSPC;
}
pci->pid = pid;
/* fixed memory cgroup */
- if (pci->mem_type != CGROUP_TOP) {
+ if (pci->mem_type != MEMCG_TOP) {
proc_set_oom_score_adj(pid, cgroup_get_lowest_oom_score_adj(pci->mem_type), NULL);
}
/* fixed cpu cgroup */
- if (pci->cpu_type != CGROUP_TOP) {
+/* if (pci->cpu_type != CGROUP_TOP) {
cpu_move_cgroup_foreach(pid, NULL, CPU_CGROUP_PATH(pci->cpu_type));
- }
+ }*/
memset(&attr, 0, sizeof(struct sched_attr));
attr.size = sizeof(struct sched_attr);
{
if (level >= 0 && value >= 0)
{
- expect_value(memcg_set_threshold, type, CGROUP_ROOT);
+ expect_value(memcg_set_threshold, type, MEMCG_ROOT);
expect_value(memcg_set_threshold, level, level);
expect_value(memcg_set_threshold, value, value);
}
{
if (value >= 0)
{
- expect_value(memcg_set_leave_threshold, type, CGROUP_ROOT);
+ expect_value(memcg_set_leave_threshold, type, MEMCG_ROOT);
expect_value(memcg_set_leave_threshold, value, value);
}
{
expect_value(lowmem_trigger_reclaim, flags, OOM_NOMEMORY_CHECK);
expect_value(lowmem_trigger_reclaim, victims, MAX_MEMORY_CGROUP_VICTIMS);
- expect_value(lowmem_trigger_reclaim, type, CGROUP_LOW);
+ expect_value(lowmem_trigger_reclaim, type, MEMCG_LOW);
expect_value(lowmem_trigger_reclaim, threshold, 0);
env.trigger_signal_oom_trigger(g_variant_new("()"));