%{confdir}/configs/config-limiter.conf
%{confdir}/configs/config-optimizer.conf
%{confdir}/configs/config-process.conf
-#%{confdir}/configs/config-swap.conf
-#%{confdir}/configs/config-dedup.conf
-#%{confdir}/configs/config-block.conf
%{confdir}/configs/config-monitor.conf
-#%{confdir}/configs/config-cpu.conf
-#%{confdir}/configs/config-cpu-sched.conf
%files config-tv
%manifest resourced.manifest
${RESOURCE_MONITOR_SOURCE_DIR}
${CPU_LIMITER_SOURCE_DIR}
${MEMORY_LIMITER_SOURCE_DIR}
- #${MEMORY_LIMITER_SOURCE_DIR}/include
${PROCESS_SOURCE_DIR}
${BLOCK_SOURCE_DIR}
${FREEZER_SOURCE_DIR}
{CGROUP_LOW_NAME, CGROUP_MEDIUM, CGROUP_DEFAULT_USE_HIERARCHY, NULL, NULL},
};
-//static struct cgroup **cgroup_tree;
-
-/*static void cgroup_init(struct cgroup *cgroup)
-{
- cgroup->use_hierarchy = MEMCG_DEFAULT_USE_HIERARCHY;
- cgroup->memcg_info = NULL;
- cgroup->cgroups = NULL;
-}*/
-
int cgroup_get_type(int oom_score_adj)
{
if (oom_score_adj == OOMADJ_SERVICE_MIN)
struct cgroup *get_cgroup_tree(int idx)
{
if(idx < CGROUP_ROOT || idx >= CGROUP_END) {
- _E("[DEBUG] (%d) cgroup tree is NULL", idx);
+ _E("[CGROUP] (%d) cgroup tree is NULL", idx);
return NULL;
}
else
void set_memcg_info(int idx, struct memcg_info *mi)
{
if(idx < CGROUP_ROOT || idx >= CGROUP_END) {
- _E("[DEBUG] (%d) index is out of scope", idx);
+ _E("[CGROUP] (%d) index is out of scope", idx);
}
else
cgroup_tree[idx].memcg_info = mi;
struct memcg_info *get_memcg_info(int idx)
{
if(idx < CGROUP_ROOT || idx >= CGROUP_END) {
- _E("[DEBUG] (%d) cgroup tree's memcg info is NULL", idx);
+ _E("[CGROUP] (%d) cgroup tree's memcg info is NULL", idx);
return NULL;
}
else
GSList *get_child_cgroups(int idx)
{
if(idx < CGROUP_ROOT || idx >= CGROUP_END) {
- _E("[DEBUG] (%d) cgroup tree's child is NULL", idx);
+ _E("[CGROUP] (%d) cgroup tree's child is NULL", idx);
return NULL;
}
else
int get_parent_cgroup(int idx)
{
if(idx < CGROUP_ROOT || idx >= CGROUP_END) {
- _E("[DEBUG] (%d) cgroup range is out of scope", idx);
+ _E("[CGROUP] (%d) cgroup range is out of scope", idx);
return CGROUP_TOP;
}
else {
void set_use_hierarchy(int idx, bool use_hierarchy)
{
if(idx < CGROUP_ROOT || idx >= CGROUP_END) {
- _E("[DEBUG] (%d) cgroup range is out of scope", idx);
+ _E("[CGROUP] (%d) cgroup range is out of scope", idx);
}
else {
cgroup_tree[idx].use_hierarchy = use_hierarchy;
bool get_use_hierarchy(int idx)
{
if(idx < CGROUP_ROOT || idx >= CGROUP_END) {
- _E("[DEBUG] (%d) cgroup range is out of scope", idx);
+ _E("[CGROUP] (%d) cgroup range is out of scope", idx);
return CGROUP_DEFAULT_USE_HIERARCHY;
}
else {
void memcg_set_threshold(int type, int level, int value)
{
-// cgroup_tree[type]->memcg_info->threshold[level] = value;
struct memcg_info *mi = get_memcg_info(type);
if(!mi)
_E("memory cgroup of %d is NULL", type);
_E("memory cgroup of %d is NULL", type);
else
mi->threshold_leave = value;
-
-// cgroup_tree[type]->memcg_info->threshold_leave = value;
}
void memcg_info_set_limit(struct memcg_info *mi, float ratio,
return 0;
}
-/*void memcg_init(struct memcg *memcg)
-{
- memcg->use_hierarchy = MEMCG_DEFAULT_USE_HIERARCHY;
- memcg->info = NULL;
- memcg->cgroups = NULL;
-}*/
-
int memcg_get_anon_usage(char *memcg, unsigned int *anon_usage)
{
int r;
return memcg_root;
}
-/*void memcg_params_exit(void)
-{
- for (int i = CGROUP_ROOT; i < CGROUP_END; i++) {
- g_slist_free_full(cgroup_tree[i].cgroups, free);
- free(cgroup_tree[i]);
- }
- free(cgroup_tree);
-}*/
-
void memcg_params_init(void)
{
int idx = 0;
GSList *child_cgroups;
-/* cgroup_tree = (struct cgroup **)malloc(sizeof(struct cgroup *) * CGROUP_END);
- assert(cgroup_tree);*/
-
for (idx = CGROUP_ROOT; idx < CGROUP_END; idx++) {
struct memcg_info *mi = &gmemcg_info[idx];
-/* cgroup_tree[idx] = (struct cgroup *)malloc(sizeof(struct cgroup));
- assert(cgroup_tree[idx]);*/
-
-// cgroup_init(cgroup_tree[idx]);
+
set_memcg_info(idx, mi);
if(idx == CGROUP_ROOT)
memcg_root = mi;
set_use_hierarchy(parent_idx, true);
}
-// cgroup_tree[idx]->memcg_info = mi;
_I("init memory cgroup for %s", mi->name);
-/* if (mi->parent_memcg == CGROUP_TOP) {
- memcg_root = cgroup_tree[idx]->memcg_info;
- } else {
- int parent_idx = mi->parent_memcg;
- cgroups = cgroup_tree[parent_idx]->cgroups;
- cgroups = g_slist_prepend(cgroups, mi);
- cgroup_tree[parent_idx]->use_hierarchy = true;
- }*/
}
}
#define MEMCG_MEDIUM_PATH MEMCG_PATH "/" CGROUP_VIP_NAME "/" CGROUP_HIGH_NAME "/" CGROUP_MEDIUM_NAME
#define MEMCG_LOW_PATH MEMCG_PATH "/" CGROUP_VIP_NAME "/" CGROUP_HIGH_NAME "/" CGROUP_MEDIUM_NAME "/" CGROUP_LOW_NAME
-/*#define MEMCG_APPS_PATH MEMCG_PATH"/Apps"
-#define MEMCG_BGLOCKED_PATH MEMCG_PATH"/Apps/BgLocked"
-#define MEMCG_SWAP_PATH MEMCG_PATH"/Swap"*/
-
#define MEMCG_VIP_PP_PATH MEMCG_VIP_PATH "/" CGROUP_PER_PROCESS_NAME
#define MEMCG_VIP_GROUP_PATH MEMCG_VIP_PATH "/" CGROUP_GROUP_NAME
#define MEMCG_LOW_PP_PATH MEMCG_LOW_PATH "/" CGROUP_PER_PROCESS_NAME
#define MEMCG_LOW_GROUP_PATH MEMCG_LOW_PATH "/" CGROUP_GROUP_NAME
-
-/*#define LOWMEM_ROOT_CGROUP "/sys/fs/cgroup/memory"
-#define LOWMEM_APPS_CGROUP LOWMEM_ROOT_CGROUP"/Apps"
-#define LOWMEM_BGLOCKED_CGROUP LOWMEM_ROOT_CGROUP"/Apps/BgLocked"
-#define LOWMEM_MEMLIMIT_CGROUP LOWMEM_ROOT_CGROUP"/MemLimit"
-#define LOWMEM_SWAP_CGROUP LOWMEM_ROOT_CGROUP"/Swap"*/
-
-//#define MEMCG_OOM_CONTROL_PATH "memory.oom_control"
#define MEMCG_OOM_CONTROL "memory.oom_control"
#define MEMCG_EVENTFD_CONTROL "cgroup.event_control"
#define MEMCG_EVENTFD_MEMORY_PRESSURE "memory.pressure_level"
#define MEMCG_SWAPPINESS "memory.swappiness"
#define MEMCG_FORCE_RECLAIM "memory.force_reclaim"
#define MEMCG_MOVE_CHARGE "memory.move_charge_at_immigrate"
-//#define MEMCG_LIMIT_PATH "memory.limit_in_bytes"
-//#define MEMCG_SWAP_LIMIT_PATH "memory.memsw.limit_in_bytes"
-//#define MEMCG_SWAPPINESS_PATH "memory.swappiness"
#define DEFAULT_MEMLOG_PATH "/var/log"
#define DEFAULT_MEMLOG_NR_MAX 50
};
-//separate memcg_info and cgroup_info
struct memcg_info {
/* name of memory cgroup */
char name[MAX_PATH_LENGTH];
/* hashname of memory cgroup for restoring memcg info*/
-// char hashname[MAX_NAME_LENGTH];
/* parent id */
-// int parent_memcg;
/* limit ratio, if don't want to set limit, use NO_LIMIT*/
float limit_ratio;
unsigned int limit;
int swappiness;
};
-/*struct memcg {
- // parent cgroup
- struct memcg_info *info;
- // set when using multiple sub cgroups
- bool use_hierarchy;
- // list of child cgroups when using multi groups
- GSList *cgroups;
-};*/
-
struct lowmem_control_data {
enum lowmem_control_type control_type;
int pid;
long long value[CGROUP_MEMORY_STAT_MAX];
};
-//void memcg_init(struct memcg *memcg);
-
-
const char *cgroup_memory_stat_id_to_string(enum cgroup_memory_stat_id id);
enum cgroup_memory_stat_id cgroup_memory_stat_string_to_id(const char *str);
/* open conf file */
f = fopen(file_name, "r");
if (!f) {
- _E("[DEBUG] Failed to open file %s", file_name);
+ _E("Failed to open file %s", file_name);
ret = -EIO;
goto error;
}
}
}
}
- _D("[DEBUG] Success to load %s", file_name);
- fclose(f);
return 0;
error:
if (f)
fclose(f);
- _E("[DEBUG] Failed to read %s:%d!", file_name, lineno);
+ _E("Failed to read %s:%d!", file_name, lineno);
return ret;
}
ret = proc_get_approx_mem_usage(pid, &total);
if (ret < 0) {
- _E("[DEBUG] Failed to get usage : %d", pid);
+ _E("Failed to get usage : %d", pid);
return ret;
}
ret = proc_get_approx_mem_usage(pid, &total);
if (ret < 0) {
- _E("[DEBUG] Failed to get usage : %d", pid);
+ _E("Failed to get usage : %d", pid);
return ret;
}
int type = bmi->logging;
if (type & BLOCK_LOGGING_DLOG)
- _I("[DEBUG] pid %d(%s) accessed %s", pid, label, filename);
+ _I("pid %d(%s) accessed %s", pid, label, filename);
if (type & BLOCK_LOGGING_FILE) {
FILE *f;
if (!bmi || !strlen(bmi->path))
return RESOURCED_ERROR_NO_DATA;
- _D("[DEBUG] monitor register : path %s, mode %08x", bmi->path, bmi->mode);
+ _D("monitor register : path %s, mode %08x", bmi->path, bmi->mode);
bmi->mfd = fanotify_init(FAN_CLOEXEC|FAN_NONBLOCK | FAN_CLASS_CONTENT,
O_RDONLY | O_LARGEFILE | O_CLOEXEC | O_NOATIME);
config_parse(PRIORITY_CONF_FILE, load_fixed_oom_config, NULL);
-// register_notifier(RESOURCED_NOTIFIER_APP_LAUNCH, proc_priority_set_fixed_oom);
register_notifier(RESOURCED_NOTIFIER_APP_TERMINATED, proc_priority_remove_pid);
return RESOURCED_ERROR_NONE;
}
g_hash_table_destroy(oom_fixed_app_list);
if (oom_fixed_pid_list)
g_hash_table_destroy(oom_fixed_pid_list);
-// unregister_notifier(RESOURCED_NOTIFIER_APP_LAUNCH, proc_priority_set_fixed_oom);
unregister_notifier(RESOURCED_NOTIFIER_APP_TERMINATED, proc_priority_remove_pid);
return RESOURCED_ERROR_NONE;
}
resourced_ret_c proc_set_runtime_exclude_list(const int pid, int type, struct proc_app_info *pai)
{
_cleanup_free_ char *runtime_app_info_path = NULL;
-// struct proc_app_info *pai = NULL;
struct proc_status ps = {0};
int ret;
-// pai = find_app_info(pid);
if (!pai) {
- _E("[DEBUG] process app info is NULL");
+ _E("process app info is NULL");
return RESOURCED_ERROR_NO_DATA;
}
} else {
pai->runtime_exclude = type;
}
- _D("[DEBUG] pid %d set proc exclude list, type = %d, exclude = %d",
+ _D("pid %d set proc exclude list, type = %d, exclude = %d",
pid, type, pai->runtime_exclude);
runtime_app_info_path = proc_get_runtime_app_info_path(pai);
ppi->app_list = g_slist_prepend(ppi->app_list, pai);
else {
ppi->svc_list = g_slist_prepend(ppi->svc_list, pai);
-/* if (!restore)
- proc_set_default_svc_oomscore(ppi, pai);*/
}
return ppi;
}
prelaunch = NULL;
if (!streq(pai->appid, appid)) {
- _E("[DEBUG] prelaunched app(%s) is not matched with new app info(%s)",
+ _E("prelaunched app(%s) is not matched with new app info(%s)",
pai->appid, appid);
proc_remove_app_info(pai);
} else {
/* prelaunch */
if (prelaunch) {
- _E("[DEBUG] prelaunched app was not gone to launch state, appid(%s)", prelaunch->appid);
+ _E("prelaunched app was not gone to launch state, appid(%s)", prelaunch->appid);
proc_remove_app_info(prelaunch);
prelaunch = NULL;
}
if (!strncmp(result->section, APP_WATCHDOG_EXCLUDE_CONF_SECTION,
strlen(APP_WATCHDOG_EXCLUDE_CONF_SECTION) + 1) &&
!strncmp(result->name, "PREDEFINE", 10)) {
- _I("[WATCHDOG] app (%s) is excluded from the watchdog list", result->value);
g_hash_table_insert(app_watchdog_exclude_list,
g_strndup(result->value, strlen(result->value)),
GINT_TO_POINTER(1));
gslist_for_each_item(iter, proc_module) {
module = (struct proc_module_ops *)iter->data;
- _D("[WATCHDOG] Initialize [%s] module\n", module->name);
if (module->init)
ret = module->init(data);
if (ret != RESOURCED_ERROR_NONE)
return PROC_STATE_DEFAULT;
}
-static const char *convert_status_to_str(int status)
-{
- static const char *set[] = {"foreground", "active", "backround",
- "inactive", "launch", "resume", "terminate",
- "service", "noti", "exclude", "memsweep", "terminated",
- "system_service", "cmdline", "exe", "stat", "status",
- "oomscore", "pgid_cmdline"};
- if(status < PROC_CGROUP_SET_FOREGRD || status > PROC_CGROUP_GET_PGID_CMDLINE)
- return "error status";
- else
- return set[status];
-}
-
int resourced_proc_status_change(int status, pid_t pid, char *app_name, char *pkg_name, int apptype)
{
int ret = 0, oom_score_adj = 0;
ps.pid = pid;
ps.pai = NULL;
- _I("[DEBUG] status: %s, app_name: %s, pid: %d, oom_score: %d", convert_status_to_str(status), app_name, pid, oom_score_adj);
-
switch (status) {
case PROC_CGROUP_SET_FOREGRD:
if (app_name)
case PROC_CGROUP_SET_RESUME_REQUEST:
/* init oom_score_value */
if (!app_name) {
- _E("[DEBUG] resume request: need app name! pid = %d", pid);
+ _E("resume request: need app name! pid = %d", pid);
return RESOURCED_ERROR_NO_DATA;
}
- _SD("[DEBUG] resume request: app %s, pid %d", app_name, pid);
+ _SD("resume request: app %s, pid %d", app_name, pid);
ps.pai = find_app_info(pid);
if (!ps.pai)
* `notify_on_release` to 0) is bound to be slower than this. */
static const char AGENT_REPLACEMENT_PATH[] = "/bin/true";
if (fwrite_str(PROC_WATCHDOGCG_PATH "/" RELEASE_AGENT, AGENT_REPLACEMENT_PATH) == RESOURCED_ERROR_NONE) {
- _I("[DEBUG] disabled release agent by setting to %s", AGENT_REPLACEMENT_PATH);
+ _I("disabled release agent by setting to %s", AGENT_REPLACEMENT_PATH);
return;
}
* system is shutting down and nobody else should use the agent
* anyway so there isn't much opportunity to cause any surprises. */
if (!mount(AGENT_REPLACEMENT_PATH, PROC_WATCHDOG_HANDLER_PATH, NULL, MS_BIND | MS_RDONLY, NULL)) {
- _I("[DEBUG] disabled release agent by bind mounting %s to %s", AGENT_REPLACEMENT_PATH, PROC_WATCHDOG_HANDLER_PATH);
+ _I("disabled release agent by bind mounting %s to %s", AGENT_REPLACEMENT_PATH, PROC_WATCHDOG_HANDLER_PATH);
return;
}
- _E("[DEBUG] disabling release agent by bind mounting %s to %s failed; release agent is potentially still alive", AGENT_REPLACEMENT_PATH, PROC_WATCHDOG_HANDLER_PATH);
+ _E("disabling release agent by bind mounting %s to %s failed; release agent is potentially still alive", AGENT_REPLACEMENT_PATH, PROC_WATCHDOG_HANDLER_PATH);
}
static void turn_off_proc_watchdog()
* the unmounting can be done lazily and is not guaranteed to take place
* immediately. */
umount2(PROC_WATCHDOGCG_PATH, MNT_FORCE | MNT_DETACH);
- _I("[DEBUG] disabled process by unmounting release cgroup");
+ _I("disabled process by unmounting release cgroup");
}
static void proc_poweroff()
pai = find_app_info(pid);
if (!pai) {
- _D("[DEBUG] There is no appid %d", pid);
+ _D("There is no appid %d", pid);
goto response;
}
pai = find_app_info_by_appid(appid);
if (!pai) {
- _E("[DEBUG] There is no appid %s", appid);
+ _E("There is no appid %s", appid);
goto failure;
}
pai = find_app_info_by_appid(appid);
if (!pai || !pai->main_pid) {
- _E("[DEBUG] There is no appid %s", appid);
+ _E("There is no appid %s", appid);
goto failure;
}
ps.pai = find_app_info_by_appid(appid);
if (!ps.pai) {
- _E("[DEBUG] no entry of %s in app list", appid);
+ _E("no entry of %s in app list", appid);
return;
}
ps.pid = ps.pai->main_pid;
EXPORT_TEST void booting_done_signal_handler(GVariant *params)
{
- _I("[DEBUG] booting done");
+ _I("booting done");
modules_init_late(NULL);
resourced_notify(RESOURCED_NOTIFIER_BOOTING_DONE, NULL);
}
int freeze_val = 0;
GSList *iter;
struct proc_program_info *ppi;
-// struct proc_app_info *pai = find_app_info(currentpid);
int is_favorite = 0;
if (proc_get_freezer_status() != CGROUP_FREEZER_DISABLED)
freeze_val = resourced_freezer_proc_late_control();
if (!pai) {
- _E("[DEBUG] can't find app info about pid %d", currentpid);
+ _E("can't find app info about pid %d", currentpid);
return RESOURCED_ERROR_INVALID_PARAMETER;
}
{
int ret = 0;
struct proc_program_info *ppi;
-// struct proc_app_info *pai;
-// pai = find_app_info(pid);
if (!pai) {
- _E("[DEBUG] process app info is NULL");
+ _E("process app info is NULL");
proc_set_oom_score_adj(pid, oom_score_adj, pai);
return RESOURCED_ERROR_NO_DATA;
}
_D("cpu-sched: add pid %d to cpuset %s", pid, set->name);
cgroup_write_pid(CPUSET_CGROUP, set->name, pid);
-/* char path[PATH_MAX];
- int r = snprintf(path, sizeof path, "%s/%s", CPUSET_CGROUP, set->name);
- if (r < 0) {
- _E("cpu-sched: failed to setup path for cpuset (%s)", set->name);
- return r;
- }
-
- r = cgroup_write_node_int32(path, "cgroup.procs", pid);
- ret_value_msg_if(r < 0, RESOURCED_ERROR_FAIL, "Failed to attach pid %d to cgroup %s: %m", pid, path);*/
return RESOURCED_ERROR_NONE;
}
_D("cpu-sched: moving pid %d to toplevel cpuset (from %s cpuset)", pid, set->name);
cgroup_write_pid_fullpath(CPUSET_CGROUP, pid);
-/* int r = cgroup_write_node_int32(CPUSET_CGROUP, "cgroup.procs", pid);
- ret_value_msg_if(r < 0, RESOURCED_ERROR_FAIL, "Failed to attach pid %d to cgroup " CPUSET_CGROUP ": %m", pid);*/
return RESOURCED_ERROR_NONE;
}
static int cpu_move_cgroup(pid_t pid, char *path)
{
return cgroup_write_pid_fullpath(path, pid);
- //return cgroup_write_node_uint32(path, CGROUP_FILE_NAME, pid);
}
static int cpu_move_cgroup_foreach(pid_t pid, struct proc_app_info *pai, char *path)
if (!pai)
return cgroup_write_pid_fullpath(path, pid);
-// return cgroup_write_node_uint32(path, CGROUP_FILE_NAME, pid);
cgroup_write_pid_fullpath(path, pai->main_pid);
-// cgroup_write_node_uint32(path, CGROUP_FILE_NAME, pai->main_pid);
if (pai->childs) {
gslist_for_each_item(iter, pai->childs) {
child_pid = GPOINTER_TO_PID(iter->data);
cgroup_write_pid_fullpath(path, child_pid);
-// cgroup_write_node_uint32(path, CGROUP_FILE_NAME, child_pid);
}
}
return RESOURCED_ERROR_NONE;
{
int ret_code;
- _D("[DEBUG] resourced cpu init start");
- //ret_code = cgroup_make_subdir(CPUCG_PATH, "background", NULL);
+ _D("resourced cpu init start");
ret_code = cgroup_make_full_subdir(CPUCG_PATH);
ret_value_msg_if(ret_code < 0, ret_code, "cpu cgroup init failed\n");
cpu_check_cpuquota();
-/* if (cpu_quota_enabled()) {
- ret_code = cgroup_make_subdir(CPUCG_PATH, "quota", NULL);
- ret_value_msg_if(ret_code < 0, ret_code, "create service cgroup failed\n");
- }*/
config_parse(CPU_CONF_FILE, load_cpu_config, NULL);
if (def_list.num) {
ret_unless(level >= 0);
ret_unless(thres >= 0);
-// lowmem_memcg_set_threshold(CGROUP_ROOT, level, thres);
memcg_set_threshold(CGROUP_ROOT, level, thres);
}
g_variant_get(params, gtype, &thres);
ret_unless(thres >= 0);
-// lowmem_memcg_set_leave_threshold(CGROUP_ROOT, thres);
memcg_set_leave_threshold(CGROUP_ROOT, thres);
}
#define MAX_MEMORY_CGROUP_VICTIMS 10
-/*enum lmk_type {
- LMK_MEMORY, // Kill all range of apps OOMADJ_INIT ~ OOMADJ_APP_MAX
- LMK_FOREGROUND, // Kill foreground apps OOMADJ_INIT ~ OOMADJ_BACKGRD_PERCEPTIBLE
- LMK_ACTIVE, // Kill active apps OOMADJ_PREVIOUS_DEFAULT ~ OOMADJ_BACKGRD_LOCKED
- LMK_RECENTLY_USE, // Kill recently use apps OOMADJ_FAVORITE ~ OOMADJ_RECENTLY_USED
- LMK_OLDEST// Kill only oldest inactive processes OOMADJ_BACKGRD_OLD ~ OOMADJ_APP_MAX
-};*/
-
struct task_info {
/*
* Mostly, there are not multiple processes with the same pgid.
int lowmem_trigger_reclaim(int flags, int victims, enum cgroup_type type, int threshold);
void lowmem_trigger_swap_reclaim(enum cgroup_type type, int swap_size);
void lowmem_change_memory_state(int state, int force);
-//void lowmem_memcg_set_threshold(int idx, int level, int value);
-//void lowmem_memcg_set_leave_threshold(int idx, int value);
unsigned long lowmem_get_ktotalram(void);
void lowmem_trigger_swap(pid_t pid, int memcg_idx);
void lowmem_limit_init(void);
/*
* Return memcg pointer to selected cgroup.
*/
-//int lowmem_get_memcg(enum cgroup_type type, struct memcg **memcg_ptr);
enum oom_killer_cb_flags {
OOM_NONE = 0x0, /* for main oom killer thread */
return RESOURCED_ERROR_OUT_OF_MEMORY;
}
cgroup_write_pid_fullpath(path, pai->main_pid);
-// cgroup_write_node_uint32(path, CGROUP_FILE_NAME, pai->main_pid);
if (pai->childs) {
gslist_for_each_item(iter, pai->childs)
cgroup_write_pid_fullpath(path, GPOINTER_TO_PID(iter->data));
-/* cgroup_write_node_uint32(path, CGROUP_FILE_NAME,
- GPOINTER_TO_PID(iter->data));*/
}
return RESOURCED_ERROR_NONE;
}
} else {
ret = proc_get_cmdline(pid, appname, sizeof appname);
if (ret < 0) {
- _E("[DEBUG] Failed to get cmdline basename of pid(%d)", pid);
+ _E("Failed to get cmdline basename of pid(%d)", pid);
return;
}
cgpath = appname;
ret = asprintf(&path, "%s/%s", MEMCG_HIGH_PP_PATH, cgpath);
if (ret < 0) {
- _E("[DEBUG] not enough memory");
+ _E("not enough memory");
return;
}
- _I("[DEBUG] path=%s/%s", MEMCG_HIGH_PP_PATH, cgpath);
+ _I("path=%s/%s", MEMCG_HIGH_PP_PATH, cgpath);
ret = cgroup_make_subdir(MEMCG_HIGH_PP_PATH, cgpath, NULL);
if (ret < 0) {
- _E("[DEBUG] Failed to create cgroup subdir '%s/%s'",
+ _E("Failed to create cgroup subdir '%s/%s'",
MEMCG_HIGH_PP_PATH, cgpath);
return;
}
ret = cgroup_write_node_uint32(path, MEMCG_MOVE_CHARGE, 3U);
if (ret < 0)
- _W("[DEBUG] Failed to set immigrate mode for %s (non-crucial, continuing)", path);
+ _W("Failed to set immigrate mode for %s (non-crucial, continuing)", path);
if (!pai) {
cgroup_write_pid_fullpath(path, pid);
(mem_widget_limit && ps->pai->type == PROC_TYPE_WIDGET))
return lowmem_limit_appwidget(data);
- _E("[DEBUG] Unable to set foreground app limit - app type not supported");
+ _E("Unable to set foreground app limit - app type not supported");
return RESOURCED_ERROR_NONE;
}
#define LOWMEM_THRES_INIT 0
#define MEMPS_EXEC_PATH "usr/bin/memps"
-//#define MEMCG_MOVE_CHARGE_PATH "memory.move_charge_at_immigrate"
-//#define MEMCG_EVENTFD_MEMORY_PRESSURE "memory.pressure_level"
#define MEM_CONF_FILE RD_CONFIG_FILE(limiter)
#define MEM_SECTION "Memory"
#define MEM_VIP_SECTION "VIP_PROCESS"
/* Processing flags*/
unsigned int flags;
/* Indictator for OOM score of targeted processes */
-// enum lmk_type type;
enum cgroup_type type;
/* Desired size to be restored - level to be reached (MB)*/
static size_t cur_mem_state = LOWMEM_NORMAL;
static int num_max_victims = MAX_MEMORY_CGROUP_VICTIMS;
static int num_vict_between_check = MAX_VICTIMS_BETWEEN_CHECK;
-//static int default_swappiness = -1;
static unsigned long totalram;
static unsigned long ktotalram;
return "Error";
}
-/*static const char *convert_type_to_str(int type)
-{
- static const char *type_table[] =
- {"memory", "foreground", "active", "recently_use", "oldest"};
- if(type >= LMK_MEMORY && type <= LMK_OLDEST)
- return type_table[type];
- return "error type";
-}*/
-
static const char *convert_status_to_str(int status)
{
static const char *status_table[] =
if (!memps_log)
make_memps_log(MEMLOG_MEMPS, pid, appname);
-
-// pai = find_app_info(pid);
pai = tsk->pai;
-
if (pai) {
resourced_proc_status_change(PROC_CGROUP_SET_TERMINATE_REQUEST,
pid, NULL, NULL, PROC_TYPE_NONE);
*avail = available;
- _I("[LMK] should_be_freed=%uMB[== threshold(%u) - available(%u) + margin(%u)]", should_be_freed, thres, available, THRESHOLD_MARGIN);
return should_be_freed;
}
* and pid has been already included in pai,
* skip to append.
*/
-/* if (!add_app && find_app_info(pid))
- continue;*/
if (oom > OOMADJ_SU && oom <= OOMADJ_APP_MAX)
continue;
lowmem_get_pids_proc(candidates);
g_array_sort(candidates, (GCompareFunc)compare_victims);
-/* if (start_oom <= OOMADJ_BACKGRD_LOCKED || start_oom >= OOMADJ_BACKGRD_OLD)
- g_array_sort(candidates, (GCompareFunc)compare_victims_point);
- else
- g_array_sort(candidates, (GCompareFunc)compare_victims);*/
for (i = 0; i < candidates->len; i++) {
struct task_info *tsk;
static int calculate_range_of_oom(enum cgroup_type type, int *min, int *max)
{
if (type == CGROUP_VIP || type >= CGROUP_END || type <= CGROUP_TOP) {
- _E("[DEBUG] cgroup type (%d) is out of scope", type);
+ _E("cgroup type (%d) is out of scope", type);
return RESOURCED_ERROR_FAIL;
}
return RESOURCED_ERROR_NONE;
}
-/*static void calualate_range_of_oom(enum lmk_type lmk, int *min, int *max)
-{
- if (lmk == LMK_OLDEST) {
- *max = OOMADJ_APP_MAX;
- *min = OOMADJ_BACKGRD_OLD;
- } else if (lmk == LMK_RECENTLY_USE) {
- *max = OOMADJ_RECENTLY_USED;
- *min = OOMADJ_FAVORITE;
- } else if (lmk == LMK_ACTIVE) {
- *max = OOMADJ_BACKGRD_LOCKED;
- *min = OOMADJ_PREVIOUS_DEFAULT;
- } else if (lmk == LMK_FOREGROUND) {
- *max = OOMADJ_BACKGRD_PERCEPTIBLE;
- *min = OOMADJ_SU;
- } else {
- *max = OOMADJ_APP_MAX;
- *min = OOMADJ_SU;
- }
-}*/
-
static void lowmem_handle_request(struct lowmem_control *ctl)
{
int start_oom, end_oom;
unsigned int total_size = 0;
unsigned int current_size = 0;
unsigned int reclaim_size, shortfall = 0;
-// enum lmk_type lmk_type = ctl->type;
enum cgroup_type cgroup_type = ctl->type;
available = proc_get_mem_available();
status = LOWMEM_RECLAIM_DONE;
goto done;
}
- _D("[LMK] before reclaim: available=%uMB, type=%s",
- available, convert_cgroup_type_to_str(cgroup_type));
/* precaution */
current_size = 0;
_I("[LMK] current: kill %d victims, reclaim_size=%uMB from %d to %d status=%s",
victim_cnt, current_size,
start_oom, end_oom, convert_status_to_str(status));
- //_I("[LMK] total: killed %d victims reclaimed = %uMB remaining = %uMB",
- // count, total_size, reclaim_size);
}
if ((status == LOWMEM_RECLAIM_DONE) ||
static void change_lowmem_state(unsigned int mem_state)
{
- _I("[LOW MEM STATE] %s ==> %s", convert_memstate_to_str(cur_mem_state),
- convert_memstate_to_str(mem_state));
cur_mem_state = mem_state;
lmk_start_threshold = get_root_memcg_info()->threshold[LOWMEM_MEDIUM];
struct swap_status_msg msg;
mi = get_memcg_info(memcg_idx);
-// mi = memcg_tree[memcg_idx]->info;
- _D("[DEBUG] name : %s, pid : %d", mi->name, pid);
+ _D("name : %s, pid : %d", mi->name, pid);
error = proc_get_oom_score_adj(pid, &oom_score_adj);
if (error) {
- _E("[DEBUG] Cannot get oom_score_adj of pid (%d)", pid);
+ _E("Cannot get oom_score_adj of pid (%d)", pid);
return;
}
proc_set_oom_score_adj(pid, oom_score_adj, NULL);
- //cgroup_write_pid_fullpath(mi->name, pid);
- //cgroup_write_node_uint32(mi->name, CGROUP_FILE_NAME, pid);
msg.type = memcg_idx;
msg.memcg_info = mi;
resourced_notify(RESOURCED_NOTIFIER_SWAP_START, &msg);
if (swap_get_state() == SWAP_ON && memcg_swap_status) {
msg.type = CGROUP_LOW;
msg.memcg_info = get_memcg_info(msg.type);
-// msg.info = memcg_tree[msg.type]->info;
resourced_notify(RESOURCED_NOTIFIER_SWAP_UNSET_LIMIT, &msg);
memcg_swap_status = false;
}
{
struct lowmem_control *ctl;
- _I("[LOW MEM STATE] memory cgroup %s oom state",
- mi->name);
-
/* To Do: only start to kill fg victim when no pending fg victim */
lowmem_dump_cgroup_procs(mi);
if (strncmp(result->section, MEM_BG_RECLAIM_SECTION, strlen(MEM_BG_RECLAIM_SECTION)+1))
return RESOURCED_ERROR_NONE;
- else {
- _I("[DEBUG] SECTION=%s", result->section);
- }
if (!strncmp(result->name, MEM_BG_RECLAIM_STRING, strlen(MEM_BG_RECLAIM_STRING)+1)) {
if (!strncmp(result->value, "yes", strlen("yes")+1))
if (strncmp(result->section, MEM_POPUP_SECTION, strlen(MEM_POPUP_SECTION)+1))
return RESOURCED_ERROR_NONE;
- else {
- _I("[DEBUG] SECTION=%s", result->section);
- }
if (!strncmp(result->name, MEM_POPUP_STRING, strlen(MEM_POPUP_STRING)+1)) {
if (!strncmp(result->value, "yes", strlen("yes")+1))
if (strncmp(result->section, MEM_LOGGING_SECTION, strlen(MEM_LOGGING_SECTION)+1))
return RESOURCED_ERROR_NONE;
- else {
- _I("[DEBUG] SECTION=%s", result->section);
- }
if (!strncmp(result->name, "Enable", strlen("Enable")+1)) {
memlog_enabled = atoi(result->value);
return RESOURCED_ERROR_NONE;
}
-//static int set_memory_config(const char *section_name, const struct parse_result *result)
static int set_memory_config(struct parse_result *result, void *user_data)
{
-// if (!result || !section_name)
if (!result)
return RESOURCED_ERROR_NONE;
if (strncmp(result->section, MEM_SECTION, strlen(MEM_SECTION)+1))
-// && strncmp(result->section, section_name, strlen(section_name)+1))
return RESOURCED_ERROR_NONE;
- else {
- _I("[DEBUG] SECTION=%s", result->section);
- }
if (!strncmp(result->name, "ThresholdDedup", strlen("ThresholdDedup")+1)) {
int value = atoi(result->value);
} else if (!strncmp(result->name, "ForegroundRatio", strlen("ForegroundRatio")+1)) {
float ratio = atof(result->value);
memcg_info_set_limit(get_memcg_info(CGROUP_HIGH), ratio, totalram);
-// memcg_info_set_limit(memcg_tree[CGROUP_HIGH]->info, ratio, totalram);
} else if (!strncmp(result->name, "BackgroundRatio", strlen("BackgroundRatio")+1)) {
float ratio = atof(result->value);
memcg_info_set_limit(get_memcg_info(CGROUP_MEDIUM), ratio, totalram);
-// memcg_info_set_limit(memcg_tree[CGROUP_MEDIUM]->info, ratio, totalram);
} else if (!strncmp(result->name, "LowRatio", strlen("LowRatio")+1)) {
float ratio = atof(result->value);
memcg_info_set_limit(get_memcg_info(CGROUP_LOW), ratio, totalram);
-// memcg_info_set_limit(memcg_tree[CGROUP_LOW]->info, ratio, totalram);
} else if (!strncmp(result->name, "NumMaxVictims", strlen("NumMaxVictims")+1)) {
int value = atoi(result->value);
num_max_victims = value;
return RESOURCED_ERROR_OUT_OF_MEMORY;
} else if (!strncmp(result->name, "SWAPPINESS", strlen("SWAPPINESS")+1)) {
int value = atoi(result->value);
-// default_swappiness = value;
memcg_set_default_swappiness(value);
memcg_info_set_swappiness(get_memcg_info(CGROUP_ROOT), value);
-// memcg_info_set_swappiness(memcg_tree[CGROUP_ROOT]->info, value);
} else if (!strncmp(result->name, "FOREGROUND_SWAPPINESS", strlen("FOREGROUND_SWAPPINESS")+1)) {
int value = atoi(result->value);
memcg_info_set_swappiness(get_memcg_info(CGROUP_HIGH), value);
-// memcg_info_set_swappiness(memcg_tree[CGROUP_HIGH]->info, value);
} else if (!strncmp(result->name, "BACKGROUND_SWAPPINESS", strlen("BACKGROUND_SWAPPINESS")+1)) {
int value = atoi(result->value);
memcg_info_set_swappiness(get_memcg_info(CGROUP_MEDIUM), value);
-// memcg_info_set_swappiness(memcg_tree[CGROUP_MEDIUM]->info, value);
} else if (!strncmp(result->name, "LOW_SWAPPINESS", strlen("LOW_SWAPPINESS")+1)) {
int value = atoi(result->value);
memcg_info_set_swappiness(get_memcg_info(CGROUP_LOW), value);
-// memcg_info_set_swappiness(memcg_tree[CGROUP_LOW]->info, value);
} else if (!strncmp(result->name, "NumFragSize", strlen("NumFragSize")+1)) {
fragmentation_size = atoi(result->value);
}
}
-/*static void init_memcg_params(void)
-{
- int idx = 0;
- GSList *cgroups;
-
- memcg_tree = (struct memcg **)malloc(sizeof(struct memcg *) *
- CGROUP_END);
- assert(memcg_tree);
-
- for (idx = CGROUP_ROOT; idx < CGROUP_END; idx++) {
- struct memcg_info *mi = &gmemcg_info[idx];
- memcg_tree[idx] = (struct memcg *)malloc(sizeof(struct memcg));
- assert(memcg_tree[idx]);
-
- memcg_init(memcg_tree[idx]);
- memcg_tree[idx]->info = mi;
- _I("init memory cgroup for %s", mi->name);
-
- if (mi->parent_memcg == CGROUP_TOP) {
- get_root_memcg_info() = memcg_tree[idx]->info;
- } else {
- cgroups = memcg_tree[mi->parent_memcg]->cgroups;
- cgroups = g_slist_prepend(cgroups, mi);
- memcg_tree[mi->parent_memcg]->use_hierarchy = true;
- }
- }
-}*/
-
-
static void lowmem_move_memcgroup(int pid, int next_oom_score_adj, struct proc_app_info *pai)
{
int cur_oom_score_adj;
int next_memcg_idx = cgroup_get_type(next_oom_score_adj);
if(next_memcg_idx < CGROUP_VIP || next_memcg_idx > CGROUP_LOW) {
- _E("[DEBUG] cgroup type (%d) should not be called", next_memcg_idx);
+ _E("cgroup type (%d) should not be called", next_memcg_idx);
return;
}
mi = get_memcg_info(next_memcg_idx);
if (cur_oom_score_adj != -1) {
/* VIP processes should not be asked to move. */
if (cur_memcg_idx <= CGROUP_VIP) {
- _E("[DEBUG] current cgroup (%s) cannot be VIP or Root", convert_cgroup_type_to_str(cur_memcg_idx));
+ _E("current cgroup (%s) cannot be VIP or Root", convert_cgroup_type_to_str(cur_memcg_idx));
return;
}
}
- //DEBUG
- _I("[DEBUG] app (%s) memory cgroup move from %s to %s", pai->appid, convert_cgroup_type_to_str(cur_memcg_idx), convert_cgroup_type_to_str(next_memcg_idx));
+ _I("app (%s) memory cgroup move from %s to %s", pai->appid, convert_cgroup_type_to_str(cur_memcg_idx), convert_cgroup_type_to_str(next_memcg_idx));
if (cur_oom_score_adj == next_oom_score_adj) {
- _D("[DEBUG] next oom_score_adj (%d) is same with current one", next_oom_score_adj);
+ _D("next oom_score_adj (%d) is same with current one", next_oom_score_adj);
return;
}
int ret = RESOURCED_ERROR_NONE;
if (LOWMEM_WORKER_IS_ACTIVE(&lmw)) {
- _I("LMK worker thread [%d] has already been created\n",
- (unsigned)lmw.worker_thread);
return ret;
}
static bool lowmem_press_eventfd_handler(int fd, void *data)
{
-// struct cgroup *cgroup;
struct memcg_info *mi;
-// GSList *iter = NULL;
enum cgroup_type type = CGROUP_ROOT;
// FIXME: probably shouldn't get ignored
if (lowmem_press_eventfd_read(fd) < 0)
- _E("[DEBUG] Failed to read lowmem press event, %m\n");
+ _E("Failed to read lowmem press event, %m\n");
for (type = CGROUP_ROOT; type < CGROUP_END; type++) {
if (!get_cgroup_tree(type) || !get_memcg_info(type))
}
return true;
}
-// gslist_for_each_item(iter, memcg_tree[i]->cgroups)
-/* gslist_for_each_item(iter, get_child_cgroups(type))
- {
- cgroup = (struct cgroup *)(iter->data);
- mi = cgroup->memcg_info;
- if (fd == mi->evfd) {
- lowmem_press_cgroup_handler(type, mi);
- _D("[DEBUG] lowmem cgroup handler is called for %s",
- mi->name);
- return true;
- }
- }*/
}
return true;
if (evfd == RESOURCED_ERROR_FAIL) {
int saved_errno = errno;
- _I("fail to register event press fd %s cgroup", name);
+ _E("failed to register event press fd for %s cgroup", name);
return -saved_errno;
}
? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
size = get_root_memcg_info()->threshold_leave + BYTE_TO_MBYTE(swap_size);
- _I("[DEBUG] reclaim from swap module, type : %d, size : %d, victims: %d", type, size, victims);
+ _I("reclaim from swap module, type : %d, size : %d, victims: %d", type, size, victims);
lowmem_trigger_reclaim(0, victims, type, size);
}
_D("history based proactive LMK : avg rss %u, available %u required = %u MB",
rss, before, size);
- //lowmem_trigger_reclaim(0, victims, LMK_OLDEST, size);
lowmem_trigger_reclaim(0, victims, CGROUP_LOW, size);
return;
*/
_D("Run threshold based proactive LMK: memory level to reach: %u\n",
proactive_leave + THRESHOLD_MARGIN);
- //lowmem_trigger_reclaim(0, victims, LMK_OLDEST, proactive_leave + THRESHOLD_MARGIN);
lowmem_trigger_reclaim(0, victims, CGROUP_LOW, proactive_leave + THRESHOLD_MARGIN);
}
* impact on the user experience.
*/
msg.type = CGROUP_MEDIUM;
-// msg.info = memcg_tree[msg.type]->info;
msg.memcg_info = get_memcg_info(msg.type);
resourced_notify(RESOURCED_NOTIFIER_SWAP_START, &msg);
static void load_configs(const char *path)
{
- _I("[DEBUG] path=%s", path);
-
if (config_parse(path, set_memory_config, NULL))
- _E("[DEBUG] (%s-mem) parse Fail", path);
+ _E("(%s-mem) parse Fail", path);
if (config_parse(path, load_popup_config, NULL))
- _E("[DEBUG] (%s-popup) parse Fail", path);
+ _E("(%s-popup) parse Fail", path);
if (config_parse(path, load_bg_reclaim_config, NULL))
- _E("[DEBUG] (%s-bg-reclaim) parse Fail", path);
+ _E("(%s-bg-reclaim) parse Fail", path);
if (config_parse(path, load_mem_log_config, NULL))
- _E("[DEBUG] (%s-mem-log) parse Fail", path);
+ _E("(%s-mem-log) parse Fail", path);
if (config_parse(path, vip_load_config, NULL))
- _E("[DEBUG] (%s-vip) parse Fail", path);
+ _E("(%s-vip) parse Fail", path);
}
static void load_per_vendor_configs(void)
struct dirent **namelist;
if ((count = scandir(CGROUP_VIP_LIST_DIR, &namelist, NULL, alphasort)) == -1) {
- _W("[DEBUG] failed to opendir (%s)", CGROUP_VIP_LIST_DIR);
+ _W("failed to scandir (%s)", CGROUP_VIP_LIST_DIR);
return;
}
{
/* print info of Memory section */
for (int mem_lvl = 0; mem_lvl < LOWMEM_MAX_LEVEL; mem_lvl++)
- _I("[DEBUG] set threshold for state '%s' to %u MB",
+ _I("set threshold for state '%s' to %u MB",
convert_memstate_to_str(mem_lvl), get_root_memcg_info()->threshold[mem_lvl]);
- _I("[DEBUG] set number of max victims as %d", num_max_victims);
- _I("[DEBUG] set threshold leave to %u MB", get_root_memcg_info()->threshold_leave);
- _I("[DEBUG] set proactive threshold to %u MB", proactive_threshold);
- _I("[DEBUG] set proactive low memory killer leave to %u MB", proactive_leave);
+ _I("set number of max victims as %d", num_max_victims);
+ _I("set threshold leave to %u MB", get_root_memcg_info()->threshold_leave);
+ _I("set proactive threshold to %u MB", proactive_threshold);
+ _I("set proactive low memory killer leave to %u MB", proactive_leave);
/* print info of POPUP section */
- _I("[DEBUG] oom popup is %s", oom_popup_enable == true ? "enabled" : "disabled");
+ _I("oom popup is %s", oom_popup_enable == true ? "enabled" : "disabled");
/* print info of BackgroundReclaim section */
- _I("[DEBUG] Background reclaim is %s", bg_reclaim == true ? "enabled" : "disabled");
+ _I("Background reclaim is %s", bg_reclaim == true ? "enabled" : "disabled");
/* print info of Logging section */
- _I("[DEBUG] memory logging is %s", memlog_enabled == 1 ? "enabled" : "disabled");
- _I("[DEBUG] memory logging path is %s", memlog_path);
- _I("[DEBUG] the max number of memory logging is %d", memlog_nr_max);
- _I("[DEBUG] the batch threshold of memory log is %d", memlog_remove_batch_thres);
- _I("[DEBUG] prefix of memps is %s", memlog_prefix[MEMLOG_MEMPS]);
- _I("[DEBUG] prefix of memlimit memps is %s", memlog_prefix[MEMLOG_MEMPS_MEMLIMIT]);
+ _I("memory logging is %s", memlog_enabled == 1 ? "enabled" : "disabled");
+ _I("memory logging path is %s", memlog_path);
+ _I("the max number of memory logging is %d", memlog_nr_max);
+ _I("the batch threshold of memory log is %d", memlog_remove_batch_thres);
+ _I("prefix of memps is %s", memlog_prefix[MEMLOG_MEMPS]);
+ _I("prefix of memlimit memps is %s", memlog_prefix[MEMLOG_MEMPS_MEMLIMIT]);
/* print info of VIP_PROCESS section */
if (!vip_apps)
return;
- _I("[DEBUG] vip app list is");
+ _I("vip app list is");
for (int vip_index = 0; vip_index < vip_apps->len; vip_index++) {
char *vip_name = g_ptr_array_index(vip_apps, vip_index);
- _I("\t[DEBUG] %s", vip_name);
+ _I("\t%s", vip_name);
}
}
/* make a worker thread called low memory killer */
ret = lowmem_activate_worker();
if (ret) {
- _E("[DEBUG] oom thread create failed\n");
+ _E("oom thread create failed\n");
return ret;
}
/* register threshold and event fd */
ret = lowmem_press_setup_eventfd();
if (ret) {
- _E("[DEBUG] eventfd setup failed");
+ _E("eventfd setup failed");
return ret;
}
static int lowmem_exit(void)
{
-// cgroup_params_exit();
-// int i;
-/* for (i = CGROUP_ROOT; i < CGROUP_END; i++) {
- g_slist_free_full(memcg_tree[i]->cgroups, free);
- free(memcg_tree[i]);
- }*/
if (strncmp(event_level, MEMCG_DEFAULT_EVENT_LEVEL, sizeof(MEMCG_DEFAULT_EVENT_LEVEL)))
free(event_level);
lowmem_trigger_memory_state_action(mem_state);
}
-/*void memcg_set_threshold(int type, int level, int value)
-{
- memcg_tree[type]->info->threshold[level] = value;
-}
-
-void memcg_set_leave_threshold(int type, int value)
-{
- memcg_tree[type]->info->threshold_leave = value;
-}*/
-
unsigned long lowmem_get_ktotalram(void)
{
return ktotalram;
}
-/*int lowmem_get_memcg(enum cgroup_type type, struct memcg **memcg_ptr)
-{
-
- if (memcg_ptr == NULL || memcg_tree == NULL || type >= CGROUP_END)
- return RESOURCED_ERROR_FAIL;
-
- *memcg_ptr = memcg_tree[type];
-
- return RESOURCED_ERROR_NONE;
-}*/
-
void lowmem_restore_memcg(struct proc_app_info *pai)
{
char *cgpath;
pai->memory.memcg_info = mi;
if(strstr(cgpath, pai->appid))
pai->memory.use_mem_limit = true;
-/* if (index == MEMCG_LIMIT)
- pai->memory.use_mem_limit = true;*/
free(cgpath);
}
struct compact_control *compact;
int result = RESOURCED_ERROR_OUT_OF_MEMORY;
- _I("[DEBUG] compact init");
+ _I("[COMPACTION] compact init");
pthread_mutex_lock(&compact_data.drained_lock);
if (compact_data.compact) {
- _E("[DEBUG] Unbalanced calls to compact module load/unload\n");
+ _E("[COMPACTION] Unbalanced calls to compact module load/unload\n");
result = RESOURCED_ERROR_NONE;
goto leave;
}
result = pthread_mutex_init(&compact->lock, NULL);
if (result) {
- _E("[DEBUG] Failed to init compact lock: %m");
+ _E("[COMPACTION] Failed to init compact lock: %m");
goto cleanup_all;
}
compact);
if (compact->status & COMPACT_SKIP) {
- _I("[DEBUG] Compaction module disabled.");
+ _I("[COMPACTION] Compaction module disabled.");
result = RESOURCED_ERROR_FAIL;
goto cleanup_all;
}
static int dedup_scanning_once(enum ksm_scan_mode mode)
{
int ret;
- _D("[DEBUG] Invoke scanning once to KSM (mode: %d)", mode);
+ _D("[DEDUP] Invoke scanning once to KSM (mode: %d)", mode);
ret = dedup_check_and_scanning_once(mode);
return ret;
}
if (mode != KSM_SCAN_NONE) {
- _I("[DEBUG] dedup: %d-th %s deduplication triggering", nr_dedup++,
+ _I("[DEDUP] dedup: %d-th %s deduplication triggering", nr_dedup++,
(mode == KSM_SCAN_FULL ? "FULL" : "PARTIAL"));
if (!dedup_on_lowmem) {
dedup_scanning_once(KSM_SCAN_FULL);
static gboolean dedup_activate_timer_cb(gpointer data)
{
dedup_activating_timer = NULL;
- _D("[DEBUG] dedup activating callback called");
+ _D("[DEDUP] dedup activating callback called");
dedup_activate_in_module();
return false;
}
if (dedup_at_boot_enable) {
/* if dedup_at_boot_enable is disabled,
* other daemon should activate dedup */
- _D("[DEBUG] dedup booting done is called");
+ _D("[DEDUP] dedup booting done is called");
if (dedup_at_boot_delay > 0)
dedup_activating_timer =
g_timeout_source_new_seconds(dedup_at_boot_delay);
if (val >= dedup_param_ranges[ltype][0] &&
val < dedup_param_ranges[ltype][1]) {
*var = val;
- _I("[DEBUG] Success to parse parameters, val: %d of %s in %s section",
+ _I("[DEDUP] Success to parse parameters, val: %d of %s in %s section",
val, lvalue, section);
} else
- _E("[DEBUG] Failed to parse parameters, ignoring: %s of %s in %s section",
+ _E("[DEDUP] Failed to parse parameters, ignoring: %s of %s in %s section",
rvalue, lvalue, section);
} else if (!strncmp(section, KSM_SECTION, sizeof(KSM_SECTION))) {
if (val >= ksm_param_ranges[ltype][0] &&
val < ksm_param_ranges[ltype][1]) {
*var = val;
- _I("[DEBUG] Success to parse parameters, val: %d of %s in %s section",
+ _I("[DEDUP] Success to parse parameters, val: %d of %s in %s section",
val, lvalue, section);
} else
- _E("[DEBUG] Failed to parse parameters, ignoring: %s of %s in %s section",
+ _E("[DEDUP] Failed to parse parameters, ignoring: %s of %s in %s section",
rvalue, lvalue, section);
} else
- _E("[DEBUG] Unknown section: %s", section);
+ _E("[DEDUP] Unknown section: %s", section);
return 0;
}
ret = config_parse_new(DEDUP_CONF_FILE, (void*) items);
if (ret < 0) {
- _E("[DEBUG] Failed to parse configuration file: %d", ret);
+ _E("[DEDUP] Failed to parse configuration file: %d", ret);
return ret;
}
- _I("[DEBUG] dedup init");
+ _I("[DEDUP] dedup init");
ksm_params[KSM_PARAM_PAGES_TO_SCAN] = arg_ksm_pages_to_scan;
ksm_params[KSM_PARAM_SLEEP_MSECS] = arg_ksm_sleep;
dedup_partial_scan_interval /= 1000;
dedup_full_scan_interval /= 1000;
- _I("[DEBUG] deduplication mode: %s", dedup_mode == DEDUP_MODE_PERIODIC ?
+ _I("[DEDUP] deduplication mode: %s", dedup_mode == DEDUP_MODE_PERIODIC ?
"kernel-managed" : "resourced-triggered");
- _I("[DEBUG] deduplication on boot: %s", dedup_at_boot_enable ? "true" : "false");
- _I("[DEBUG] scanning is invoked by %s", dedup_on_lowmem ?
+ _I("[DEDUP] deduplication on boot: %s", dedup_at_boot_enable ? "true" : "false");
+ _I("[DEDUP] scanning is invoked by %s", dedup_on_lowmem ?
"LOWMEM event" : "periodic timer");
- _I("[DEBUG] full scan interval: %d sec", dedup_full_scan_interval);
- _I("stat monitoring interval: %d sec", dedup_stat_interval);
+ _I("[DEDUP] full scan interval: %d sec", dedup_full_scan_interval);
+ _I("[DEDUP] stat monitoring interval: %d sec", dedup_stat_interval);
- _I("[DEBUG] ksm pages to scan: %d", arg_ksm_pages_to_scan);
- _I("[DEBUG] ksm sleep time: %d", arg_ksm_sleep);
- _I("[DEBUG] ksm full scan interval: %d", arg_ksm_full_scan_interval);
- _I("[DEBUG] ksm scan boost: %d", arg_ksm_scan_boost);
+ _I("[DEDUP] ksm pages to scan: %d", arg_ksm_pages_to_scan);
+ _I("[DEDUP] ksm sleep time: %d", arg_ksm_sleep);
+ _I("[DEDUP] ksm full scan interval: %d", arg_ksm_full_scan_interval);
+ _I("[DEDUP] ksm scan boost: %d", arg_ksm_scan_boost);
return 0;
}
#include "losetup.h"
#include "init.h"
-/*#define MEMCG_PATH "/sys/fs/cgroup/memory/"
-#define MEMCG_SIZE_LIMIT "memory.limit_in_bytes"
-#define MEMCG_FORCE_RECLAIM "memory.force_reclaim"
-#define MEMCG_MOVE_CHARGE "memory.move_charge_at_immigrate"*/
-
#define SWAP_PRIORITY 20
#define SWAP_HARD_LIMIT_DEFAULT 0.5
#define SWAP_FORCE_RECLAIM_NUM_MAX 5
SWAP_OP_ACTIVATE,
SWAP_OP_RECLAIM,
SWAP_OP_COMPACT,
-// SWAP_OP_MOVE_TO_SWAP_AND_RECLAIM,
SWAP_OP_END,
};
error = proc_set_oom_score_adj(child, oom_score_adj, pai);
}
-/* if (!pai)
- return cgroup_write_pid_fullpath(mi->name, pid);
-
- ret = cgroup_write_pid_fullpath(mi->name, pai->main_pid);
- gslist_for_each_item(iter_child, pai->childs) {
- pid_t child = GPOINTER_TO_PID(iter_child->data);
- ret = cgroup_write_pid_fullpath(mi->name, child);
- }
- pai->memory.memcg_idx = CGROUP_LOW;
- pai->memory.memcg_info = mi;*/
return error;
}
-/*static int swap_move_to_cgroup(struct memcg_info *info, GArray *candidates)
-{
- int index;
- int error = RESOURCED_ERROR_NONE;
- int oom_score_adj;
- int lowest_oom_score_adj;
- struct swap_task tsk;
- struct proc_app_info *pai = NULL;
- GSList *iter_child = NULL;
-
- if (!candidates)
- return RESOURCED_ERROR_NO_DATA;
-
- for (index = 0; index < candidates->len; index++) {
- tsk = g_array_index(candidates, struct swap_task, index);
- pai = tsk.pai;
-
- if (!pai) {
- _E("[DEBUG] Cannot find out proc_app_info");
- continue;
- }
-
- error = proc_get_oom_score_adj(pai->main_pid, &oom_score_adj);
- if (error) {
- _E("[DEBUG] Cannot get oom_score_adj of pid (%d)", pai->main_pid);
- continue;
- }
-
- lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(CGROUP_LOW);
-
- if (oom_score_adj < lowest_oom_score_adj)
- oom_score_adj = lowest_oom_score_adj;
-
- proc_set_oom_score_adj(pai->main_pid, oom_score_adj, pai);
- if (!pai->childs)
- continue;
-
- gslist_for_each_item(iter_child, pai->childs) {
- pid_t child = GPOINTER_TO_PID(iter_child->data);
- error = proc_set_oom_score_adj(child, oom_score_adj, pai);
- }
- }
- return error;
-}*/
-
-/*static int swap_sort_by_oom(const struct swap_task *ta,
- const struct swap_task *tb)
-{
- // sort by oom score adj
- assert(ta != NULL);
- assert(tb != NULL);
-
- return ((int)(tb->pai->memory.oom_score_adj) -
- (int)(ta->pai->memory.oom_score_adj));
-}*/
-
-/*static int swap_sort_by_vmrss(const struct swap_task *ta,
- const struct swap_task *tb)
-{
- // sort by task memory usage
- assert(ta != NULL);
- assert(tb != NULL);
-
- return ((int)(tb->size) - (int)(ta->size));
-}*/
-
-/*static int swap_reduce_victims(GArray *candidates, int max)
-{
- int index;
- struct swap_task tsk;
- struct proc_app_info *pai = NULL;
- unsigned int usage = 0;
-
- if (!candidates)
- return RESOURCED_ERROR_NO_DATA;
-
- for (index = 0; index < candidates->len; index++) {
- tsk = g_array_index(candidates, struct swap_task, index);
- pai = tsk.pai;
-
- // Measuring VmRSS is OK as it's anonymous + swapcache
- if (proc_get_approx_mem_usage(pai->main_pid, &usage) < 0)
- continue;
-
- tsk.size += usage;
-
- if (pai->childs) {
- GSList *iter_child = NULL;
-
- gslist_for_each_item(iter_child, pai->childs) {
- pid_t child = GPOINTER_TO_PID(iter_child->data);
- if (proc_get_approx_mem_usage(child, &usage) < 0)
- continue;
- tsk.size += usage;
- }
- }
- }
- // sort by oom_score_adj value, older are better candidates
- g_array_sort(candidates, (GCompareFunc)swap_sort_by_oom);
-
- // sort by memory usage, swapping bigger will free more memory
- g_array_sort(candidates, (GCompareFunc)swap_sort_by_vmrss);
-
- // limit the number of potential candidates, after sort by oom
- g_array_remove_range(candidates, max, candidates->len - max);
-
- return RESOURCED_ERROR_NONE;
-}*/
static int swap_use_hard_limit(char *memcg)
{
return swap_start_reclaim(info->name);
}
-/*static int swap_move_inactive_to_swap(struct swap_status_msg *msg)
-{
- GSList *proc_app_list = NULL;
- GSList *iter;
- int ret, max_victims;
- struct swap_task victim;
- GArray *candidates = NULL;
- struct cgroup *cgroup_swap = NULL;
- struct proc_app_info *pai = NULL;
-
- candidates = g_array_new(false, false, sizeof(struct swap_task));
- if (!candidates) {
- _E("failed to allocate memory");
- return RESOURCED_ERROR_OUT_OF_MEMORY;
- }
- memset(&victim, 0, sizeof(struct swap_task));
-
- proc_app_list = proc_app_list_open();
- gslist_for_each_item(iter, proc_app_list) {
- pai = (struct proc_app_info *)iter->data;
- if ((!pai->main_pid) ||
- (pai->memory.memcg_info != msg->memcg_info) ||
- (pai->memory.oom_score_adj < OOMADJ_BACKGRD_UNLOCKED) ||
- (pai->lru_state <= PROC_BACKGROUND))
- continue;
- victim.pai = pai;
- g_array_append_val(candidates, victim);
- }
- max_victims = candidates->len >> 1;
- if (max_victims == 0) {
- ret = RESOURCED_ERROR_NO_DATA;
- goto out;
- }
- swap_reduce_victims(candidates, max_victims);
- cgroup_swap = get_cgroup_tree(CGROUP_LOW);
- if(!cgroup_swap)
- goto out;
-
- msg->memcg_info = cgroup_swap->memcg_info;
- msg->type = CGROUP_LOW;
- ret = swap_move_to_cgroup(msg->memcg_info, candidates);
-out:
- proc_app_list_close();
- g_array_free(candidates, TRUE);
-
- return ret;
-}*/
-
static int gen_urandom_string(char *buf, size_t len)
{
_cleanup_close_ int fd = -1;
swap_compact_in_module();
break;
/* Move inactive procesess to swap, and reclaim after that. */
-/* case SWAP_OP_MOVE_TO_SWAP_AND_RECLAIM:
- ret = swap_move_inactive_to_swap(&(bundle->msg));
- if (ret == RESOURCED_ERROR_NONE)
- swap_reclaim_memcg(bundle->msg);
- break;*/
case SWAP_OP_END:
default:
_D("wrong swap thread operation selected");
static void swap_start_pid_dbus_signal_handler(GVariant *params)
{
-// int ret;
pid_t pid;
-// struct memcg *memcg_swap;
struct cgroup *cgroup_swap;
struct swap_status_msg ss_msg;
cgroup_swap = get_cgroup_tree(CGROUP_LOW);
if (!cgroup_swap)
return;
-/* ret = lowmem_get_memcg(CGROUP_LOW, &memcg_swap);
- if (ret != RESOURCED_ERROR_NONE)
- return;*/
swap_move_to_cgroup_by_pid(CGROUP_LOW, pid);
ss_msg.pid = pid;
ss_msg.type = CGROUP_LOW;
static void resourced_swap_change_memcg_settings(enum cgroup_type type)
{
int ret;
-// struct memcg *memcg_swap = NULL;
struct cgroup *cgroup_swap = NULL;
char buf[MAX_PATH_LENGTH];
cgroup_swap = get_cgroup_tree(type);
if (!cgroup_swap)
return;
-/* ret = lowmem_get_memcg(type, &memcg_swap);
- if (ret != RESOURCED_ERROR_NONE)
- return;*/
cgroup_write_node_uint32(cgroup_swap->memcg_info->name, MEMCG_MOVE_CHARGE, 1);
snprintf(buf, sizeof(buf), "%s/%s", MEMCG_PATH, MEMCG_FORCE_RECLAIM);