+++ /dev/null
-[PerProcess]
-Service=dbus.service
-CpuSched=fifo
-CpuRTPriority=5
-CpuRTRunTime=100ms
-CpuRTPeriod=300ms
-
-[PerProcess]
-Service=alarm-server.service
-CpuSched=rr
-CpuRTPriority=10
-CpuRTRunTime=100ms
-CpuRTPeriod=300ms
-
-[PerProcess]
-Service=ac.service
-CpuSched=other
-CpuNice=0
-
-[PerProcess]
-Service=scim.service
-CpuNice=1
-CpuPriority=0
-
-[PerProcess]
-Service=launchpad-process-pool.service
-CpuSched=batch
-CpuNice=2
-
-[PerProcess]
-Service=starter.service
-CpuSched=deadline
-
-[PerProcess]
-Service=at-spi-dbus-bus.service
-CpuSched=idle
return fwrite_int(buf, value);
}
+/**
+ * @desc Put an unsigned long long value to a cgroup node file.
+ * @param cgroup_name - cgroup path
+ * @param file_name - cgroup content to write
+ * @param value - value to write
+ * @return negative value if error
+ */
+int cgroup_write_node_ulonglong(const char *cgroup_name,
+		const char *file_name, unsigned long long value)
+{
+	char buf[MAX_PATH_LENGTH];
+	snprintf(buf, sizeof(buf), "%s/%s", cgroup_name, file_name);
+	_SD("cgroup_buf %s, value %llu\n", buf, value);
+	return fwrite_ulonglong(buf, value);
+}
+
int cgroup_write_node_str(const char *cgroup_name,
const char *file_name, const char *string)
{
return ret;
}
+/**
+ * @desc Get one unsigned long long value from a cgroup node file.
+ * @param cgroup_name - cgroup path
+ * @param file_name - cgroup content to read
+ * @param value - out parameter, value to fill
+ * @return negative value if error
+ */
+int cgroup_read_node_ulonglong(const char *cgroup_name,
+		const char *file_name, unsigned long long *value)
+{
+	char buf[MAX_PATH_LENGTH];
+	int ret;
+	snprintf(buf, sizeof(buf), "%s/%s", cgroup_name, file_name);
+	ret = fread_ulonglong(buf, value);
+	/* NOTE(review): *value is logged even when the read failed, so it may be
+	 * uninitialized here; callers must check the return code before use. */
+	_SD("cgroup_buf %s, value %llu\n", buf, *value);
+	return ret;
+}
+
int cgroup_make_full_subdir(const char* parentdir)
{
int result;
const char *file_name, int32_t *value);
/**
+ * @desc Get one unsigned long long value from cgroup
+ * @param cgroup_name - cgroup path
+ * @param file_name - cgroup content to write
+ * @param value - out parameter, value to fill
+ * @return negative value if error
+ */
+int cgroup_read_node_ulonglong(const char *cgroup_name,
+ const char *file_name, unsigned long long *value);
+
+/**
* @desc Put unsigned int32 value to cgroup,
* @param cgroup_name - cgroup path
* @param file_name - cgroup content to write
int cgroup_write_node_int32(const char *cgroup_name, const char *file_name, int32_t value);
/**
+ * @desc Put unsigned long long value to cgroup,
+ * @param cgroup_name - cgroup path
+ * @param file_name - cgroup content to write
+ * @param value - unsigned long data to write
+ * @return negative value if error
+ */
+int cgroup_write_node_ulonglong(const char *cgroup_name, const char *file_name, unsigned long long value);
+
+/**
* @desc Put value to cgroup,
* @param cgroup_name - cgroup path
* @param file_name - cgroup content to write
static int default_swappiness = 0;
+static unsigned long long totalram_bytes = 0;
+
/*
* Special node that point's to /sys/fs/cgroup/memory - root of memcg group.
* This is the same as memcg_tree[CGROUP_ROOT]->info.
{MEMCG_LOW_PATH,},
};
+/* Record the device's total RAM size in bytes. Used by set_limit_in_bytes()
+ * to clamp memcg limits that would exceed physical memory. */
+void register_totalram_bytes(unsigned long long ram_bytes)
+{
+	totalram_bytes = ram_bytes;
+}
+
int set_mem_action_conf(struct mem_action *mem_action, const char *value)
{
char *ptr = strchr(value, ',');
*(ptr - 2) = '\0';
if (temp == 'G') {
- mem_action->memory = GBYTE_TO_BYTE(atoi(value));
+ mem_action->memory_bytes = GBYTE_TO_BYTE(atoi(value));
}
else if (temp == 'M') {
- mem_action->memory = MBYTE_TO_BYTE(atoi(value));
+ mem_action->memory_bytes = MBYTE_TO_BYTE(atoi(value));
}
else if (temp == 'K') {
- mem_action->memory = KBYTE_TO_BYTE(atoi(value));
- }
- else if (temp == ' ') {
- mem_action->memory = atoi(value);
+ mem_action->memory_bytes = KBYTE_TO_BYTE(atoi(value));
}
else {
- _E("Memory size unit should be GB or MB or KB or B");
+ _E("Memory size unit should be GB or MB or KB");
return RESOURCED_ERROR_FAIL;
}
memcg_conf->threshold[lvl].threshold =
KBYTE_TO_MBYTE(atoi(value));
}
- else if (size == ' ') {
- memcg_conf->threshold[lvl].threshold =
- BYTE_TO_MBYTE(atoi(value));
- }
else {
- _E("Memory size unit should be GB or MB or KB or B");
+ _E("Memory size unit should be GB or MB or KB");
return RESOURCED_ERROR_FAIL;
}
}
free(memcg_conf);
}
-static void set_limit_in_bytes(const char *dir, unsigned int limit)
+static void set_limit_in_bytes(const char *dir, unsigned long long limit_bytes)
{
int error;
- unsigned int prev;
+ unsigned long long prev_bytes;
- error = cgroup_read_node_uint32(dir, MEMCG_LIMIT_BYTE, &prev);
+ error = cgroup_read_node_ulonglong(dir, MEMCG_LIMIT_BYTE, &prev_bytes);
if (error) {
_E("[MEMORY-LIMIT] Failed to get %s from %s", MEMCG_LIMIT_BYTE, dir);
return;
}
- if (limit == prev)
+ if (limit_bytes == prev_bytes)
return;
- if (prev > limit) {
- cgroup_write_node_uint32(dir, MEMCG_LIMIT_BYTE, limit);
- cgroup_write_node_uint32(dir, MEMCG_SWAP_LIMIT_BYTE, limit);
+ if (totalram_bytes > 0 && limit_bytes > totalram_bytes)
+ limit_bytes = totalram_bytes;
+
+ if (prev_bytes > limit_bytes) {
+ cgroup_write_node_ulonglong(dir, MEMCG_LIMIT_BYTE, limit_bytes);
+ cgroup_write_node_ulonglong(dir, MEMCG_SWAP_LIMIT_BYTE, limit_bytes);
}
else {
- cgroup_write_node_uint32(dir, MEMCG_SWAP_LIMIT_BYTE, limit);
- cgroup_write_node_uint32(dir, MEMCG_LIMIT_BYTE, limit);
+ cgroup_write_node_ulonglong(dir, MEMCG_SWAP_LIMIT_BYTE, limit_bytes);
+ cgroup_write_node_ulonglong(dir, MEMCG_LIMIT_BYTE, limit_bytes);
}
}
-int check_oom_and_set_limit(const char *dir, unsigned int limit)
+int check_oom_and_set_limit(const char *dir, unsigned long long limit_bytes)
{
int error;
static unsigned int poo = -1;
error = fread_uint("/proc/sys/vm/panic_on_oom", &poo);
if (error) {
_E("[MEMORY-LIMIT] Failed to get %s from %s", "/proc/sys/vm/panic_on_oom", dir);
- return RESOURCED_ERROR_FAIL;
+ poo = 0;
}
}
}
}
- set_limit_in_bytes(dir, limit);
+ set_limit_in_bytes(dir, limit_bytes);
return RESOURCED_ERROR_NONE;
}
static int memcg_write_limiter_info(struct memcg_info *mi)
{
- unsigned int limit = mi->limit;
+ unsigned long long limit_bytes = mi->limit_bytes;
const char *name = mi->name;
int ret = RESOURCED_ERROR_NONE;
/* enable cgroup move */
if (mi->limit_ratio == MEMCG_NO_LIMIT)
return ret;
+ _I("[MEMORY-LIMIT] dir = %s, limit = %llu bytes", name, limit_bytes);
/* write limit_in_bytes */
- ret = check_oom_and_set_limit(name, limit);
+ ret = check_oom_and_set_limit(name, limit_bytes);
return ret;
}
int memcg_write_limiter_params(void)
{
unsigned int i;
- unsigned int lower_group_limit = 0;
+ unsigned long long lower_group_limit_bytes = 0;
for (i = CGROUP_LOW; i > CGROUP_ROOT; i--) {
struct memcg_info *mi = get_memcg_info(i);
- if (mi->limit < lower_group_limit)
- mi->limit = lower_group_limit;
+ if (mi->limit_bytes < lower_group_limit_bytes)
+ mi->limit_bytes = lower_group_limit_bytes;
memcg_write_limiter_info(mi);
- lower_group_limit = mi->limit;
+ lower_group_limit_bytes = mi->limit_bytes;
}
return RESOURCED_ERROR_NONE;
if(!mi)
_E("memory cgroup of %d is NULL", type);
else
- mi->threshold[level] = value;
+ mi->threshold_mb[level] = value;
}
void memcg_set_leave_threshold(int type, int value)
if(!mi)
_E("memory cgroup of %d is NULL", type);
else
- mi->threshold_leave = value;
+ mi->threshold_leave_mb = value;
}
void memcg_info_set_limit(struct memcg_info *mi, float ratio,
- unsigned int totalram)
+ unsigned long long totalram_bytes)
{
if (!mi)
return;
- mi->limit = (float)totalram * ratio;
+ mi->limit_bytes = (double)totalram_bytes * ratio;
mi->limit_ratio = ratio;
}
return 0;
}
-int memcg_get_anon_usage(char *memcg, unsigned int *anon_usage)
+int memcg_get_anon_usage(char *memcg, unsigned long long *anon_usage_bytes)
{
int r;
_cleanup_free_ struct cgroup_memory_stat *mem_stat = NULL;
return r;
}
- *anon_usage = mem_stat->value[CGROUP_MEMORY_STAT_INACTIVE_ANON] +
+ *anon_usage_bytes = mem_stat->value[CGROUP_MEMORY_STAT_INACTIVE_ANON] +
mem_stat->value[CGROUP_MEMORY_STAT_ACTIVE_ANON];
return 0;
}
-int memcg_get_swap_usage(char *memcg, unsigned int *usage)
+int memcg_get_swap_usage(char *memcg, unsigned long long *usage_bytes)
{
int r;
_cleanup_free_ struct cgroup_memory_stat *mem_stat = NULL;
return r;
}
- *usage = mem_stat->value[CGROUP_MEMORY_STAT_SWAP];
+ *usage_bytes = mem_stat->value[CGROUP_MEMORY_STAT_SWAP];
return 0;
}
};
struct mem_action {
- unsigned int memory; /* Byte */
+ unsigned long long memory_bytes; /* Byte */
int action;
};
/* parent id */
/* limit ratio, if don't want to set limit, use NO_LIMIT*/
float limit_ratio;
- unsigned int limit;
+ unsigned long long limit_bytes;
/* thresholds, normal, swap, low, medium, and leave */
- unsigned int threshold[MEM_LEVEL_MAX]; /* MB */
- unsigned int threshold_leave; /* MB */
+ unsigned int threshold_mb[MEM_LEVEL_MAX]; /* MB */
+ unsigned int threshold_leave_mb; /* MB */
int evfd;
int swappiness;
};
long long value[CGROUP_MEMORY_STAT_MAX];
};
+void register_totalram_bytes(unsigned long long ram_bytes);
int set_mem_action_conf(struct mem_action *mem_action, const char *value);
int set_memcg_conf_threshold(bool percent, char size, int lvl, const char *value);
struct memcg_conf *get_memcg_conf(void);
void memcg_set_threshold(int type, int level, int value);
void memcg_set_leave_threshold(int type, int value);
-void memcg_info_set_limit(struct memcg_info *memcg_info, float ratio,
- unsigned int totalram);
+void memcg_info_set_limit(struct memcg_info *mi, float ratio,
+ unsigned long long totalram_bytes);
void memcg_set_default_swappiness(int swappiness);
void memcg_info_set_swappiness(struct memcg_info *mi, int swappiness);
int memcg_get_memory_stat(const char *name, struct cgroup_memory_stat **mem_stat);
* @desc get anon memory usage of cgroup based on memory.stat
* @return 0 if the value was correctly read
*/
-int memcg_get_anon_usage(char *memcg, unsigned int *anon_usage);
+int memcg_get_anon_usage(char *memcg, unsigned long long *anon_usage_bytes);
/**
* @desc get swapped memory usage of cgroup mi based on memory.stat
* @return 0 if the value was correctly read
*/
-int memcg_get_swap_usage(char *memcg, unsigned int *usage);
+int memcg_get_swap_usage(char *memcg, unsigned long long *usage_bytes);
/**
* @desc register eventfd to the memory cgroup with desired value
int memcg_set_eventfd(const char *memcg, const char *event, const char *value);
int memcg_init_eventfd(int evfd, const char *memcg, const char *event, const char *value);
-int check_oom_and_set_limit(const char *dir, unsigned int limit);
+int check_oom_and_set_limit(const char *dir, unsigned long long limit);
struct memcg_info *get_root_memcg_info(void);
void memcg_params_init(void);
return fwrite_str(path, digit_buf);
}
+/* Format @number as decimal text and write it to the file at @path.
+ * Returns the fwrite_str() result, or -ENOMEM if formatting fails. */
+int fwrite_ulonglong(const char *path, const unsigned long long number)
+{
+	_cleanup_free_ char *digit_buf = NULL;
+	int ret;
+
+	ret = asprintf(&digit_buf, "%llu", number);
+	ret_value_errno_msg_if(ret < 0, -ENOMEM,
+		       "sprintf failed\n");
+
+	return fwrite_str(path, digit_buf);
+}
+
int fread_str(const char *path, char **str)
{
_cleanup_fclose_ FILE *f = NULL;
*number = t;
return RESOURCED_ERROR_NONE;
}
+
+/* Read the n-th (0-based) whitespace-separated unsigned long long value from
+ * the file at @path into @number.
+ * @return RESOURCED_ERROR_NONE on success, negative errno-style value on
+ * open or parse failure. */
+int fread_nth_ulonglong(const char *path, size_t n, unsigned long long *number)
+{
+	_cleanup_fclose_ FILE *f = NULL;
+	size_t i;
+	unsigned long long t = 0;
+	int ret;
+
+	f = fopen(path, "r");
+	ret_value_errno_msg_if(!f, -errno,
+			"Fail to open %s file.", path);
+
+	errno = 0;
+	for (i = 0; i <= n; i++) {
+		/* fscanf() returns 1 only on a successful conversion; require
+		 * that, rather than just ret != EOF, so a matching failure
+		 * cannot leave @t unset/stale while we still report success. */
+		ret = fscanf(f, "%llu", &t);
+		ret_value_errno_msg_if(ret != 1, -(errno ?: ENOENT),
+				"Fail to read file\n");
+	}
+
+	*number = t;
+	return RESOURCED_ERROR_NONE;
+}
int fwrite_uint(const char *path, const u_int32_t number);
int fwrite_ulong(const char *path, const unsigned long number);
+int fwrite_ulonglong(const char *path, const unsigned long long number);
int fread_str(const char *path, char **str);
}
int fread_nth_ulong(const char *path, size_t n, unsigned long *number);
+int fread_nth_ulonglong(const char *path, size_t n, unsigned long long *number);
inline int fread_ulong(const char *path, unsigned long *number)
{
return fread_nth_ulong(path, 0, number);
}
+/* Convenience wrapper: read the first unsigned long long value from @path. */
+inline int fread_ulonglong(const char *path, unsigned long long *number)
+{
+	return fread_nth_ulonglong(path, 0, number);
+}
+
#ifdef __cplusplus
}
#endif /* __cplusplus */
return 0;
}
- _E("[%d, %p] is not registered!", status, func);
+ _E("[%d, %p] is unregistered!", status, func);
return -EINVAL;
}
struct proc_limit_status {
	struct proc_status ps;
-	unsigned int limit;
+	/* Memory limit in bytes. Must be unsigned long long to match
+	 * mem_action.memory_bytes and lowmem_limit_set_app()'s parameter;
+	 * plain unsigned long would truncate limits >= 4 GiB on 32-bit. */
+	unsigned long long limit_bytes;
	enum proc_action action;
};
return RESOURCED_ERROR_NONE;
}
-int proc_get_mem_status(pid_t pid, unsigned int *vmswap, unsigned int *vmrss)
+int proc_get_mem_status(pid_t pid, unsigned int *vmswap_kb, unsigned int *vmrss_kb)
{
char filename[PROC_BUF_MAX];
_cleanup_fclose_ FILE *fp = NULL;
- unsigned int swap = 0, rss = 0;
+ unsigned int swap_kb = 0, rss_kb = 0;
snprintf(filename, PROC_BUF_MAX, "/proc/%d/status", pid);
fp = fopen(filename, "r");
if (!fp)
return RESOURCED_ERROR_FAIL;
- if (vmrss != NULL) {
+ if (vmrss_kb != NULL) {
while (fgets(filename, sizeof(filename), fp)) {
/* Skip the lines, until first match */
if (!strstart_with(filename, "VmRSS:"))
continue;
/* Read RSS value and end this loop. */
- if (sscanf(filename, "VmRSS: %d kB", &rss) == 1)
+ if (sscanf(filename, "VmRSS: %u kB", &rss_kb) == 1)
break;
return RESOURCED_ERROR_NO_DATA;
}
- *vmrss = rss;
+ *vmrss_kb = rss_kb;
}
- if (vmswap != NULL) {
+ if (vmswap_kb != NULL) {
/* Interate over rest of Vm* values */
while (fgets(filename, sizeof(filename), fp)) {
/* Read VmSwap and return with positive result */
- if (sscanf(filename, "VmSwap: %d kB", &swap) == 1)
+			if (sscanf(filename, "VmSwap: %u kB", &swap_kb) == 1)
break;
/* End of file before VmSwap read, return with error */
if (feof(fp))
return RESOURCED_ERROR_NO_DATA;
}
- *vmswap = swap;
+ *vmswap_kb = swap_kb;
}
return RESOURCED_ERROR_NONE;
return RESOURCED_ERROR_NONE;
}
-int proc_get_zram_usage(pid_t pid, unsigned int *usage)
+int proc_get_zram_usage(pid_t pid, unsigned int *usage_kb)
{
int ret;
struct meminfo mi;
- static unsigned int swap_total = 0;
- unsigned int proc_swap_usage, zram_usage;
+ static unsigned int swap_total_kb = 0;
+ unsigned int proc_swap_usage_kb;
+ unsigned long long zram_usage_bytes;
/* Read total swap size just once and cache it */
- if (!swap_total) {
+ if (!swap_total_kb) {
ret = proc_get_meminfo(&mi, MEMINFO_MASK_SWAP_TOTAL);
if (ret < 0) {
_E("Failed to get %s: %m",
meminfo_id_to_string(MEMINFO_ID_SWAP_TOTAL));
return RESOURCED_ERROR_FAIL;
}
- swap_total = mi.value[MEMINFO_ID_SWAP_TOTAL];
+ swap_total_kb = mi.value[MEMINFO_ID_SWAP_TOTAL];
}
/* Read usage of Swap (VmSwap) of interested process */
- ret = proc_get_mem_status(pid, &proc_swap_usage, NULL);
+ ret = proc_get_mem_status(pid, &proc_swap_usage_kb, NULL);
if (ret != RESOURCED_ERROR_NONE)
return ret;
/* Read current total memory usage of zram device */
- ret = fread_nth_uint(SWAP_ZRAM_SYSFILE"mm_stat", 2, &zram_usage);
+ ret = fread_nth_ulonglong(SWAP_ZRAM_SYSFILE"mm_stat", 2, &zram_usage_bytes);
if (ret == -ENOENT) {
- ret = fread_uint(SWAP_ZRAM_SYSFILE"mem_used_total", &zram_usage);
+ ret = fread_ulonglong(SWAP_ZRAM_SYSFILE"mem_used_total", &zram_usage_bytes);
}
if (ret < 0)
* Calculate aproximated value of zram usage for selected process
* by formula: proc_zram_usage = ( VmSwap x ZramMemoryUsage )/SwapTotal
*/
- *usage = (int)((float)proc_swap_usage * BYTE_TO_KBYTE(zram_usage) / swap_total);
+ *usage_kb = (int)((float)proc_swap_usage_kb * BYTE_TO_KBYTE(zram_usage_bytes) / swap_total_kb);
return RESOURCED_ERROR_NONE;
}
-int proc_get_approx_mem_usage(pid_t pid, unsigned int *usage)
+int proc_get_approx_mem_usage(pid_t pid, unsigned int *usage_kb)
{
int ret;
- unsigned long resident = 0, shared = 0;
+ unsigned long resident_pages = 0, shared_pages = 0;
char filename[PROC_BUF_MAX];
_cleanup_fclose_ FILE *fp = NULL;
* seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n",
* size, resident, shared, text, data);
*/
- ret = fscanf(fp, "%*s %lu %lu %*s %*s %*s %*s\n", &resident, &shared);
+ ret = fscanf(fp, "%*s %lu %lu %*s %*s %*s %*s\n", &resident_pages, &shared_pages);
if (ret < 0)
return RESOURCED_ERROR_FAIL;
/*
* The value resident - shared is mostly similar to Uss.
*/
- *usage = BYTE_TO_KBYTE((resident - shared) << PAGE_SHIFT);
+ *usage_kb = BYTE_TO_KBYTE((unsigned long long)(resident_pages - shared_pages) << PAGE_SHIFT);
return RESOURCED_ERROR_NONE;
}
* @desc get how much ram is used in each application
* @return negative value if error or pid doesn't exist
*/
-int proc_get_ram_usage(pid_t pid, unsigned int *usage)
+int proc_get_ram_usage(pid_t pid, unsigned int *usage_kb)
{
int ret;
- unsigned int vmswap = 0, total = 0;
+ unsigned int vmswap_kb = 0, total_kb = 0;
- ret = proc_get_approx_mem_usage(pid, &total);
+ ret = proc_get_approx_mem_usage(pid, &total_kb);
if (ret < 0) {
_E("Failed to get usage : %d", pid);
return ret;
}
if (swap_get_state() == SWAP_ON) {
- ret = proc_get_mem_status(pid, &vmswap, NULL);
+ ret = proc_get_mem_status(pid, &vmswap_kb, NULL);
if (ret != RESOURCED_ERROR_NONE)
goto out;
* If it is necessary to know real ram size about each application,
* it should consider compression ratio.
*/
- vmswap *= MEM_SWAP_RATIO;
- total += vmswap;
+ vmswap_kb *= MEM_SWAP_RATIO;
+ total_kb += vmswap_kb;
}
out:
- *usage = total;
+ *usage_kb = total_kb;
return RESOURCED_ERROR_NONE;
}
return 0;
}
-int proc_get_ram_total(unsigned int *total)
+int proc_get_ram_total(unsigned int *total_kb)
{
unsigned long total_spanned = 0;
- static unsigned int total_ram = 0;
+ static unsigned int total_ram_kb = 0;
int ret;
const struct parse_arg args[] = {
PARSE_TAG("spanned[[:blank:]]+([0-9]+)\n",
PARSE_TAG_EMPTY(),
};
- if (total_ram > 0) {
- *total = total_ram;
+ if (total_ram_kb > 0) {
+ *total_kb = total_ram_kb;
return RESOURCED_ERROR_NONE;
}
if (ret != RESOURCED_ERROR_NONE)
return RESOURCED_ERROR_NO_DATA;
- total_ram = (unsigned int)BYTE_TO_KBYTE(total_spanned << PAGE_SHIFT);
- *total = total_ram;
+ total_ram_kb = (unsigned int)BYTE_TO_KBYTE((unsigned long long)total_spanned << PAGE_SHIFT);
+ *total_kb = total_ram_kb;
return RESOURCED_ERROR_NONE;
}
int oom_score_adj, ret;
unsigned int rss, swap;
struct meminfo mi;
- unsigned int free = 0;
- unsigned int total_mem = 0, available = 0, used;
- unsigned int swap_total = 0, swap_free = 0, zram_used, swap_used;
+ unsigned int free_kb = 0;
+ unsigned int total_mem_kb = 0, available_kb = 0, used_kb;
+ unsigned int swap_total_kb = 0, swap_free_kb = 0, swap_used_kb;
+ unsigned long long zram_used_bytes;
dir = opendir("/proc");
if (dir == NULL) {
return;
}
- total_mem = mi.value[MEMINFO_ID_MEM_TOTAL];
- free = mi.value[MEMINFO_ID_MEM_FREE];
- available = mi.value[MEMINFO_ID_MEM_AVAILABLE];
- swap_total = mi.value[MEMINFO_ID_SWAP_TOTAL];
- swap_free = mi.value[MEMINFO_ID_SWAP_FREE];
+ total_mem_kb = mi.value[MEMINFO_ID_MEM_TOTAL];
+ free_kb = mi.value[MEMINFO_ID_MEM_FREE];
+ available_kb = mi.value[MEMINFO_ID_MEM_AVAILABLE];
+ swap_total_kb = mi.value[MEMINFO_ID_SWAP_TOTAL];
+ swap_free_kb = mi.value[MEMINFO_ID_SWAP_FREE];
- used = total_mem - available;
- swap_used = swap_total - swap_free;
+ used_kb = total_mem_kb - available_kb;
+ swap_used_kb = swap_total_kb - swap_free_kb;
- ret = fread_nth_uint(SWAP_ZRAM_SYSFILE"mm_stat", 2, &zram_used);
+ ret = fread_nth_ulonglong(SWAP_ZRAM_SYSFILE"mm_stat", 2, &zram_used_bytes);
if (ret == -ENOENT) {
- ret = fread_uint(SWAP_ZRAM_SYSFILE"mem_used_total", &zram_used);
+ ret = fread_ulonglong(SWAP_ZRAM_SYSFILE"mem_used_total", &zram_used_bytes);
}
if (ret != RESOURCED_ERROR_NONE)
- zram_used = 0;
+ zram_used_bytes = 0;
LOG_DUMP(fp, "====================================================================\n");
LOG_DUMP(fp, "Total RAM size: \t%15d MB( %6d kB)\n",
- KBYTE_TO_MBYTE(total_mem), total_mem);
+ KBYTE_TO_MBYTE(total_mem_kb), total_mem_kb);
LOG_DUMP(fp, "Used (Mem+Reclaimable): %15d MB( %6d kB)\n",
- KBYTE_TO_MBYTE(total_mem - free), total_mem - free);
+ KBYTE_TO_MBYTE(total_mem_kb - free_kb), total_mem_kb - free_kb);
LOG_DUMP(fp, "Used (Mem+Swap): \t%15d MB( %6d kB)\n",
- KBYTE_TO_MBYTE(used), used);
+ KBYTE_TO_MBYTE(used_kb), used_kb);
LOG_DUMP(fp, "Used (Mem): \t\t%15d MB( %6d kB)\n",
- KBYTE_TO_MBYTE(used), used);
+ KBYTE_TO_MBYTE(used_kb), used_kb);
LOG_DUMP(fp, "Used (Swap): \t\t%15d MB( %6d kB)\n",
- KBYTE_TO_MBYTE(swap_used), swap_used);
+ KBYTE_TO_MBYTE(swap_used_kb), swap_used_kb);
LOG_DUMP(fp, "Used (Zram block device): %13d MB( %6d kB)\n",
- BYTE_TO_MBYTE(zram_used), BYTE_TO_KBYTE(zram_used));
+ (int)BYTE_TO_MBYTE(zram_used_bytes), (int)BYTE_TO_KBYTE(zram_used_bytes));
LOG_DUMP(fp, "Mem Free:\t\t%15d MB( %6d kB)\n",
- KBYTE_TO_MBYTE(free), free);
+ KBYTE_TO_MBYTE(free_kb), free_kb);
LOG_DUMP(fp, "Available (Free+Reclaimable):%10d MB( %6d kB)\n",
- KBYTE_TO_MBYTE(available), available);
+ KBYTE_TO_MBYTE(available_kb), available_kb);
return;
}
struct sched_attr {
	uint32_t size;              /* Size of this structure */
	uint32_t sched_policy;      /* Policy (SCHED_*) */
-	uint64_t sched_flags;       /* Flags */
+	unsigned long long sched_flags;     /* Flags; 64-bit, same width as the kernel's __u64 */
	int32_t sched_nice;         /* Nice value (SCHED_OTHER, SCHED_BATCH) */
	uint32_t sched_priority;    /* Static priority (SCHED_FIFO, SCHED_RR) */
	/* Remaining fields are for SCHED_DEADLINE */
-	uint64_t sched_runtime;
-	uint64_t sched_deadline;
-	uint64_t sched_period;
+	unsigned long long sched_runtime;   /* nanoseconds; see sched_setattr(2) */
+	unsigned long long sched_deadline;  /* nanoseconds */
+	unsigned long long sched_period;    /* nanoseconds */
};
static inline void freep(void *p)
#define BYTE_TO_MBYTE(b) ((b) >> 20)
#define BYTE_TO_PAGE(b) ((b) >> 12)
-#define KBYTE_TO_BYTE(k) ((k) << 10)
+/* The *_TO_BYTE macros widen to 64-bit before shifting so sizes of 4 GiB or
+ * more do not overflow a 32-bit int during the shift. */
+#define KBYTE_TO_BYTE(k) ((unsigned long long)(k) << 10)
#define KBYTE_TO_MBYTE(k) ((k) >> 10)
-#define MBYTE_TO_BYTE(m) ((m) << 20)
+#define MBYTE_TO_BYTE(m) ((unsigned long long)(m) << 20)
#define MBYTE_TO_KBYTE(m) ((m) << 10)
-#define GBYTE_TO_BYTE(g) ((g) << 30)
+#define GBYTE_TO_BYTE(g) ((unsigned long long)(g) << 30)
#define GBYTE_TO_MBYTE(g) ((g) << 10)
#define streq(a, b) (strncmp((a), (b), strlen(b)+1) == 0)
}
}
- if (pci->mem_action.memory && pci->mem_action.action) {
+ if (pci->mem_action.memory_bytes && pci->mem_action.action) {
struct proc_limit_status pls = {0, };
- pls.limit = pci->mem_action.memory;
+ pai->memory.memlimit_update_exclude = true;
+ pls.limit_bytes = pci->mem_action.memory_bytes;
pls.ps.pai = pai;
pls.action = pci->mem_action.action;
resourced_notify(RESOURCED_NOTIFIER_LIMIT_APP, &pls);
cgroup_name = arg[1];
if (argnum == 3)
pkg_name = arg[2];
- _SD("[PROCESS] appid %s, pid %d, status %d\n", cgroup_name, pid, status);
+ _SD("appid %s, pid %d, status %d\n", cgroup_name, pid, status);
return resourced_proc_status_change(status, pid, cgroup_name, pkg_name, PROC_TYPE_GUI);
}
{
FILE *proc_stat;
char buf[1024];
- unsigned long vsz = 0;
+ unsigned long long vsz_bytes = 0;
proc_stat = NULL;
goto error;
while (fgets(buf, sizeof(buf), proc_stat) != NULL) {
- if (sscanf(buf, "%*d %*s %*c %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %lu",
- &vsz) != 1)
+ if (sscanf(buf, "%*d %*s %*c %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d %llu",
+ &vsz_bytes) != 1)
goto error;
}
fclose(proc_stat);
- *vsize = BYTE_TO_KBYTE(vsz);
+ *vsize = BYTE_TO_KBYTE(vsz_bytes);
return RESOURCED_ERROR_NONE;
error:
static void lowmem_dbus_oom_set_threshold(GVariant *params)
{
- int level = -1, thres = -1;
+ int level = -1, thres_mb = -1;
const char *const gtype = "(ii)";
ret_if_gvariant_type_mismatch(params, gtype);
- g_variant_get(params, gtype, &level, &thres);
+ g_variant_get(params, gtype, &level, &thres_mb);
ret_unless(level >= 0);
- ret_unless(thres >= 0);
+ ret_unless(thres_mb >= 0);
- memcg_set_threshold(CGROUP_ROOT, level, thres);
+ memcg_set_threshold(CGROUP_ROOT, level, thres_mb);
}
static void lowmem_dbus_oom_set_leave_threshold(GVariant *params)
{
- int thres = -1;
+ int thres_mb = -1;
const char *const gtype = "(i)";
ret_if_gvariant_type_mismatch(params, gtype);
- g_variant_get(params, gtype, &thres);
- ret_unless(thres >= 0);
+ g_variant_get(params, gtype, &thres_mb);
+ ret_unless(thres_mb >= 0);
- memcg_set_leave_threshold(CGROUP_ROOT, thres);
+ memcg_set_leave_threshold(CGROUP_ROOT, thres_mb);
}
static void lowmem_dbus_oom_trigger(GVariant *params)
{
int result;
pid_t pid = 0;
- unsigned int limit = 0;
+	unsigned int limit_bytes = 0;
const char *const gtype = "(iu)";
struct proc_app_info *pai;
ret_if_gvariant_type_mismatch(params, gtype);
- g_variant_get(params, gtype, &pid, &limit);
+ g_variant_get(params, gtype, &pid, &limit_bytes);
ret_unless(pid > 0);
- ret_unless(limit > 0);
+ ret_unless(limit_bytes > 0);
pai = find_app_info(pid);
if (pai) {
- _I("[MEMORY-LIMIT] name: %s, limit: %u", pai->appid, limit);
if (pai->memory.memlimit_update_exclude)
return;
- lowmem_limit_set_app(limit, pai, PROC_ACTION_KILL);
+ lowmem_limit_set_app(limit_bytes, pai, PROC_ACTION_KILL);
}
else {
char appname[PROC_NAME_MAX];
_E("Failed to get cmdline basename of pid(%d)", pid);
return;
}
- lowmem_limit_set_system_service(pid, limit, appname, PROC_ACTION_KILL);
+ lowmem_limit_set_system_service(pid, limit_bytes, appname, PROC_ACTION_KILL);
}
}
struct memory_limit_event {
fd_handler_h fdh;
int fd;
- unsigned int threshold; /* byte */
+ unsigned long long threshold_bytes; /* byte */
char *path;
enum proc_action action;
GArray *pids_array;
*/
//void make_memps_log(enum mem_log path, pid_t pid, char *victim_name);
-void lowmem_memory_init(unsigned int service_limit, unsigned int widget_limit,
- unsigned int guiapp_limit, unsigned int bgapp_limit);
+void lowmem_memory_init(unsigned long long service_limit_bytes, unsigned long long widget_limit_bytes,
+ unsigned long long guiapp_limit_bytes, unsigned long long bgapp_limit_bytes);
void lowmem_action_init(int service_action, int widget_action,
int guiapp_action, int bgapp_action);
-int lowmem_limit_set_app(unsigned int limit, struct proc_app_info *pai,
+int lowmem_limit_set_app(unsigned long long limit_bytes, struct proc_app_info *pai,
enum proc_action action);
-void lowmem_limit_set_system_service(pid_t pid, unsigned int limit,
+int lowmem_limit_set_system_service(pid_t pid, unsigned long long limit_bytes,
const char *name, enum proc_action action);
void lowmem_dbus_init(void);
int lowmem_trigger_reclaim(int flags, int victims, enum cgroup_type type, int threshold);
-void lowmem_trigger_swap_reclaim(enum cgroup_type type, int swap_size);
+void lowmem_trigger_swap_reclaim(enum cgroup_type type, unsigned long long swap_size_bytes);
void lowmem_change_memory_state(int state, int force);
unsigned long lowmem_get_ktotalram(void);
-unsigned long lowmem_get_totalram(void);
+unsigned long long lowmem_get_totalram(void);
void lowmem_trigger_swap(pid_t pid, char *path, bool move);
void lowmem_limit_init(void);
void lowmem_limit_exit(void);
int lowmem_limit_move_cgroup(struct proc_app_info *pai);
int lowmem_reassign_limit(const char *dir,
- unsigned int limit, enum proc_action action);
+ unsigned long long limit_bytes, enum proc_action action);
unsigned int lowmem_get_task_mem_usage_rss(const struct task_info *tsk);
bool lowmem_fragmentated(void);
unsigned int lowmem_get_proactive_thres(void);
static GHashTable *memory_limit_hash;
static char *registerpath;
-static unsigned int mem_service_limit;
-static unsigned int mem_widget_limit;
-static unsigned int mem_guiapp_limit;
-static unsigned int mem_bgapp_limit;
+static unsigned long long mem_service_limit_bytes;
+static unsigned long long mem_widget_limit_bytes;
+static unsigned long long mem_guiapp_limit_bytes;
+static unsigned long long mem_bgapp_limit_bytes;
static int mem_service_action = PROC_ACTION_IGNORE;
static int mem_widget_action = PROC_ACTION_IGNORE;
if (mle->pids_array == NULL) {
_E("[MEMORY-LIMIT] pids array should not be NULL");
- goto mle_timer_init;
+ goto timer_out;
}
for (int i = 0; i < mle->pids_array->len; i++) {
g_array_free(mle->pids_array, true);
-mle_timer_init:
mle->pids_array = NULL;
timer_out:
return G_SOURCE_REMOVE;
int result;
pid_t pid;
GArray *pids_array = NULL;
- uint32_t usage;
- uint64_t dummy_efd;
+ unsigned long long usage_bytes;
+ unsigned long long dummy_efd;
char *cg_dir = (char *)data;
struct memory_limit_event *mle;
goto remove_mle;
}
- result = cgroup_read_node_uint32(cg_dir, MEMCG_SWAP_USAGE, &usage);
+ result = cgroup_read_node_ulonglong(cg_dir, MEMCG_SWAP_USAGE, &usage_bytes);
if (result < 0) {
- result = cgroup_read_node_uint32(cg_dir, MEMCG_USAGE, &usage);
+ result = cgroup_read_node_ulonglong(cg_dir, MEMCG_USAGE, &usage_bytes);
if (result < 0) {
_D("[MEMORY-LIMIT] there is no (%s) cgroup any longer, removed it", cg_dir);
goto remove_mle;
}
}
- if (usage < mle->threshold) {
- _D("[MEMORY-LIMIT] (%s) cgroup escaped low memory status. usage(%d), threshold(%d)",
- cg_dir, usage, mle->threshold);
+ if (usage_bytes < mle->threshold_bytes) {
+ _D("[MEMORY-LIMIT] (%s) cgroup escaped low memory status. usage(%llu) bytes, threshold(%llu) bytes",
+ cg_dir, usage_bytes, mle->threshold_bytes);
return true;
}
}
int lowmem_reassign_limit(const char *dir,
- unsigned int limit, enum proc_action action)
+ unsigned long long limit_bytes, enum proc_action action)
{
int fd;
fd_handler_h fdh = NULL;
gpointer hash_entry;
struct memory_limit_event *mle = NULL;
-	char buf[MAX_DEC_SIZE(int)] = {0};
+	char buf[MAX_DEC_SIZE(long long)] = {0};
+ unsigned long long max_limit_bytes;
if (memory_limit_hash) {
/* TO DO: currently concurrent processes with same app name are located
hash_entry = g_hash_table_lookup(memory_limit_hash, dir);
if (hash_entry) {
mle = (struct memory_limit_event *)hash_entry;
- if (mle->threshold == limit) {
+ if (mle->threshold_bytes == limit_bytes) {
return RESOURCED_ERROR_NONE;
}
}
}
}
- check_oom_and_set_limit(dir, limit * 1.2);
- snprintf(buf, sizeof(buf), "%d", limit);
+ if (limit_bytes > lowmem_get_totalram()) {
+ max_limit_bytes = lowmem_get_totalram();
+ limit_bytes = lowmem_get_totalram();
+ }
+ else if (limit_bytes * 1.2 > lowmem_get_totalram()) {
+ max_limit_bytes = lowmem_get_totalram();
+ }
+ else
+ max_limit_bytes = limit_bytes * 1.2;
+
+ check_oom_and_set_limit(dir, max_limit_bytes);
+
+ snprintf(buf, sizeof(buf), "%llu", limit_bytes);
if (mle) {
- mle->threshold = limit;
+ mle->threshold_bytes = limit_bytes;
memcg_init_eventfd(mle->fd, dir, registerpath, buf);
return RESOURCED_ERROR_NONE;
}
return RESOURCED_ERROR_OUT_OF_MEMORY;
}
mle->action = action;
- mle->threshold = limit;
+ mle->threshold_bytes = limit_bytes;
mle->pids_array = NULL;
add_fd_read_handler(fd, memory_action_cb, mle->path, NULL, &fdh);
mle->fdh = fdh;
return RESOURCED_ERROR_NONE;
}
-void lowmem_limit_set_system_service(pid_t pid, unsigned int limit,
+int lowmem_limit_set_system_service(pid_t pid, unsigned long long limit_bytes,
const char *name, enum proc_action action)
{
_cleanup_free_ char *path = NULL;
int result;
- unsigned int totalram = lowmem_get_totalram();
+ unsigned long long totalram_bytes = lowmem_get_totalram();
- if (limit < MIN_LIMIT_VALUE || limit > totalram) {
- _E("[MEMORY-LIMIT] It's meaningless to set memory limit with size (%d)", limit);
- return;
+ if (limit_bytes < MIN_LIMIT_VALUE || limit_bytes > totalram_bytes) {
+ _E("[MEMORY-LIMIT] It's meaningless to set memory limit with size (%llu) bytes", limit_bytes);
+ return RESOURCED_ERROR_INVALID_PARAMETER;
}
if (action == PROC_ACTION_IGNORE)
- return;
+ return RESOURCED_ERROR_NONE;
if (!name) {
_E("[MEMORY-LIMIT] service name is NULL");
- return;
+ return RESOURCED_ERROR_FAIL;
}
result = asprintf(&path, "%s/%s", MEMCG_HIGH_PP_PATH, name);
if (result < 0) {
_E("[MEMORY-LIMIT] not enough memory");
- return;
+ return RESOURCED_ERROR_OUT_OF_MEMORY;
}
result = cgroup_make_subdir(MEMCG_HIGH_PP_PATH, name, NULL);
if (result < 0) {
_E("[MEMORY-LIMIT] Failed to create cgroup subdir '%s/%s'",
MEMCG_HIGH_PP_PATH, name);
- return;
+ return result;
}
- result = lowmem_reassign_limit(path, limit, action);
+ result = lowmem_reassign_limit(path, limit_bytes, action);
if (result < 0) {
_W("[MEMORY-LIMIT] Failed to reassign limit for %s", path);
- return;
+ return result;
}
result = cgroup_write_node_uint32(path, MEMCG_MOVE_CHARGE, 3U);
- if (result < 0)
+ if (result < 0) {
_W("[MEMORY-LIMIT] Failed to set immigrate mode for %s (non-crucial, continuing)", path);
+ return result;
+ }
cgroup_write_pid_fullpath(path, pid);
+
+ return RESOURCED_ERROR_NONE;
}
-int lowmem_limit_set_app(unsigned int limit, struct proc_app_info *pai,
+int lowmem_limit_set_app(unsigned long long limit_bytes, struct proc_app_info *pai,
enum proc_action action)
{
_cleanup_free_ char *path = NULL;
GSList *iter = NULL;
int result;
- unsigned int totalram = lowmem_get_totalram();
+ unsigned long long totalram_bytes = lowmem_get_totalram();
- if (limit < MIN_LIMIT_VALUE || limit > totalram) {
- _E("[MEMORY-LIMIT] It's meaningless to set memory limit with size (%d)", limit);
+ if (limit_bytes < MIN_LIMIT_VALUE || limit_bytes > totalram_bytes) {
+ _E("[MEMORY-LIMIT] It's meaningless to set memory limit with size (%llu) bytes", limit_bytes);
return RESOURCED_ERROR_INVALID_PARAMETER;
}
+ if (action == PROC_ACTION_IGNORE)
+ return RESOURCED_ERROR_NONE;
+
if (!pai) {
_E("[MEMORY-LIMIT] process app information is NULL");
return RESOURCED_ERROR_INVALID_PARAMETER;
return result;
}
- result = lowmem_reassign_limit(path, limit, action);
+ result = lowmem_reassign_limit(path, limit_bytes, action);
if (result < 0) {
_W("[MEMORY-LIMIT] Failed to reassign limit for %s", path);
return result;
struct proc_limit_status *pls = (struct proc_limit_status *)data;
- error = lowmem_limit_set_app(pls->limit, pls->ps.pai, pls->action);
+ error = lowmem_limit_set_app(pls->limit_bytes, pls->ps.pai, pls->action);
if (!error)
pls->ps.pai->memory.memlimit_update_exclude = true;
- return RESOURCED_ERROR_NONE;
+
+ return error;
}
static int lowmem_limit_system_service(void *data)
{
+ int error;
+
assert(data);
struct proc_limit_status *pls = (struct proc_limit_status *)data;
- lowmem_limit_set_system_service(pls->ps.pid, pls->limit, pls->ps.pci->name, pls->action);
- return RESOURCED_ERROR_NONE;
+ error = lowmem_limit_set_system_service(pls->ps.pid, pls->limit_bytes,
+ pls->ps.pci->name, pls->action);
+
+ return error;
}
static int lowmem_limit_service(void *data)
if (ps->pai && ps->pai->memory.memlimit_update_exclude)
return RESOURCED_ERROR_NONE;
- if (mem_service_limit && mem_service_action != PROC_ACTION_IGNORE) {
- lowmem_limit_set_app(mem_service_limit, ps->pai, mem_service_action);
+ if (mem_service_limit_bytes && mem_service_action != PROC_ACTION_IGNORE) {
+ lowmem_limit_set_app(mem_service_limit_bytes, ps->pai, mem_service_action);
}
return RESOURCED_ERROR_NONE;
}
if (ps->pai && ps->pai->memory.memlimit_update_exclude)
return RESOURCED_ERROR_NONE;
- if (mem_guiapp_limit && mem_guiapp_action != PROC_ACTION_IGNORE &&
+ if (mem_guiapp_limit_bytes && mem_guiapp_action != PROC_ACTION_IGNORE &&
ps->pai->type == PROC_TYPE_GUI) {
- lowmem_limit_set_app(mem_guiapp_limit, ps->pai, mem_guiapp_action);
+ lowmem_limit_set_app(mem_guiapp_limit_bytes, ps->pai, mem_guiapp_action);
}
- if (mem_widget_limit && mem_widget_action != PROC_ACTION_IGNORE &&
+ if (mem_widget_limit_bytes && mem_widget_action != PROC_ACTION_IGNORE &&
ps->pai->type == PROC_TYPE_WIDGET) {
- lowmem_limit_set_app(mem_widget_limit, ps->pai, mem_widget_action);
+ lowmem_limit_set_app(mem_widget_limit_bytes, ps->pai, mem_widget_action);
}
return RESOURCED_ERROR_NONE;
if (ps->pai && ps->pai->memory.memlimit_update_exclude)
return RESOURCED_ERROR_NONE;
- lowmem_limit_set_app(mem_bgapp_limit, ps->pai, mem_bgapp_action);
+ lowmem_limit_set_app(mem_bgapp_limit_bytes, ps->pai, mem_bgapp_action);
return RESOURCED_ERROR_NONE;
}
struct proc_status *ps = (struct proc_status *)data;
- if ((mem_guiapp_limit && ps->pai->type == PROC_TYPE_GUI) ||
- (mem_widget_limit && ps->pai->type == PROC_TYPE_WIDGET))
+ if ((mem_guiapp_limit_bytes && ps->pai->type == PROC_TYPE_GUI) ||
+ (mem_widget_limit_bytes && ps->pai->type == PROC_TYPE_WIDGET))
return lowmem_limit_appwidget(data);
_E("[MEMORY-LIMIT] Unable to set foreground app limit - app type not supported");
return RESOURCED_ERROR_NONE;
}
-void lowmem_memory_init(unsigned int service_limit, unsigned int widget_limit,
- unsigned int guiapp_limit, unsigned int bgapp_limit)
+void lowmem_memory_init(unsigned long long service_limit_bytes, unsigned long long widget_limit_bytes,
+ unsigned long long guiapp_limit_bytes, unsigned long long bgapp_limit_bytes)
{
- mem_service_limit = service_limit;
- mem_widget_limit = widget_limit;
- mem_guiapp_limit = guiapp_limit;
- mem_bgapp_limit = bgapp_limit;
+ mem_service_limit_bytes = service_limit_bytes;
+ mem_widget_limit_bytes = widget_limit_bytes;
+ mem_guiapp_limit_bytes = guiapp_limit_bytes;
+ mem_bgapp_limit_bytes = bgapp_limit_bytes;
+
+ _I("[MEMORY-LIMIT] service = %llu bytes, widget = %llu bytes, guiapp = %llu bytes, bgapp = %llu bytes",
+ mem_service_limit_bytes, mem_widget_limit_bytes, mem_guiapp_limit_bytes, mem_bgapp_limit_bytes);
}
void lowmem_action_init(int service_action, int widget_action,
void lowmem_limit_init(void)
{
int result;
- unsigned int usage;
+ unsigned long long usage_bytes;
- result = cgroup_read_node_uint32(MEMCG_PATH, MEMCG_SWAP_USAGE, &usage);
+ result = cgroup_read_node_ulonglong(MEMCG_PATH, MEMCG_SWAP_USAGE, &usage_bytes);
if (result == RESOURCED_ERROR_NONE)
registerpath = MEMCG_SWAP_USAGE;
else
register_notifier(RESOURCED_NOTIFIER_LIMIT_SYSTEM_SERVICE, lowmem_limit_system_service);
register_notifier(RESOURCED_NOTIFIER_LIMIT_APP, lowmem_limit_app);
- if (mem_service_limit && mem_service_action != PROC_ACTION_IGNORE)
+ if (mem_service_limit_bytes && mem_service_action != PROC_ACTION_IGNORE)
register_notifier(RESOURCED_NOTIFIER_SERVICE_LAUNCH, lowmem_limit_service);
- if ((mem_guiapp_limit && mem_guiapp_action != PROC_ACTION_IGNORE) ||
- (mem_widget_limit && mem_widget_action != PROC_ACTION_IGNORE))
+ if ((mem_guiapp_limit_bytes && mem_guiapp_action != PROC_ACTION_IGNORE) ||
+ (mem_widget_limit_bytes && mem_widget_action != PROC_ACTION_IGNORE))
register_notifier(RESOURCED_NOTIFIER_APP_LAUNCH, lowmem_limit_appwidget);
- if (mem_bgapp_limit && mem_bgapp_action != PROC_ACTION_IGNORE) {
- if (!(mem_guiapp_limit && mem_guiapp_action != PROC_ACTION_IGNORE) ||
- !(mem_widget_limit && mem_widget_action != PROC_ACTION_IGNORE)) {
+ if (mem_bgapp_limit_bytes && mem_bgapp_action != PROC_ACTION_IGNORE) {
+ if (!(mem_guiapp_limit_bytes && mem_guiapp_action != PROC_ACTION_IGNORE) ||
+ !(mem_widget_limit_bytes && mem_widget_action != PROC_ACTION_IGNORE)) {
_W("[MEMORY-LIMIT] Background app limit requires that both GUIApp and Widget limits to be set to work properly. Ignoring.");
} else {
register_notifier(RESOURCED_NOTIFIER_APP_BACKGRD, lowmem_limit_bgapp);
FOREACH_DIRENT(de, d, return -errno) {
_cleanup_free_ char *path = NULL;
- unsigned int limit;
+ unsigned long long limit_bytes;
if (de->d_type != DT_DIR)
continue;
if (ret < 0)
return -ENOMEM;
- ret = cgroup_read_node_uint32(path, MEMCG_LIMIT_BYTE, &limit);
- if (ret != RESOURCED_ERROR_NONE ||limit <= 0)
+ ret = cgroup_read_node_ulonglong(path, MEMCG_LIMIT_BYTE, &limit_bytes);
+ if (ret != RESOURCED_ERROR_NONE || limit_bytes == 0)
continue;
if (changeswappiness >= 0) {
MEMCG_SWAPPINESS, changeswappiness, path);
}
- lowmem_reassign_limit(path, limit, PROC_ACTION_KILL);
+ lowmem_reassign_limit(path, limit_bytes, PROC_ACTION_KILL);
}
return RESOURCED_ERROR_NONE;
}
#define CGROUP_ROOT_3072_THRES_LEAVE 500 /* MB */
#define CGROUP_ROOT_3072_NUM_VICTIMS 10
-static unsigned proactive_threshold;
-static unsigned proactive_leave;
-static unsigned lmk_start_threshold;
+static unsigned proactive_threshold_mb;
+static unsigned proactive_leave_mb;
+static unsigned lmk_start_threshold_mb;
static char *event_level = MEMCG_DEFAULT_EVENT_LEVEL;
enum cgroup_type type;
/* Desired size to be restored - level to be reached (MB)*/
- unsigned int size;
+ unsigned int size_mb;
/* Max number of processes to be considered */
unsigned int count;
/* Memory reclaim status */
#define LOWMEM_SET_REQUEST(c, __flags, __type, __size, __count, __cb) \
{ \
(c)->flags = __flags; (c)->type = __type; \
- (c)->size = __size; (c)->count = __count; \
+ (c)->size_mb = __size; (c)->count = __count; \
(c)->callback = __cb; \
}
static int num_max_victims = MAX_MEMORY_CGROUP_VICTIMS;
static int num_vict_between_check = MAX_VICTIMS_BETWEEN_CHECK;
-static unsigned long totalram;
-static unsigned long ktotalram;
+static unsigned long long totalram_bytes;
+static unsigned long totalram_kb;
static struct module_ops memory_modules_ops;
static const struct module_ops *lowmem_ops;
static inline void get_total_memory(void)
{
struct sysinfo si;
- if (totalram)
+ if (totalram_bytes)
return;
if (!sysinfo(&si)) {
- totalram = si.totalram;
- ktotalram = BYTE_TO_KBYTE(totalram);
+ totalram_bytes = (unsigned long long)si.totalram * si.mem_unit;
+ totalram_kb = BYTE_TO_KBYTE(totalram_bytes);
+
+ register_totalram_bytes(totalram_bytes);
+ }
+ else {
+ _E("Failed to get total ramsize from the kernel");
}
}
unsigned int lowmem_get_task_mem_usage_rss(const struct task_info *tsk)
{
- unsigned int size = 0, total_size = 0;
+ unsigned int size_kb = 0, total_size_kb = 0;
int index, ret;
pid_t pid;
* is used.
*/
if (tsk->pids == NULL) {
- ret = proc_get_ram_usage(tsk->pid, &size);
+ ret = proc_get_ram_usage(tsk->pid, &size_kb);
/* If there is no proc entry for given pid the process
* should be abandoned during further processing
if (ret < 0)
_D("failed to get rss memory usage of %d", tsk->pid);
- return size;
+ return size_kb;
}
for (index = 0; index < tsk->pids->len; index++) {
pid = g_array_index(tsk->pids, pid_t, index);
- ret = proc_get_ram_usage(pid, &size);
+ ret = proc_get_ram_usage(pid, &size_kb);
if (ret != RESOURCED_ERROR_NONE)
continue;
- total_size += size;
+ total_size_kb += size_kb;
}
- return total_size;
+ return total_size_kb;
}
static int lowmem_kill_victim(const struct task_info *tsk,
else
safe_kill(pid, SIGKILL);
- _D("[LMK] we killed, force(%d), %d (%s) score = %d, size: rss = %u, sigterm = %d\n",
+ _D("[LMK] we killed, force(%d), %d (%s) score = %d, size: rss = %u KB, sigterm = %d\n",
flags & OOM_FORCE, pid, appname, tsk->oom_score_adj,
tsk->size, sigterm);
*victim_size = tsk->size;
/* return LOWMEM_RECLAIM_CONT when killing should be continued */
static int lowmem_check_kill_continued(struct task_info *tsk, int flags)
{
- unsigned int available;
+ unsigned int available_mb;
/*
* Processes with the priority higher than perceptible are killed
tsk->pid, flags);
return LOWMEM_RECLAIM_DROP;
}
- available = proc_get_mem_available();
- if (available > lmk_start_threshold) {
+ available_mb = proc_get_mem_available();
+ if (available_mb > lmk_start_threshold_mb) {
_I("[LMK] available=%d MB, larger than %u MB, do not kill foreground",
- available, lmk_start_threshold);
+ available_mb, lmk_start_threshold_mb);
return LOWMEM_RECLAIM_RETRY;
}
return LOWMEM_RECLAIM_CONT;
* followed by kernel badness point calculation using heuristic.
* oom_score_adj is normalized by its unit, which varies -1000 ~ 1000.
*/
- pa = ta->oom_score_lru * (ktotalram / 2000) + ta->size;
- pb = tb->oom_score_lru * (ktotalram / 2000) + tb->size;
+ pa = ta->oom_score_lru * (totalram_kb / 2000) + ta->size;
+ pb = tb->oom_score_lru * (totalram_kb / 2000) + tb->size;
return pb - pa;
}
static unsigned int is_memory_recovered(unsigned int *avail, unsigned int thres)
{
unsigned int available = proc_get_mem_available();
- unsigned int should_be_freed = 0;
+ unsigned int should_be_freed_mb = 0;
if (available < thres)
- should_be_freed = thres - available;
+ should_be_freed_mb = thres - available;
/*
* free THRESHOLD_MARGIN more than real should be freed,
* because launching app is consuming up the memory.
*/
- if (should_be_freed > 0)
- should_be_freed += THRESHOLD_MARGIN;
+ if (should_be_freed_mb > 0)
+ should_be_freed_mb += THRESHOLD_MARGIN;
*avail = available;
- return should_be_freed;
+ return should_be_freed_mb;
}
static int lowmem_get_pids_proc(GArray *pids)
*/
static int lowmem_kill_victims(int max_victims,
int start_oom, int end_oom, unsigned should_be_freed, int flags,
- unsigned int *total_size, int *completed, int threshold)
+ unsigned int *total_size, int *completed, unsigned int threshold)
{
int total_count = 0;
GSList *proc_app_list = NULL;
struct task_info *tsk;
tsk = &g_array_index(candidates, struct task_info, i);
- tsk->size = lowmem_get_task_mem_usage_rss(tsk);
+ tsk->size = lowmem_get_task_mem_usage_rss(tsk); /* KB */
}
/*
int count = 0, victim_cnt = 0;
int max_victim_cnt = ctl->count;
int status = LOWMEM_RECLAIM_NONE;
- unsigned int available = 0;
- unsigned int total_size = 0;
+ unsigned int available_mb = 0;
+ unsigned int total_size_mb = 0;
unsigned int current_size = 0;
- unsigned int reclaim_size, shortfall = 0;
+ unsigned int reclaim_size_mb, shortfall_mb = 0;
enum cgroup_type cgroup_type = ctl->type;
- available = proc_get_mem_available();
- reclaim_size = ctl->size > available
- ? ctl->size - available : 0;
+ available_mb = proc_get_mem_available();
+ reclaim_size_mb = ctl->size_mb > available_mb /* MB */
+ ? ctl->size_mb - available_mb : 0;
- if (!reclaim_size) {
+ if (!reclaim_size_mb) {
status = LOWMEM_RECLAIM_DONE;
goto done;
}
if (calculate_range_of_oom(cgroup_type, &start_oom, &end_oom))
goto done;
- lmk_start_threshold = get_root_memcg_info()->threshold[MEM_LEVEL_OOM];
- shortfall = is_memory_recovered(&available, ctl->size);
+ lmk_start_threshold_mb = get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM];
+ shortfall_mb = is_memory_recovered(&available_mb, ctl->size_mb);
- if (!shortfall || !reclaim_size) {
+ if (!shortfall_mb || !reclaim_size_mb) {
status = LOWMEM_RECLAIM_DONE;
goto done;
}
/* precaution */
current_size = 0;
victim_cnt = lowmem_kill_victims(max_victim_cnt, start_oom, end_oom,
- reclaim_size, ctl->flags, ¤t_size, &status, ctl->size);
+ reclaim_size_mb, ctl->flags, ¤t_size, &status, ctl->size_mb);
if (victim_cnt) {
current_size = KBYTE_TO_MBYTE(current_size);
- reclaim_size -= reclaim_size > current_size
- ? current_size : reclaim_size;
- total_size += current_size;
+ reclaim_size_mb -= reclaim_size_mb > current_size
+ ? current_size : reclaim_size_mb;
+ total_size_mb += current_size;
count += victim_cnt;
- _I("[LMK] current: kill %d victims, reclaim_size=%uMB from %d to %d status=%s",
+ _I("[LMK] current: kill %d victims, reclaim_size=%uMB from %d to %d status=%s",
victim_cnt, current_size,
start_oom, end_oom, convert_status_to_str(status));
}
}
done:
_I("[LMK] Done: killed %d processes reclaimed=%uMB remaining=%uMB shortfall=%uMB status=%s",
- count, total_size, reclaim_size, shortfall, convert_status_to_str(status));
+ count, total_size_mb, reclaim_size_mb, shortfall_mb, convert_status_to_str(status));
/* After we finish reclaiming it's worth to remove oldest memps logs */
ctl->status = status;
*/
if (ctl->status == LOWMEM_RECLAIM_RETRY &&
!(ctl->flags & OOM_SINGLE_SHOT)) {
- unsigned int available = proc_get_mem_available();
+ unsigned int available_mb = proc_get_mem_available();
- if (available >= ctl->size) {
+ if (available_mb >= ctl->size_mb) {
_I("[LMK] Memory restored: requested=%uMB available=%uMB\n",
- ctl->size, available);
+ ctl->size_mb, available_mb);
ctl->status = LOWMEM_RECLAIM_DONE;
if (ctl->callback)
ctl->callback(ctl);
static void change_lowmem_state(unsigned int mem_state)
{
cur_mem_state = mem_state;
- lmk_start_threshold = get_root_memcg_info()->threshold[MEM_LEVEL_OOM];
+ lmk_start_threshold_mb = get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM];
resourced_notify(RESOURCED_NOTIFIER_MEM_LEVEL_CHANGED,
(void *)&cur_mem_state);
*/
static void lowmem_swap_memory(char *path)
{
- unsigned int available;
+ unsigned int available_mb;
if (cur_mem_state == MEM_LEVEL_HIGH)
return;
if (swap_get_state() != SWAP_ON)
return;
- available = proc_get_mem_available();
+ available_mb = proc_get_mem_available();
if (cur_mem_state != MEM_LEVEL_LOW &&
- available <= get_root_memcg_info()->threshold[MEM_LEVEL_LOW])
+ available_mb <= get_root_memcg_info()->threshold_mb[MEM_LEVEL_LOW])
swap_activate_act();
resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
static void lmk_act(void)
{
- unsigned int available;
+ unsigned int available_mb;
int ret;
int status = VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL;
vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING);
}
- available = proc_get_mem_available();
+ available_mb = proc_get_mem_available();
change_lowmem_state(MEM_LEVEL_OOM);
- if (available < get_root_memcg_info()->threshold_leave) {
+ if (available_mb < get_root_memcg_info()->threshold_leave_mb) {
struct lowmem_control *ctl;
ctl = LOWMEM_NEW_REQUEST();
if (ctl) {
LOWMEM_SET_REQUEST(ctl, OOM_IN_DEPTH,
- CGROUP_LOW, get_root_memcg_info()->threshold_leave,
+ CGROUP_LOW, get_root_memcg_info()->threshold_leave_mb,
num_max_victims, medium_cb);
lowmem_queue_request(&lmw, ctl);
}
}
}
-static unsigned int check_mem_state(unsigned int available)
+static unsigned int check_mem_state(unsigned int available_mb)
{
int mem_state;
for (mem_state = MEM_LEVEL_MAX - 1; mem_state > MEM_LEVEL_HIGH; mem_state--) {
- if (mem_state != MEM_LEVEL_OOM && available <= get_root_memcg_info()->threshold[mem_state])
+ if (mem_state != MEM_LEVEL_OOM &&
+ available_mb <= get_root_memcg_info()->threshold_mb[mem_state])
break;
- else if (mem_state == MEM_LEVEL_OOM && available <= lmk_start_threshold)
+ else if (mem_state == MEM_LEVEL_OOM && available_mb <= lmk_start_threshold_mb)
break;
}
/* setup memcg parameters depending on total ram size. */
static void setup_memcg_params(void)
{
- unsigned long long total_ramsize;
+ unsigned long total_ramsize_mb;
get_total_memory();
- total_ramsize = BYTE_TO_MBYTE(totalram);
+ total_ramsize_mb = BYTE_TO_MBYTE(totalram_bytes);
- _D("Total: %llu MB", total_ramsize);
- if (total_ramsize <= MEM_SIZE_64) {
+ _D("Total: %lu MB", total_ramsize_mb);
+ if (total_ramsize_mb <= MEM_SIZE_64) {
/* set thresholds for ram size 64M */
- proactive_threshold = PROACTIVE_64_THRES;
- proactive_leave = PROACTIVE_64_LEAVE;
+ proactive_threshold_mb = PROACTIVE_64_THRES;
+ proactive_leave_mb = PROACTIVE_64_LEAVE;
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_64_THRES_DEDUP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_64_THRES_SWAP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_64_THRES_LOW);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_64_THRES_MEDIUM);
memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_64_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_64_NUM_VICTIMS;
- } else if (total_ramsize <= MEM_SIZE_256) {
+ } else if (total_ramsize_mb <= MEM_SIZE_256) {
/* set thresholds for ram size 256M */
- proactive_threshold = PROACTIVE_256_THRES;
- proactive_leave = PROACTIVE_256_LEAVE;
+ proactive_threshold_mb = PROACTIVE_256_THRES;
+ proactive_leave_mb = PROACTIVE_256_LEAVE;
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_256_THRES_DEDUP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_256_THRES_SWAP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_256_THRES_LOW);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_256_THRES_MEDIUM);
memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_256_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_256_NUM_VICTIMS;
- } else if (total_ramsize <= MEM_SIZE_448) {
+ } else if (total_ramsize_mb <= MEM_SIZE_448) {
/* set thresholds for ram size 448M */
- proactive_threshold = PROACTIVE_448_THRES;
- proactive_leave = PROACTIVE_448_LEAVE;
+ proactive_threshold_mb = PROACTIVE_448_THRES;
+ proactive_leave_mb = PROACTIVE_448_LEAVE;
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_448_THRES_DEDUP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_448_THRES_SWAP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_448_THRES_LOW);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_448_THRES_MEDIUM);
memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_448_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_448_NUM_VICTIMS;
- } else if (total_ramsize <= MEM_SIZE_512) {
+ } else if (total_ramsize_mb <= MEM_SIZE_512) {
/* set thresholds for ram size 512M */
- proactive_threshold = PROACTIVE_512_THRES;
- proactive_leave = PROACTIVE_512_LEAVE;
+ proactive_threshold_mb = PROACTIVE_512_THRES;
+ proactive_leave_mb = PROACTIVE_512_LEAVE;
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_512_THRES_DEDUP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_512_THRES_SWAP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_512_THRES_LOW);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_512_THRES_MEDIUM);
memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_512_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_512_NUM_VICTIMS;
- } else if (total_ramsize <= MEM_SIZE_768) {
+ } else if (total_ramsize_mb <= MEM_SIZE_768) {
/* set thresholds for ram size 512M */
- proactive_threshold = PROACTIVE_768_THRES;
- proactive_leave = PROACTIVE_768_LEAVE;
+ proactive_threshold_mb = PROACTIVE_768_THRES;
+ proactive_leave_mb = PROACTIVE_768_LEAVE;
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_768_THRES_DEDUP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_768_THRES_SWAP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_768_THRES_LOW);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_768_THRES_MEDIUM);
memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_768_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_768_NUM_VICTIMS;
- } else if (total_ramsize <= MEM_SIZE_1024) {
+ } else if (total_ramsize_mb <= MEM_SIZE_1024) {
/* set thresholds for ram size more than 1G */
- proactive_threshold = PROACTIVE_1024_THRES;
- proactive_leave = PROACTIVE_1024_LEAVE;
+ proactive_threshold_mb = PROACTIVE_1024_THRES;
+ proactive_leave_mb = PROACTIVE_1024_LEAVE;
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_1024_THRES_DEDUP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_1024_THRES_SWAP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_1024_THRES_LOW);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_1024_THRES_MEDIUM);
memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_1024_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_1024_NUM_VICTIMS;
- } else if (total_ramsize <= MEM_SIZE_2048) {
- proactive_threshold = PROACTIVE_2048_THRES;
- proactive_leave = PROACTIVE_2048_LEAVE;
+ } else if (total_ramsize_mb <= MEM_SIZE_2048) {
+ proactive_threshold_mb = PROACTIVE_2048_THRES;
+ proactive_leave_mb = PROACTIVE_2048_LEAVE;
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_2048_THRES_DEDUP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_2048_THRES_SWAP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_2048_THRES_LOW);
memcg_set_leave_threshold(CGROUP_ROOT, CGROUP_ROOT_2048_THRES_LEAVE);
num_max_victims = CGROUP_ROOT_2048_NUM_VICTIMS;
} else {
- proactive_threshold = PROACTIVE_3072_THRES;
- proactive_leave = PROACTIVE_3072_LEAVE;
+ proactive_threshold_mb = PROACTIVE_3072_THRES;
+ proactive_leave_mb = PROACTIVE_3072_LEAVE;
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_3072_THRES_DEDUP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_3072_THRES_SWAP);
memcg_set_threshold(CGROUP_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_3072_THRES_LOW);
cur_oom_score_adj = pai->memory.oom_score_adj;
cur_memcg_idx = cgroup_get_type(cur_oom_score_adj);
- /* -1 means that this pid is not yet registered at the memory cgroup
+ /* This pid is not yet registered at the memory cgroup.
* plz, reference proc_create_app_info function
*/
if (cur_oom_score_adj != OOMADJ_APP_MAX + 10) {
static int lowmem_press_eventfd_read(int fd)
{
- uint64_t dummy_state;
+ unsigned long long dummy_state;
return read(fd, &dummy_state, sizeof(dummy_state));
}
static void lowmem_press_root_cgroup_handler(void)
{
- static unsigned int prev_available;
- unsigned int available;
+ static unsigned int prev_available_mb;
+ unsigned int available_mb;
int mem_state;
- available = proc_get_mem_available();
- if (prev_available == available)
+ available_mb = proc_get_mem_available();
+ if (prev_available_mb == available_mb)
return;
- mem_state = check_mem_state(available);
+ mem_state = check_mem_state(available_mb);
lowmem_trigger_memory_state_action(mem_state);
-
- prev_available = available;
+ prev_available_mb = available_mb;
}
static bool lowmem_press_eventfd_handler(int fd, void *data)
}
}
- return true;
+ return false;
}
static int lowmem_press_register_eventfd(struct memcg_info *mi)
const char *name = mi->name;
static fd_handler_h handler;
- if (mi->threshold[MEM_LEVEL_OOM] == LOWMEM_THRES_INIT)
+ if (mi->threshold_mb[MEM_LEVEL_OOM] == LOWMEM_THRES_INIT)
return 0;
evfd = memcg_set_eventfd(name, MEMCG_EVENTFD_MEMORY_PRESSURE,
mi->evfd = evfd;
- _I("Register event fd success for %s cgroup", name);
add_fd_read_handler(evfd, lowmem_press_eventfd_handler, NULL, NULL, &handler);
return 0;
}
lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
}
-int lowmem_trigger_reclaim(int flags, int victims, enum cgroup_type type, int threshold)
+int lowmem_trigger_reclaim(int flags, int victims, enum cgroup_type type, int threshold_mb)
{
struct lowmem_control *ctl = LOWMEM_NEW_REQUEST();
flags |= OOM_FORCE | OOM_IN_DEPTH | OOM_SINGLE_SHOT;
victims = victims > 0 ? victims : MAX_MEMORY_CGROUP_VICTIMS;
type = type > 0 ? type : CGROUP_LOW;
- threshold = threshold > 0 ? threshold : get_root_memcg_info()->threshold_leave;
+ threshold_mb = threshold_mb > 0 ? threshold_mb : get_root_memcg_info()->threshold_leave_mb;
lowmem_change_memory_state(MEM_LEVEL_CRITICAL, 1);
LOWMEM_SET_REQUEST(ctl, flags,
- type, threshold, victims,
+ type, threshold_mb, victims,
lowmem_force_reclaim_cb);
lowmem_queue_request(&lmw, ctl);
return 0;
}
-void lowmem_trigger_swap_reclaim(enum cgroup_type type, int swap_size)
+void lowmem_trigger_swap_reclaim(enum cgroup_type type, unsigned long long swap_size_bytes)
{
- int size, victims;
+ int size_mb, victims;
victims = num_max_victims > MAX_PROACTIVE_HIGH_VICTIMS
? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
- size = get_root_memcg_info()->threshold_leave + BYTE_TO_MBYTE(swap_size);
- _I("reclaim from swap module, type : %d, size : %d, victims: %d", type, size, victims);
- lowmem_trigger_reclaim(0, victims, type, size);
+ size_mb = get_root_memcg_info()->threshold_leave_mb + BYTE_TO_MBYTE(swap_size_bytes);
+ lowmem_trigger_reclaim(0, victims, type, size_mb);
}
bool lowmem_fragmentated(void)
static void lowmem_proactive_oom_killer(int flags, char *appid)
{
- unsigned int before;
+ unsigned int before_mb;
int victims;
- before = proc_get_mem_available();
+ before_mb = proc_get_mem_available();
/* If memory state is medium or normal, just return and kill in oom killer */
- if (before < get_root_memcg_info()->threshold[MEM_LEVEL_OOM] || before > proactive_leave)
+ if (before_mb < get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM] ||
+ before_mb > proactive_leave_mb)
return;
victims = num_max_victims > MAX_PROACTIVE_HIGH_VICTIMS
*/
struct heart_memory_data *md = heart_memory_get_memdata(appid, DATA_LATEST);
if (md) {
- unsigned int rss, after, size;
+ unsigned int rss_mb, after_mb, size_mb;
- rss = KBYTE_TO_MBYTE(md->avg_rss);
+ rss_mb = KBYTE_TO_MBYTE(md->avg_rss);
free(md);
- after = before - rss;
+ after_mb = before_mb - rss_mb;
/*
* after launching app, ensure that available memory is
* above threshold_leave
*/
- if (after >= get_root_memcg_info()->threshold[MEM_LEVEL_OOM])
+ if (after_mb >= get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM])
return;
- if (proactive_threshold - rss >= get_root_memcg_info()->threshold[MEM_LEVEL_OOM])
- size = proactive_threshold;
+ if (proactive_threshold_mb - rss_mb >= get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM])
+ size_mb = proactive_threshold_mb;
else
- size = rss + get_root_memcg_info()->threshold[MEM_LEVEL_OOM] + THRESHOLD_MARGIN;
+ size_mb = rss_mb + get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM] + THRESHOLD_MARGIN;
_D("history based proactive LMK : avg rss %u, available %u required = %u MB",
- rss, before, size);
- lowmem_trigger_reclaim(0, victims, CGROUP_LOW, size);
+ rss_mb, before_mb, size_mb);
+ lowmem_trigger_reclaim(0, victims, CGROUP_LOW, size_mb);
return;
}
* run proactive oom killer only when available is larger than
* dynamic process threshold
*/
- if (!proactive_threshold || before >= proactive_threshold)
+ if (!proactive_threshold_mb || before_mb >= proactive_threshold_mb)
return;
if (!(flags & PROC_LARGEMEMORY))
* free THRESHOLD_MARGIN more than real should be freed,
* because launching app is consuming up the memory.
*/
- _D("Run threshold based proactive LMK: memory level to reach: %u\n",
- proactive_leave + THRESHOLD_MARGIN);
- lowmem_trigger_reclaim(0, victims, CGROUP_LOW, proactive_leave + THRESHOLD_MARGIN);
+ _D("Run threshold based proactive LMK: memory level to reach: %u MB\n",
+ proactive_leave_mb + THRESHOLD_MARGIN);
+ lowmem_trigger_reclaim(0, victims, CGROUP_LOW, proactive_leave_mb + THRESHOLD_MARGIN);
}
unsigned int lowmem_get_proactive_thres(void)
{
- return proactive_threshold;
+ return proactive_threshold_mb;
}
static int lowmem_prelaunch_handler(void *data)
static inline int calculate_threshold_size(double ratio)
{
- int size = (double)totalram * ratio / 100.0;
- return BYTE_TO_MBYTE(size);
+ unsigned long long size_bytes = (double)totalram_bytes * ratio / 100.0;
+ return BYTE_TO_MBYTE(size_bytes);
}
static void load_configs(const char *path)
for (int cgroup = CGROUP_VIP; cgroup < CGROUP_END; cgroup++) {
if (memcg_conf->cgroup_limit[cgroup] > 0.0)
memcg_info_set_limit(get_memcg_info(cgroup),
- memcg_conf->cgroup_limit[cgroup]/100.0, totalram);
+ memcg_conf->cgroup_limit[cgroup]/100.0, totalram_bytes);
}
/* set MemoryLevelThreshold section */
if (lvl == MEM_LEVEL_OOM)
memcg_set_leave_threshold(CGROUP_ROOT,
- get_memcg_info(CGROUP_ROOT)->threshold[lvl] * 2);
+ get_memcg_info(CGROUP_ROOT)->threshold_mb[lvl] * 2);
}
else if (memcg_conf->threshold[lvl].threshold > 0) {
memcg_set_threshold(CGROUP_ROOT, lvl,
if (lvl == MEM_LEVEL_OOM)
memcg_set_leave_threshold(CGROUP_ROOT,
- get_memcg_info(CGROUP_ROOT)->threshold[lvl] * 2);
+ get_memcg_info(CGROUP_ROOT)->threshold_mb[lvl] * 2);
}
}
oom_popup_enable = memcg_conf->oom_popup;
/* set MemoryAppTypeLimit and MemoryAppStatusLimit section */
- lowmem_memory_init(memcg_conf->service.memory, memcg_conf->widget.memory,
- memcg_conf->guiapp.memory, memcg_conf->background.memory);
+ lowmem_memory_init(memcg_conf->service.memory_bytes, memcg_conf->widget.memory_bytes,
+ memcg_conf->guiapp.memory_bytes, memcg_conf->background.memory_bytes);
lowmem_action_init(memcg_conf->service.action, memcg_conf->widget.action,
memcg_conf->guiapp.action, memcg_conf->background.action);
{
/* print info of Memory section */
for (int cgroup = CGROUP_VIP; cgroup < CGROUP_END; cgroup++) {
- _I("[MEMORY-CGROUP] set memory for cgroup '%s' to %u bytes",
- convert_cgroup_type_to_str(cgroup), get_memcg_info(cgroup)->limit);
+ _I("[MEMORY-CGROUP] set memory for cgroup '%s' to %llu bytes",
+ convert_cgroup_type_to_str(cgroup), get_memcg_info(cgroup)->limit_bytes);
}
for (int cgroup = CGROUP_ROOT; cgroup < CGROUP_END; cgroup++) {
for (int mem_lvl = 0; mem_lvl < MEM_LEVEL_MAX; mem_lvl++) {
_I("[MEMORY-LEVEL] set threshold of %s for memory level '%s' to %u MB", convert_cgroup_type_to_str(cgroup),
- convert_memstate_to_str(mem_lvl), get_memcg_info(cgroup)->threshold[mem_lvl]);
+ convert_memstate_to_str(mem_lvl), get_memcg_info(cgroup)->threshold_mb[mem_lvl]);
}
}
_I("[LMK] set number of max victims as %d", num_max_victims);
- _I("[LMK] set threshold leave to %u MB", get_root_memcg_info()->threshold_leave);
- _I("[LMK] set proactive threshold to %u MB", proactive_threshold);
- _I("[LMK] set proactive low memory killer leave to %u MB", proactive_leave);
+ _I("[LMK] set threshold leave to %u MB", get_root_memcg_info()->threshold_leave_mb);
+ _I("[LMK] set proactive threshold to %u MB", proactive_threshold_mb);
+ _I("[LMK] set proactive low memory killer leave to %u MB", proactive_leave_mb);
/* print info of POPUP section */
_I("[POPUP] oom popup is %s", oom_popup_enable == true ? "enabled" : "disabled");
if (force) {
mem_state = state;
} else {
- unsigned int available = proc_get_mem_available();
- mem_state = check_mem_state(available);
+ unsigned int available_mb = proc_get_mem_available();
+ mem_state = check_mem_state(available_mb);
}
lowmem_trigger_memory_state_action(mem_state);
unsigned long lowmem_get_ktotalram(void)
{
- return ktotalram;
+ return totalram_kb;
}
-unsigned long lowmem_get_totalram(void)
+unsigned long long lowmem_get_totalram(void)
{
- return totalram;
+ return totalram_bytes;
}
void lowmem_restore_memcg(struct proc_app_info *pai)
#include "const.h"
#include "file-helper.h"
-#define DEDUP_PRIORITY 20
-#define DEDUP_ON_BOOT_TIME 60
-#define DEDUP_FULL_SCAN_INTERVAL 60
-#define DEDUP_INIT_SCAN_INTERVAL 300
-#define DEDUP_STAT_INTERVAL 60
+#define DEDUP_PRIORITY 20
+#define DEDUP_ON_BOOT_TIME 60
+#define DEDUP_FULL_SCAN_INTERVAL 60
+#define DEDUP_INIT_SCAN_INTERVAL 300
+#define DEDUP_STAT_INTERVAL 60
enum dedup_thread_op {
DEDUP_OP_ACTIVATE,
static enum dedup_mode dedup_mode = DEDUP_MODE_PERIODIC;
static bool dedup_on_lowmem = false;
-static int dedup_at_boot_delay = 60000;
-static int dedup_full_scan_interval = 600000;
-static int dedup_stat_interval = 60000;
-static int dedup_partial_scan_interval = 60000;
+static int dedup_at_boot_delay_sec = 60;
+static int dedup_full_scan_interval_sec = 600;
+static int dedup_stat_interval_sec = 60;
+static int dedup_partial_scan_interval_sec = 10;
static GSource *dedup_activating_timer = NULL;
static GSource *dedup_scanning_timer = NULL;
enum ksm_param {
KSM_PARAM_PAGES_TO_SCAN = 0,
KSM_PARAM_SLEEP_MSECS,
- KSM_PARAM_FULL_SCAN_INTERVAL,
+ KSM_PARAM_FULL_SCAN_INTERVAL_MSECS,
KSM_PARAM_SCAN_BOOST,
KSM_PARAM_MAX,
};
static int ksm_param_ranges[KSM_PARAM_MAX][2] = {
{0, 10000}, /* KSM_PARAM_PAGES_TO_SCAN */
{0, 1000}, /* KSM_PARAM_SLEEP_MSECS */
- {0, INT_MAX}, /* KSM_PARAM_FULL_SCAN_INTERVAL */
+ {0, INT_MAX}, /* KSM_PARAM_FULL_SCAN_INTERVAL_MSECS */
{100, 10000}, /* KSM_PARAM_SCAN_BOOST */
};
static unsigned int ksm_params[KSM_PARAM_MAX];
static void dedup_reset_scanning_timer(void)
{
- _D("reset scan-timer %d seconds", dedup_full_scan_interval);
+ _D("reset scan-timer %d seconds", dedup_full_scan_interval_sec);
dedup_scanning_timer =
- g_timeout_source_new_seconds(dedup_full_scan_interval);
+ g_timeout_source_new_seconds(dedup_full_scan_interval_sec);
g_source_set_callback(dedup_scanning_timer,
dedup_scanning_timer_cb, NULL, NULL);
g_source_attach(dedup_scanning_timer, NULL);
static void dedup_reset_stat_timer(void)
{
- _D("reset stat-timer %d seconds", dedup_stat_interval);
+ _D("reset stat-timer %d seconds", dedup_stat_interval_sec);
dedup_stat_timer =
- g_timeout_source_new_seconds(dedup_stat_interval);
+ g_timeout_source_new_seconds(dedup_stat_interval_sec);
g_source_set_callback(dedup_stat_timer,
dedup_stat_timer_cb, NULL, NULL);
g_source_attach(dedup_stat_timer, NULL);
}
if (!dedup_on_lowmem) {
- dedup_scanning_timer = g_timeout_source_new_seconds(dedup_full_scan_interval);
+ dedup_scanning_timer = g_timeout_source_new_seconds(dedup_full_scan_interval_sec);
g_source_set_callback(dedup_scanning_timer, dedup_scanning_timer_cb, NULL, NULL);
g_source_attach(dedup_scanning_timer, NULL);
}
- dedup_stat_timer = g_timeout_source_new_seconds(dedup_stat_interval);
+ dedup_stat_timer = g_timeout_source_new_seconds(dedup_stat_interval_sec);
g_source_set_callback(dedup_stat_timer, dedup_stat_timer_cb, NULL, NULL);
g_source_attach(dedup_stat_timer, NULL);
dedup_activated = true;
}
-/* translations for ms -> ns and s -> ns */
-#define DEDUP_ACT_STOMS 1000
-#define DEDUP_ACT_MSTONS 1000000
+/* translations for s -> ms and ns -> ms */
+#define DEDUP_ACT_STOMS 1000
+#define DEDUP_ACT_NSTOMS 1000000
 static bool dedup_check_scan_interval
-(struct timespec *now, struct timespec *old, unsigned long interval)
+(struct timespec *now, struct timespec *old, unsigned long interval_sec)
 {
-	unsigned long diff;
-	diff = (now->tv_sec - old->tv_sec) * DEDUP_ACT_STOMS;
-	diff += (now->tv_nsec - old->tv_nsec) / DEDUP_ACT_MSTONS;
-	return (diff >= interval);
+	unsigned long diff_ms;
+	diff_ms = (now->tv_sec - old->tv_sec) * DEDUP_ACT_STOMS;
+	diff_ms += (now->tv_nsec - old->tv_nsec) / DEDUP_ACT_NSTOMS;
+	return (diff_ms >= (interval_sec * DEDUP_ACT_STOMS));
}
/* used in dedup_do_scan */
* substituted by partial scan
*/
if (dedup_check_scan_interval(&now, &full_begin,
- dedup_full_scan_interval))
+ dedup_full_scan_interval_sec))
mode = KSM_SCAN_FULL;
else if (dedup_on_lowmem &&
dedup_check_scan_interval(&now, &partial_begin,
- dedup_partial_scan_interval))
+ dedup_partial_scan_interval_sec))
mode = KSM_SCAN_PARTIAL;
} else if (scan_mode == KSM_SCAN_PARTIAL) {
if (dedup_check_scan_interval(&now, &partial_begin,
- dedup_partial_scan_interval))
+ dedup_partial_scan_interval_sec))
mode = KSM_SCAN_PARTIAL;
}
/* if dedup_at_boot_enable is disabled,
* other daemon should activate dedup */
_D("[DEDUP] dedup booting done is called");
- if (dedup_at_boot_delay > 0)
+ if (dedup_at_boot_delay_sec > 0)
dedup_activating_timer =
- g_timeout_source_new_seconds(dedup_at_boot_delay);
+ g_timeout_source_new_seconds(dedup_at_boot_delay_sec);
else
dedup_activating_timer =
g_timeout_source_new_seconds(DEDUP_ON_BOOT_TIME);
static int dedup_parse_config_file(void)
{
int arg_ksm_pages_to_scan = 100;
- int arg_ksm_sleep = 20; // 20 msecs
- int arg_ksm_full_scan_interval = 60000; // 60 seconds
- int arg_ksm_scan_boost = 100;
+ int arg_ksm_sleep_ms = 20; // 20 msecs
+ int arg_ksm_full_scan_interval_ms = 60000; // 60 seconds
+ int arg_ksm_scan_boost = 1000;
struct dedup_conf *dedup_conf = get_dedup_conf();
if (!dedup_conf) {
else
ksm_params[KSM_PARAM_SCAN_BOOST] = arg_ksm_scan_boost;
- ksm_params[KSM_PARAM_SLEEP_MSECS] = arg_ksm_sleep;
- ksm_params[KSM_PARAM_FULL_SCAN_INTERVAL] = arg_ksm_full_scan_interval;
+ ksm_params[KSM_PARAM_SLEEP_MSECS] = arg_ksm_sleep_ms;
+ ksm_params[KSM_PARAM_FULL_SCAN_INTERVAL_MSECS] = arg_ksm_full_scan_interval_ms;
_I("[DEDUP] deduplication mode: %s", dedup_mode == DEDUP_MODE_PERIODIC ?
"kernel-managed" : "resourced-triggered");
_I("[DEDUP] deduplication on boot: %s", dedup_at_boot_enable ? "true" : "false");
_I("[DEDUP] scanning is invoked by %s", dedup_on_lowmem ?
"LOWMEM event" : "periodic timer");
- _I("[DEDUP] full scan interval: %d sec", dedup_full_scan_interval);
- _I("[DEDUP] stat monitoring interval: %d sec", dedup_stat_interval);
+ _I("[DEDUP] full scan interval: %d sec", dedup_full_scan_interval_sec);
+ _I("[DEDUP] stat monitoring interval: %d sec", dedup_stat_interval_sec);
_I("[DEDUP] ksm pages to scan: %d", ksm_params[KSM_PARAM_PAGES_TO_SCAN]);
- _I("[DEDUP] ksm sleep time: %d", ksm_params[KSM_PARAM_SLEEP_MSECS]);
- _I("[DEDUP] ksm full scan interval: %d", ksm_params[KSM_PARAM_FULL_SCAN_INTERVAL]);
+ _I("[DEDUP] ksm sleep time: %d ms", ksm_params[KSM_PARAM_SLEEP_MSECS]);
+ _I("[DEDUP] ksm full scan interval: %d ms", ksm_params[KSM_PARAM_FULL_SCAN_INTERVAL_MSECS]);
_I("[DEDUP] ksm scan boost: %d", ksm_params[KSM_PARAM_SCAN_BOOST]);
free_dedup_conf();
struct swap_file_control {
char crypt_type[MAX_TYPE_LENGTH];
char swapfile[64];
- long swap_file_size;
- unsigned long swap_reclaim_bytes;
+ unsigned long long swap_file_bytes;
+ unsigned long long swap_reclaim_bytes;
};
static struct swap_file_control file_control = {
.crypt_type = "aes",
.swapfile = SWAP_FILE_NAME,
- .swap_file_size = DEFAULT_SWAP_FILE_SIZE,
+ .swap_file_bytes = DEFAULT_SWAP_FILE_SIZE,
.swap_reclaim_bytes = 0,
};
struct swap_module_ops *swap = (struct swap_module_ops *)data;
file_control.swap_reclaim_bytes =
- file_control.swap_file_size * FILESWAP_FULLNESS_RATIO;
+ file_control.swap_file_bytes * FILESWAP_FULLNESS_RATIO;
return swap_set_file(file_control.swapfile, swap, file_control.crypt_type);
}
{
struct swap_module_ops *swap = (struct swap_module_ops *)data;
_cleanup_proc_swaps_free_ struct proc_swaps **swaps = NULL;
- unsigned int swap_size = 0;
+ unsigned long long swap_size = 0;
int n, i;
n = proc_get_swaps(&swaps);
{
struct swap_module_ops *swap = (struct swap_module_ops *)data;
- swap->k_size = BYTE_TO_KBYTE(file_control.swap_file_size);
+ swap->k_size = BYTE_TO_KBYTE(file_control.swap_file_bytes);
return 0;
}
static int swap_file_conf(void *data)
{
_I("[SWAP] fileswap crypt type = %s", file_control.crypt_type);
- _I("[SWAP] fileswap file size = %ld", file_control.swap_file_size);
+ _I("[SWAP] fileswap file size = %llu bytes", file_control.swap_file_bytes);
return RESOURCED_ERROR_NONE;
}
#define SWAP_EARLYRECLAIM_TIME_DEFAULT 60
#define SWAP_EARLYRECLAIM_INTERVAL 1
#define SWAP_EARLYRECLAIM_MAXTRY 2
-#define SWAP_EARLYRECLAIM_THRESHOLD_DEFAULT MBYTE_TO_BYTE(1024)
+#define SWAP_EARLYRECLAIM_THRESHOLD_DEFAULT MBYTE_TO_BYTE(20)
#define EARLYRECLAIM_WITH_AN_EXPLANATION_FOR_LAYMEN "early memory reclaim (done to retrieve resources used by daemons during system start-up)"
static bool arg_swap_enable = false;
static bool arg_swap_at_boot = false;
-static int arg_timer_swap_at_boot = SWAP_EARLYRECLAIM_TIME_DEFAULT;
+static int arg_timer_swap_at_boot_sec = SWAP_EARLYRECLAIM_TIME_DEFAULT;
static enum swap_type arg_swap_type = SWAP_TYPE_ZRAM;
static int current_swappiness = SWAP_MEMCG_SWAPPINESS;
static GSList *swap_module; /* module list */
static GSource *swap_activating_timer = NULL;
-static unsigned long arg_swap_at_boot_threshold = SWAP_EARLYRECLAIM_THRESHOLD_DEFAULT;
+static unsigned long long arg_swap_at_boot_threshold_bytes = SWAP_EARLYRECLAIM_THRESHOLD_DEFAULT;
static int arg_swap_at_boot_maxtry = SWAP_EARLYRECLAIM_MAXTRY;
-static int arg_swap_at_boot_interval = SWAP_EARLYRECLAIM_INTERVAL;
+static int arg_swap_at_boot_interval_sec = SWAP_EARLYRECLAIM_INTERVAL;
static int swap_sort_func(const struct swap_module_ops *a,
const struct swap_module_ops *b)
void swap_add(const struct swap_module_ops *ops)
{
- _I("Swap module name: %s", ops->name);
swap_module = g_slist_insert_sorted(swap_module,
(gpointer)ops,
(GCompareFunc) swap_sort_func);
return "";
}
-static unsigned int swap_calculate_hard_limit_in_bytes(unsigned int mem_subcg_usage)
+static inline unsigned long long swap_calculate_hard_limit_in_bytes(unsigned long long mem_subcg_usage_bytes)
{
- return (unsigned int)((float)mem_subcg_usage * swap_hard_limit_fraction);
+ return (unsigned long long)((double)mem_subcg_usage_bytes * swap_hard_limit_fraction);
}
static inline void swap_add_bundle(struct swap_thread_bundle *bundle)
return error;
}
-
static int swap_use_hard_limit(char *memcg)
{
int ret;
- unsigned int usage, memcg_limit;
+ unsigned long long usage_bytes, memcg_limit_bytes;
- ret = cgroup_read_node_uint32(memcg, MEMCG_USAGE, &usage);
+ ret = cgroup_read_node_ulonglong(memcg, MEMCG_USAGE, &usage_bytes);
if (ret != RESOURCED_ERROR_NONE)
- usage = 0;
+ usage_bytes = 0;
- memcg_limit = swap_calculate_hard_limit_in_bytes(usage);
- _D("Swap request: %s cgroup usage is %u, hard limit set to %u (hard limit fraction %f)",
- memcg, usage, memcg_limit, swap_hard_limit_fraction);
- if (memcg_limit != 0)
- ret = check_oom_and_set_limit(memcg, memcg_limit);
+ memcg_limit_bytes = swap_calculate_hard_limit_in_bytes(usage_bytes);
+ _D("[SWAP] Swap request: %s cgroup usage is %llu bytes, hard limit set to %llu bytes (hard limit fraction %f)",
+ memcg, usage_bytes, memcg_limit_bytes, swap_hard_limit_fraction);
+ if (memcg_limit_bytes != 0)
+ ret = check_oom_and_set_limit(memcg, memcg_limit_bytes);
else {
/* If the group is empty don't set the limit to enable adding processes. */
ret = cgroup_write_node_int32(memcg, MEMCG_SWAP_LIMIT_BYTE, -1);
static int swap_use_force_reclaim(char *memcg)
{
int ret, len;
+ unsigned long long usage_bytes;
int try = SWAP_FORCE_RECLAIM_NUM_MAX;
- unsigned int usage, nr_to_reclaim;
- unsigned int total_reclaim = 0;
+ unsigned int nr_to_reclaim;
+ unsigned int total_reclaim_pages = 0;
bool root_memcg = false;
/*
* anoynymous memory usage.
*/
if (root_memcg)
- ret = cgroup_read_node_uint32(memcg, MEMCG_USAGE, &usage);
+ ret = cgroup_read_node_ulonglong(memcg, MEMCG_USAGE, &usage_bytes);
else
- ret = memcg_get_anon_usage(memcg, &usage);
+ ret = memcg_get_anon_usage(memcg, &usage_bytes);
+
if (ret != RESOURCED_ERROR_NONE)
- usage = 0;
+ usage_bytes = 0;
- nr_to_reclaim = BYTE_TO_PAGE(usage);
+ nr_to_reclaim = BYTE_TO_PAGE(usage_bytes);
if (nr_to_reclaim <= SWAP_RECLAIM_PAGES_MIN)
break; /* don't reclaim if little gain */
if (nr_to_reclaim > SWAP_RECLAIM_PAGES_MAX)
nr_to_reclaim = SWAP_RECLAIM_PAGES_MAX;
- total_reclaim += nr_to_reclaim;
+ total_reclaim_pages += nr_to_reclaim;
ret = cgroup_write_node_uint32(memcg, MEMCG_FORCE_RECLAIM,
nr_to_reclaim);
if (ret != RESOURCED_ERROR_NONE)
try -= 1;
} while (try > 0);
- _D("FORCE_RECLAIM tried %u pages from %s", total_reclaim, memcg);
+ _D("[SWAP] FORCE_RECLAIM tried %u pages from %s", total_reclaim_pages, memcg);
return ret;
}
return fail ? fail : 0;
}
-//static int swap_reclaim_memcg(struct swap_status_msg msg)
static int swap_reclaim_memcg(char *path)
{
int r;
*/
if (!early_reclaim) {
int try = arg_swap_at_boot_maxtry;
- unsigned int usage, prev_usage = 0;
+ unsigned long long usage_bytes, prev_usage_bytes = 0;
- r = cgroup_read_node_uint32(MEMCG_PATH,
- MEMCG_SWAP_USAGE, &prev_usage);
+ r = cgroup_read_node_ulonglong(MEMCG_PATH,
+ MEMCG_SWAP_USAGE, &prev_usage_bytes);
if (r)
- prev_usage = UINT_MAX;
+			prev_usage_bytes = ULLONG_MAX;
for (;;) {
try--;
swap_start_reclaim(MEMCG_PATH);
- r = cgroup_read_node_uint32(MEMCG_PATH,
- MEMCG_SWAP_USAGE, &usage);
+ r = cgroup_read_node_ulonglong(MEMCG_PATH,
+ MEMCG_SWAP_USAGE, &usage_bytes);
if (r) {
_E("Early reclaimed is aborted");
break;
* The default threshold is very large, so it may
* reclaim once.
*/
- if (!try || prev_usage - usage < arg_swap_at_boot_threshold)
+ if (!try || prev_usage_bytes - usage_bytes < arg_swap_at_boot_threshold_bytes)
break;
- prev_usage = usage;
+ prev_usage_bytes = usage_bytes;
/*
* To prevent continuous reclaim to harm entire system,
* having relaxation on each reclaim
*/
- sleep(arg_swap_at_boot_interval);
+ sleep(arg_swap_at_boot_interval_sec);
}
early_reclaim = true;
}
* partially to keep things simple but primarily because it can
* introduce an artificial delay since the timer is ran async
* in another thread. */
- if (arg_timer_swap_at_boot == 0) {
+ if (arg_timer_swap_at_boot_sec == 0) {
swap_activate_timer_cb(NULL);
return RESOURCED_ERROR_NONE;
}
- _D("booting done; starting up a timer to perform an " EARLYRECLAIM_WITH_AN_EXPLANATION_FOR_LAYMEN " %ds from now", arg_timer_swap_at_boot);
- swap_activating_timer = g_timeout_source_new_seconds((guint) arg_timer_swap_at_boot);
+ _D("booting done; starting up a timer to perform an " EARLYRECLAIM_WITH_AN_EXPLANATION_FOR_LAYMEN " %ds from now", arg_timer_swap_at_boot_sec);
+ swap_activating_timer = g_timeout_source_new_seconds((guint) arg_timer_swap_at_boot_sec);
g_source_set_callback(swap_activating_timer, swap_activate_timer_cb, NULL, NULL);
g_source_attach(swap_activating_timer, NULL);
if (swap_node == SWAP_NODE_FORCE_RECLAIM)
return RESOURCED_ERROR_NONE;
- ret = check_oom_and_set_limit(mi->name, mi->limit);
+ ret = check_oom_and_set_limit(mi->name, mi->limit_bytes);
if (ret != RESOURCED_ERROR_NONE)
_E("Failed to change hard limit of %s cgroup to -1", mi->name);
else
{
pid_t pid;
struct cgroup *cgroup_swap;
-// struct swap_status_msg ss_msg;
do_expr_unless_g_variant_get_typechecked(return, params, "(i)", &pid);
if (pid <= 0) {
if (!cgroup_swap)
return;
swap_move_to_cgroup_by_pid(CGROUP_LOW, pid);
-/* ss_msg.pid = pid;
- ss_msg.type = CGROUP_LOW;
- ss_msg.memcg_info = cgroup_swap->memcg_info;*/
swap_start_handler(cgroup_swap->memcg_info->name);
_I("swap cgroup entered : pid : %d", (int)pid);
}
static int resourced_swap_check_runtime_support(void *data)
{
int r;
- uint32_t usage;
+ unsigned long long usage_bytes;
/*
* Check whether CONFIG_SWAP is enabled in kernel.
/*
* Check whether kernel is supporting MEMCG_SWAP.
*/
- r = cgroup_read_node_uint32(MEMCG_PATH,
- MEMCG_SWAP_USAGE, &usage);
+ r = cgroup_read_node_ulonglong(MEMCG_PATH,
+ MEMCG_SWAP_USAGE, &usage_bytes);
if (r)
return -ENOENT;
int max_comp_streams;
char comp_algorithm[MAX_TYPE_LENGTH];
float ratio;
- unsigned long zram_reclaim_bytes;
+ unsigned long long zram_reclaim_bytes;
};
static struct swap_zram_control zram_control = {
static int swap_zram_compact(void)
{
- unsigned int total;
- static unsigned int last_total;
+ unsigned long long total_bytes;
+ static unsigned long long last_total_bytes;
int r;
_D("call zram compact");
- r = fread_uint(SWAP_ZRAM_MEM_USED_TOTAL, &total);
+ r = fread_ulonglong(SWAP_ZRAM_MEM_USED_TOTAL, &total_bytes);
if (r < 0) {
_E("fail to read %s", SWAP_ZRAM_MEM_USED_TOTAL);
return r;
* Until zram size not increased of at least 1 MB from last compaction
* then it not makes any sense to compact it again.
*/
- if ((total - last_total) < MBYTE_TO_BYTE(1))
+ if ((total_bytes - last_total_bytes) < MBYTE_TO_BYTE(1))
return -ENOMEM;
- last_total = total;
+ last_total_bytes = total_bytes;
r = fwrite_int(SWAP_ZRAM_COMPACT, 1);
if (r < 0) {
_E("fail to write %s", SWAP_ZRAM_COMPACT);
return r;
}
- r = fread_uint(SWAP_ZRAM_MEM_USED_TOTAL, &total);
+ r = fread_ulonglong(SWAP_ZRAM_MEM_USED_TOTAL, &total_bytes);
if (r < 0) {
_E("fail to read %s", SWAP_ZRAM_MEM_USED_TOTAL);
return r;
static int swap_zram_activate(void *data)
{
struct swap_module_ops *swap = (struct swap_module_ops *)data;
- unsigned int swap_size_bytes, read_size_bytes;
+ unsigned long long swap_size_bytes, read_size_bytes;
int r;
swap_size_bytes = KBYTE_TO_BYTE(swap->k_size);
}
}
- r = fread_uint(SWAP_ZRAM_DISK_SIZE, &read_size_bytes);
+ r = fread_ulonglong(SWAP_ZRAM_DISK_SIZE, &read_size_bytes);
if (r < 0) {
_E("fail to read zram disk_size");
return r;
/* disksize can be pre-fixed by other means, do not set size in that case */
if (read_size_bytes == 0) {
- r = fwrite_uint(SWAP_ZRAM_DISK_SIZE, swap_size_bytes);
+ r = fwrite_ulonglong(SWAP_ZRAM_DISK_SIZE, swap_size_bytes);
if (r < 0) {
_E("fail to write disk_size");
return r;
}
- r = fread_uint(SWAP_ZRAM_DISK_SIZE, &read_size_bytes);
+ r = fread_ulonglong(SWAP_ZRAM_DISK_SIZE, &read_size_bytes);
if (r < 0) {
_E("fail to read zram disk_size");
return r;
/* Check if zram was sucessfully initialized (zcomp rollback case) */
if (read_size_bytes < swap_size_bytes) {
- _E("swap size (%d) less than expected swap size (%d)",
+ _E("swap size (%llu) less than expected swap size (%llu)",
read_size_bytes, swap_size_bytes);
return RESOURCED_ERROR_OUT_OF_MEMORY;
}
static int swap_zram_reclaim(void *data)
{
int r, type;
- static unsigned int swap_total = 0;
+ static unsigned long long swap_total_bytes = 0;
static bool zram_compact;
- unsigned long swap_available;
- unsigned int swap_usage;
+ unsigned long long swap_available_bytes;
+ unsigned long long swap_usage_bytes;
float swapcg_usage_ratio;
unsigned int ram_available;
- swap_available = KBYTE_TO_BYTE(proc_get_swap_free());
-
- _D("swap available %lu, reclaimg byte %lu", swap_available, zram_control.zram_reclaim_bytes);
+ swap_available_bytes = KBYTE_TO_BYTE(proc_get_swap_free());
/*
* Most kernel doesn't support migration and compaction of zmalloc.
return -ENOSPC;
}
- if (swap_available >= zram_control.zram_reclaim_bytes)
+ if (swap_available_bytes >= zram_control.zram_reclaim_bytes)
return 0;
if (!zram_compact) {
* If swap usage of this cgroup is higher, run LMK about background applications.
* Otherwise, need to check all processes in order to find mallicious process.
*/
- if (!swap_total)
- swap_total = proc_get_swap_total();
+ if (!swap_total_bytes)
+ swap_total_bytes = KBYTE_TO_BYTE(proc_get_swap_total());
- r = memcg_get_swap_usage(MEMCG_LOW_GROUP_PATH, &swap_usage);
+ r = memcg_get_swap_usage(MEMCG_LOW_GROUP_PATH, &swap_usage_bytes);
if (r)
return r;
- swapcg_usage_ratio = (float)(swap_usage / (swap_total - swap_available) *100);
+	swapcg_usage_ratio = (float)swap_usage_bytes * 100 / (swap_total_bytes - swap_available_bytes);
if (swapcg_usage_ratio > SWAPCG_CHECK_RATIO)
type = CGROUP_LOW;
else
char crypt_type[MAX_TYPE_LENGTH];
char swapfile[64];
float zpool_ratio;
- long zswap_file_size;
- unsigned long zswap_reclaim_bytes;
+ unsigned long long zswap_file_bytes;
+ unsigned long long zswap_reclaim_bytes;
char zpool_type[MAX_TYPE_LENGTH];
};
.crypt_type = "aes",
.swapfile = SWAP_FILE_NAME,
.zpool_ratio = DEFAULT_ZSWAP_POOL_RATIO,
- .zswap_file_size = DEFAULT_ZSWAP_FILE_SIZE,
+ .zswap_file_bytes = DEFAULT_ZSWAP_FILE_SIZE,
.zswap_reclaim_bytes = 0,
.zpool_type = "zbud",
};
int r;
zswap_control.zswap_reclaim_bytes =
- zswap_control.zswap_file_size * ZSWAP_FULLNESS_RATIO;
+ zswap_control.zswap_file_bytes * ZSWAP_FULLNESS_RATIO;
r = swap_set_file(zswap_control.swapfile, swap, zswap_control.crypt_type);
if (r < 0)
static int swap_zswap_reclaim(void *data)
{
int r;
- unsigned int swap_size;
+ unsigned int swap_size_pages;
+ unsigned long long swap_size_bytes;
- r = fread_uint(ZSWAP_WRITTEN_SIZE, &swap_size);
+ r = fread_uint(ZSWAP_WRITTEN_SIZE, &swap_size_pages);
if (r < 0) {
_E("fail to read written swap size");
return r;
}
- swap_size <<= PAGE_SHIFT;
- if (swap_size <= zswap_control.zswap_reclaim_bytes)
+	swap_size_bytes = (unsigned long long)swap_size_pages << PAGE_SHIFT;
+ if (swap_size_bytes <= zswap_control.zswap_reclaim_bytes)
return 0;
/*
* So, it requires to trigger proactive oom killer.
*/
- lowmem_trigger_swap_reclaim(CGROUP_ROOT, swap_size);
+ lowmem_trigger_swap_reclaim(CGROUP_ROOT, swap_size_bytes);
return -ENOSPC;
}
if (access(ZSWAP_POOL_PERCENT, R_OK) != 0)
return -ENOENT;
- swap->k_size = BYTE_TO_KBYTE(zswap_control.zswap_file_size);
+ swap->k_size = BYTE_TO_KBYTE(zswap_control.zswap_file_bytes);
return 0;
}
memset(zswap_control.zpool_type, 0, MAX_TYPE_LENGTH);
_I("[SWAP] zswap type = %s", zswap_control.crypt_type);
- _I("[SWAP] zswap filesize = %ld", zswap_control.zswap_file_size);
+ _I("[SWAP] zswap filesize = %llu bytes", zswap_control.zswap_file_bytes);
_I("[SWAP] zswap pool ratio = %f", zswap_control.zpool_ratio);
_I("[SWAP] zswap pool type = %s", zswap_control.zpool_type);
}
/* register a notification when this service memory is over a threshold */
- if (pci->mem_action.memory && pci->mem_action.action) {
+ if (pci->mem_action.memory_bytes && pci->mem_action.action) {
struct proc_limit_status pls = {0, };
pls.ps.pid = pid;
pls.ps.pci = pci;
- pls.limit = pci->mem_action.memory;
+ pls.limit_bytes = pci->mem_action.memory_bytes;
pls.action = pci->mem_action.action;
resourced_notify(RESOURCED_NOTIFIER_LIMIT_SYSTEM_SERVICE, &pls);
}