Rename the files of the module 'lowmem' 03/286503/1
author    SangYoun Kwak <sy.kwak@samsung.com>
Mon, 9 Jan 2023 02:29:40 +0000 (11:29 +0900)
committer SangYoun Kwak <sy.kwak@samsung.com>
Mon, 9 Jan 2023 05:26:35 +0000 (14:26 +0900)
The header and source files of the 'lowmem' module were renamed:
    src/resource-limiter/memory/vmpressure-lowmem-handler.c
        -> src/resource-limiter/memory/lowmem.c
    src/resource-limiter/memory/lowmem-handler.h
        -> src/resource-limiter/memory/lowmem.h
Since the header was renamed to lowmem.h, the source files that
include 'lowmem-handler.h' were updated accordingly:
(#include "lowmem-handler.h" -> #include "lowmem.h")
    src/process/proc-main.c
    src/resource-limiter/memory/lowmem-dbus.c
    src/resource-limiter/memory/lowmem-limit.c
    src/resource-limiter/memory/lowmem-system.c
    src/resource-optimizer/memory/swap/fileswap.c
    src/resource-optimizer/memory/swap/zramswap.c
    src/resource-optimizer/memory/swap/zswap.c

Change-Id: I9663719d96b0e86d100222b9692ea12423e42bc7
Signed-off-by: SangYoun Kwak <sy.kwak@samsung.com>
src/process/proc-main.c
src/resource-limiter/memory/lowmem-dbus.c
src/resource-limiter/memory/lowmem-handler.h [deleted file]
src/resource-limiter/memory/lowmem-limit.c
src/resource-limiter/memory/lowmem-system.c
src/resource-limiter/memory/lowmem.c [new file with mode: 0644]
src/resource-limiter/memory/lowmem.h [new file with mode: 0644]
src/resource-limiter/memory/vmpressure-lowmem-handler.c [deleted file]
src/resource-optimizer/memory/swap/fileswap.c
src/resource-optimizer/memory/swap/zramswap.c
src/resource-optimizer/memory/swap/zswap.c
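
For reference, a minimal consumer sketch (hypothetical, not part of this change) of how callers look after the rename; it assumes the same API is now declared in lowmem.h, as it was in the removed lowmem-handler.h shown below.

/* Hypothetical usage sketch: include "lowmem.h" instead of "lowmem-handler.h";
 * the declared functions themselves are unchanged. */
#include "lowmem.h"

static void example_force_reclaim(void)
{
	/* Ask the LMK to bring available memory back up to 100 MB, considering
	 * at most 5 victims; flag and threshold values are illustrative only. */
	lowmem_trigger_reclaim(OOM_FORCE | OOM_SINGLE_SHOT, 5, OOM_SCORE_LOW, 100);
}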

index d295bba0ccb69091b9668af0cef0ea41b6f12064..8268281724372f3d7ca7b2ae48fbfc11b914719c 100644 (file)
@@ -37,7 +37,7 @@
 #include <sys/resource.h>
 
 
-#include "lowmem-handler.h"
+#include "lowmem.h"
 #include "freezer.h"
 #include "notifier.h"
 #include "proc-process.h"
index 530a3162ad4de7743a72261557620a66507abc36..268e1b14376237318553a7c4c4de42c813be36c8 100644 (file)
@@ -26,7 +26,7 @@
  */
 
 #include "trace.h"
-#include "lowmem-handler.h"
+#include "lowmem.h"
 #include "dbus-handler.h"
 #include "resourced.h"
 #include "macro.h"
diff --git a/src/resource-limiter/memory/lowmem-handler.h b/src/resource-limiter/memory/lowmem-handler.h
deleted file mode 100644 (file)
index 154ff12..0000000
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * resourced
- *
- * Copyright (c) 2013 Samsung Electronics Co., Ltd. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * @file lowmem_handler.h
- * @desc handler function for setting memcgroup memory controller and
- *     receiving event fd.
- **/
-
-#ifndef __LOWMEM_HANDLER_H__
-#define __LOWMEM_HANDLER_H__
-
-#include <proc-common.h>
-#include <memory-cgroup.h>
-#include "fd-handler.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-#define MAX_MEMORY_CGROUP_VICTIMS      10
-
-
-struct task_info {
-       /*
-        * Mostly, there are not multiple processes with the same pgid.
-        * So, for the frequent case, we use pid variable to avoid
-        * allocating arrays.
-        */
-       pid_t pid;
-       GArray *pids;
-       pid_t pgid;
-       /* oom_score_adj is smae as /proc/<pid>/oom_score_adj */
-       int oom_score_adj;
-       /*
-        * oom_score_lru is same as oom_score_adj or adjusted by
-        * proc_app_info lru_state for apps that are marked as favourite.
-        *
-        * oom_score_lru is the main value used in comparison for LMK.
-        */
-       int oom_score_lru;
-       int size;
-       struct proc_app_info *pai;
-};
-
-struct memory_limit_event {
-       int fd;
-       unsigned long long threshold_bytes;             /* byte */
-       char *path;
-       enum proc_action action;
-};
-
-/**
- * @desc execute /usr/bin/memps and make log file with pid and process name
- */
-//void make_memps_log(enum mem_log path, pid_t pid, char *victim_name);
-
-void lowmem_memory_init(unsigned long long service_limit_bytes, unsigned long long widget_limit_bytes,
-               unsigned long long guiapp_limit_bytes, unsigned long long bgapp_limit_bytes);
-void lowmem_action_init(int service_action, int widget_action,
-               int guiapp_action, int bgapp_action);
-int lowmem_limit_set_app(unsigned long long limit_bytes, struct proc_app_info *pai,
-               enum proc_action action);
-int lowmem_limit_set_system_service(pid_t pid, unsigned long long limit_bytes,
-               const char *name, enum proc_action action);
-void lowmem_dbus_init(void);
-int lowmem_trigger_reclaim(int flags, int victims, enum oom_score score, int threshold);
-void lowmem_trigger_swap_reclaim(enum oom_score score, unsigned long long swap_size_bytes);
-void lowmem_change_memory_state(int state, int force);
-unsigned long lowmem_get_ktotalram(void);
-unsigned long long lowmem_get_totalram(void);
-void lowmem_trigger_swap(pid_t pid, char *path, bool move);
-void lowmem_limit_init(void);
-void lowmem_limit_exit(void);
-int lowmem_limit_move_cgroup(struct proc_app_info *pai);
-int lowmem_reassign_limit(const char *dir,
-               unsigned long long limit_bytes, enum proc_action action);
-unsigned int lowmem_get_task_mem_usage_rss(const struct task_info *tsk);
-bool lowmem_fragmentated(void);
-unsigned int lowmem_get_proactive_thres(void);
-void lowmem_system_init();
-void lowmem_system_exit();
-
-/**
- * @desc restore memory cgroup from pid when resourced is restarted
- */
-void lowmem_restore_memcg(struct proc_app_info *pai);
-
-/*
- * Return memcg pointer to selected cgroup.
- */
-
-enum oom_killer_cb_flags {
-       OOM_NONE                = 0x0,          /* for main oom killer thread */
-       OOM_FORCE               = (0x1 << 0),   /* for forced kill */
-       OOM_NOMEMORY_CHECK      = (0x1 << 1),   /* check victims' memory */
-       /*------------------------------------------------------------------*/
-       OOM_IN_DEPTH            = (0x1 << 2),   /* consider all possible cgroups */
-       OOM_SINGLE_SHOT         = (0x1 << 3),   /* do not retry if failed to reclaim memory */
-       OOM_REVISE              = (0x1 << 4),   /* Run another attemp case failed for the first time*/
-       OOM_DROP                = (0x1 << 5),   /* deactivate the worker */
-};
-
-/**
- * @brief  Low memory killer status
- *
- * LOWMEM_RECLAIM_NONE: no potential candidates for memory reclaim
- * LOWMEM_RECLAIM_DONE: requested size of memory has been successfully
- *                     reclaimed through terminating number of processes
- * LOWMEM_RECLAIM_DROP: the whole reclaim procedure should be dropped
- * LOWMEM_RECLAIM_CONT: selected process might be considered as a potential
- *                     memory reclaim source - green light for terminating
- *                     the process
- * LOWMEM_RECLAIM_RETRY : check again after some seconds
- *                     because killing processes will take some time.
- * LOWMEM_RECLAIM_NEXT_TYPE : no potential candidates for memory reclaim in the current type
- */
-enum {
-       LOWMEM_RECLAIM_NONE,
-       LOWMEM_RECLAIM_DONE,
-       LOWMEM_RECLAIM_DROP,
-       LOWMEM_RECLAIM_CONT,
-       LOWMEM_RECLAIM_RETRY,
-       LOWMEM_RECLAIM_NEXT_TYPE
-};
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /*__LOWMEM_HANDLER_H__*/
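
A small hedged sketch (not part of this commit) of how the LOWMEM_RECLAIM_* status values above are typically consumed; struct lowmem_control and the request macros it relies on are defined in lowmem.c further below.

/* Hypothetical completion callback: the LMK worker stores the final reclaim
 * status in ctl->status (see lowmem_handle_request() in lowmem.c below). */
static void example_reclaim_cb(struct lowmem_control *ctl)
{
	if (ctl->status == LOWMEM_RECLAIM_DONE)
		_I("reclaim done, requested size was %u MB", ctl->size_mb);
	else if (ctl->status == LOWMEM_RECLAIM_RETRY)
		_I("memory still low, the worker retried before finishing");
}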
index ee7a11c2cdc28b1404c64f83cfb359c032cb1352..96e7ca5ac46a80e5487372bf78cef9cc625590a3 100644 (file)
@@ -37,7 +37,7 @@
 #include "macro.h"
 #include "module.h"
 #include "module-data.h"
-#include "lowmem-handler.h"
+#include "lowmem.h"
 #include "notifier.h"
 #include "procfs.h"
 #include "cgroup.h"
index 53bffa4dccc7cc29ca06f314b89458a838832d96..dd202f522ec88ec45484435e9e20cf02ee557ea0 100644 (file)
@@ -36,7 +36,7 @@
 #include "macro.h"
 #include "module.h"
 #include "module-data.h"
-#include "lowmem-handler.h"
+#include "lowmem.h"
 #include "notifier.h"
 #include "procfs.h"
 #include "cgroup.h"
diff --git a/src/resource-limiter/memory/lowmem.c b/src/resource-limiter/memory/lowmem.c
new file mode 100644 (file)
index 0000000..4437f82
--- /dev/null
@@ -0,0 +1,1938 @@
+/*
+ * resourced
+ *
+ * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * @file lowmem.c
+ *
+ * @desc lowmem handler using memcgroup
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ */
+
+#include <stdio.h>
+#include <fcntl.h>
+#include <assert.h>
+#include <limits.h>
+#include <vconf.h>
+#include <unistd.h>
+#include <time.h>
+#include <limits.h>
+#include <dirent.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/shm.h>
+#include <sys/sysinfo.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <ctype.h>
+#include <bundle.h>
+#include <eventsystem.h>
+#include <malloc.h>
+
+#include "trace.h"
+#include "cgroup.h"
+#include "lowmem.h"
+#include "proc-common.h"
+#include "procfs.h"
+#include "freezer.h"
+#include "resourced.h"
+#include "macro.h"
+#include "notifier.h"
+#include "config-parser.h"
+#include "module.h"
+#include "swap-common.h"
+#include "cgroup.h"
+#include "memory-cgroup.h"
+#include "heart-common.h"
+#include "proc-main.h"
+#include "dbus-handler.h"
+#include "util.h"
+#include "fd-handler.h"
+#include "resourced-helper-worker.h"
+#include "safe-kill.h"
+#include "dedup-common.h"
+
+#define LOWMEM_THRES_INIT                   0
+
+#define MAX_VICTIMS_BETWEEN_CHECK           3
+#define MAX_PROACTIVE_HIGH_VICTIMS          4
+#define FOREGROUND_VICTIMS                  1
+#define OOM_KILLER_PRIORITY                 -20
+#define THRESHOLD_MARGIN                    10   /* MB */
+
+#define MEM_SIZE_64                         64   /* MB */
+#define MEM_SIZE_256                        256  /* MB */
+#define MEM_SIZE_448                        448  /* MB */
+#define MEM_SIZE_512                        512  /* MB */
+#define MEM_SIZE_768                        768  /* MB */
+#define MEM_SIZE_1024                       1024 /* MB */
+#define MEM_SIZE_2048                       2048 /* MB */
+
+/* thresholds for 64M RAM*/
+#define PROACTIVE_64_THRES                  10 /* MB */
+#define PROACTIVE_64_LEAVE                  30 /* MB */
+#define CGROUP_ROOT_64_THRES_DEDUP          16 /* MB */
+#define CGROUP_ROOT_64_THRES_SWAP           15 /* MB */
+#define CGROUP_ROOT_64_THRES_LOW            8  /* MB */
+#define CGROUP_ROOT_64_THRES_MEDIUM         5  /* MB */
+#define CGROUP_ROOT_64_THRES_LEAVE          8  /* MB */
+#define CGROUP_ROOT_64_NUM_VICTIMS          1
+
+/* thresholds for 256M RAM */
+#define PROACTIVE_256_THRES                 50 /* MB */
+#define PROACTIVE_256_LEAVE                 80 /* MB */
+#define CGROUP_ROOT_256_THRES_DEDUP         60 /* MB */
+#define CGROUP_ROOT_256_THRES_SWAP          40 /* MB */
+#define CGROUP_ROOT_256_THRES_LOW           20 /* MB */
+#define CGROUP_ROOT_256_THRES_MEDIUM        10 /* MB */
+#define CGROUP_ROOT_256_THRES_LEAVE         20 /* MB */
+#define CGROUP_ROOT_256_NUM_VICTIMS         2
+
+/* threshold for 448M RAM */
+#define PROACTIVE_448_THRES                 80  /* MB */
+#define PROACTIVE_448_LEAVE                 100 /* MB */
+#define CGROUP_ROOT_448_THRES_DEDUP         120 /* MB */
+#define CGROUP_ROOT_448_THRES_SWAP          100 /* MB */
+#define CGROUP_ROOT_448_THRES_LOW           60  /* MB */
+#define CGROUP_ROOT_448_THRES_MEDIUM        50  /* MB */
+#define CGROUP_ROOT_448_THRES_LEAVE         70  /* MB */
+#define CGROUP_ROOT_448_NUM_VICTIMS         5
+
+/* threshold for 512M RAM */
+#define PROACTIVE_512_THRES                 80  /* MB */
+#define PROACTIVE_512_LEAVE                 100 /* MB */
+#define CGROUP_ROOT_512_THRES_DEDUP         140 /* MB */
+#define CGROUP_ROOT_512_THRES_SWAP          100 /* MB */
+#define CGROUP_ROOT_512_THRES_LOW           70  /* MB */
+#define CGROUP_ROOT_512_THRES_MEDIUM        60  /* MB */
+#define CGROUP_ROOT_512_THRES_LEAVE         80  /* MB */
+#define CGROUP_ROOT_512_NUM_VICTIMS         5
+
+/* threshold for 768M RAM */
+#define PROACTIVE_768_THRES                 100 /* MB */
+#define PROACTIVE_768_LEAVE                 130 /* MB */
+#define CGROUP_ROOT_768_THRES_DEDUP         180 /* MB */
+#define CGROUP_ROOT_768_THRES_SWAP          150 /* MB */
+#define CGROUP_ROOT_768_THRES_LOW           90  /* MB */
+#define CGROUP_ROOT_768_THRES_MEDIUM        80  /* MB */
+#define CGROUP_ROOT_768_THRES_LEAVE         100 /* MB */
+#define CGROUP_ROOT_768_NUM_VICTIMS         5
+
+/* threshold for more than 1024M RAM */
+#define PROACTIVE_1024_THRES                150 /* MB */
+#define PROACTIVE_1024_LEAVE                230 /* MB */
+#define CGROUP_ROOT_1024_THRES_DEDUP        400 /* MB */
+#define CGROUP_ROOT_1024_THRES_SWAP         300 /* MB */
+#define CGROUP_ROOT_1024_THRES_LOW          120 /* MB */
+#define CGROUP_ROOT_1024_THRES_MEDIUM       100 /* MB */
+#define CGROUP_ROOT_1024_THRES_LEAVE        150 /* MB */
+#define CGROUP_ROOT_1024_NUM_VICTIMS        5
+
+/* threshold for more than 2048M RAM */
+#define PROACTIVE_2048_THRES                200 /* MB */
+#define PROACTIVE_2048_LEAVE                500 /* MB */
+#define CGROUP_ROOT_2048_THRES_DEDUP        400 /* MB */
+#define CGROUP_ROOT_2048_THRES_SWAP         300 /* MB */
+#define CGROUP_ROOT_2048_THRES_LOW          200 /* MB */
+#define CGROUP_ROOT_2048_THRES_MEDIUM       160 /* MB */
+#define CGROUP_ROOT_2048_THRES_LEAVE        300 /* MB */
+#define CGROUP_ROOT_2048_NUM_VICTIMS        10
+
+/* threshold for more than 3072M RAM */
+#define PROACTIVE_3072_THRES                300 /* MB */
+#define PROACTIVE_3072_LEAVE                700 /* MB */
+#define CGROUP_ROOT_3072_THRES_DEDUP        600 /* MB */
+#define CGROUP_ROOT_3072_THRES_SWAP         500 /* MB */
+#define CGROUP_ROOT_3072_THRES_LOW          400 /* MB */
+#define CGROUP_ROOT_3072_THRES_MEDIUM       250 /* MB */
+#define CGROUP_ROOT_3072_THRES_LEAVE        500 /* MB */
+#define CGROUP_ROOT_3072_NUM_VICTIMS        10
+
+static unsigned proactive_threshold_mb;
+static unsigned proactive_leave_mb;
+static unsigned lmk_start_threshold_mb;
+
+static char *event_level = MEMCG_DEFAULT_EVENT_LEVEL;
+
+/**
+ * Resourced Low Memory Killer
+ * NOTE: planned to be moved to a separate file.
+ */
+/*-------------------------------------------------*/
+#define OOM_TIMER_INTERVAL_SEC 2
+#define LMW_LOOP_WAIT_TIMEOUT_MSEC     OOM_TIMER_INTERVAL_SEC*(G_USEC_PER_SEC)
+#define LMW_RETRY_WAIT_TIMEOUT_MSEC    (G_USEC_PER_SEC)
+
+struct lowmem_control {
+       /*
+        * For each queued request the following properties
+        * are required with two exceptions:
+        *  - status is being set by LMK
+        *  - callback is optional
+        */
+       /* Processing flags*/
+       unsigned int flags;
+       /* Indicator for the OOM score of targeted processes */
+       enum oom_score score;
+
+       /* Desired size to be restored - level to be reached (MB)*/
+       unsigned int size_mb;
+       /* Max number of processes to be considered */
+       unsigned int count;
+       /* Memory reclaim status */
+       int status;
+       /*
+        * Optional - if set, will be triggered by LMK once the request
+        * is handled.
+        */
+       void (*callback) (struct lowmem_control *);
+};
+
+struct lowmem_worker {
+       pthread_t       worker_thread;
+       GAsyncQueue     *queue;
+       int             active;
+       int             running;
+};
+
+static struct lowmem_worker lmw;
+
+//static int memlog_enabled;
+//static int memlog_nr_max = DEFAULT_MEMLOG_NR_MAX;
+/* remove logfiles to reduce to this threshold.
+ * it is about five-sixths of the memlog_nr_max. */
+//static int memlog_remove_batch_thres = (DEFAULT_MEMLOG_NR_MAX * 5) / 6;
+//static char *memlog_path = DEFAULT_MEMLOG_PATH;
+//static char *memlog_prefix[MEMLOG_MAX];
+
+#define LOWMEM_WORKER_IS_ACTIVE(_lmw)  g_atomic_int_get(&(_lmw)->active)
+#define LOWMEM_WORKER_ACTIVATE(_lmw)   g_atomic_int_set(&(_lmw)->active, 1)
+#define LOWMEM_WORKER_DEACTIVATE(_lmw) g_atomic_int_set(&(_lmw)->active, 0)
+
+#define LOWMEM_WORKER_IS_RUNNING(_lmw) g_atomic_int_get(&(_lmw)->running)
+#define LOWMEM_WORKER_RUN(_lmw)        g_atomic_int_set(&(_lmw)->running, 1)
+#define LOWMEM_WORKER_IDLE(_lmw)       g_atomic_int_set(&(_lmw)->running, 0)
+
+#define LOWMEM_NEW_REQUEST() g_slice_new0(struct lowmem_control)
+
+#define LOWMEM_DESTROY_REQUEST(_ctl)           \
+       g_slice_free(typeof(*(_ctl)), _ctl);    \
+
+#define LOWMEM_SET_REQUEST(c, __flags, __score, __size, __count, __cb) \
+{                                                                      \
+       (c)->flags      = __flags; (c)->score   = __score;              \
+       (c)->size_mb= __size;  (c)->count       = __count;              \
+       (c)->callback   = __cb;                                         \
+}
+
+#define BUFF_MAX        255
+#define APP_ATTR_PATH "/proc/%d/attr/current"
+
+static int get_privilege(pid_t pid, char *name, size_t len)
+{
+       char path[PATH_MAX];
+       char attr[BUFF_MAX];
+       size_t attr_len;
+       FILE *fp;
+
+       snprintf(path, sizeof(path), APP_ATTR_PATH, pid);
+
+       fp = fopen(path, "r");
+       if (!fp)
+               return -errno;
+
+       attr_len = fread(attr, 1, sizeof(attr) - 1, fp);
+       fclose(fp);
+       if (attr_len <= 0)
+               return -ENOENT;
+
+       attr[attr_len] = '\0';
+
+       snprintf(name, len, "%s", attr);
+       return 0;
+}
+
+static int is_app(pid_t pid)
+{
+       char attr[BUFF_MAX];
+       size_t len;
+       int ret;
+
+       ret = get_privilege(pid, attr, sizeof(attr));
+       if (ret < 0) {
+               _E("Failed to get privilege of PID(%d).", pid);
+               return -1;
+       }
+
+       len = strlen(attr) + 1;
+
+       if (!strncmp("System", attr, len))
+               return 0;
+
+       if (!strncmp("User", attr, len))
+               return 0;
+
+       if (!strncmp("System::Privileged", attr, len))
+               return 0;
+
+       return 1;
+}
+
+
+static void lowmem_queue_request(struct lowmem_worker *lmw,
+                               struct lowmem_control *ctl)
+{
+       if (LOWMEM_WORKER_IS_ACTIVE(lmw))
+               g_async_queue_push(lmw->queue, ctl);
+}
+
+/* internal */
+static void lowmem_drain_queue(struct lowmem_worker *lmw)
+{
+       struct lowmem_control *ctl;
+
+       g_async_queue_lock(lmw->queue);
+       while ((ctl = g_async_queue_try_pop_unlocked(lmw->queue))) {
+               if (ctl->callback)
+                       ctl->callback(ctl);
+               LOWMEM_DESTROY_REQUEST(ctl);
+       }
+       g_async_queue_unlock(lmw->queue);
+}
+
+static void lowmem_request_destroy(gpointer data)
+{
+       struct lowmem_control *ctl = (struct lowmem_control*) data;
+
+       if (ctl->callback)
+               ctl->callback(ctl);
+       LOWMEM_DESTROY_REQUEST(ctl);
+}
+
+/*-------------------------------------------------*/
+
+/* low memory action function for cgroup */
+/* low memory action function */
+static void high_mem_act(void);
+static void swap_activate_act(void);
+static void swap_compact_act(void);
+static void lmk_act(void);
+
+
+static size_t cur_mem_state = MEM_LEVEL_HIGH;
+static int num_max_victims = MAX_MEMORY_CGROUP_VICTIMS;
+static int num_vict_between_check = MAX_VICTIMS_BETWEEN_CHECK;
+
+static unsigned long long totalram_bytes;
+static unsigned long totalram_kb;
+
+static bool oom_popup_enable;
+static bool oom_popup;
+static bool memcg_swap_status;
+static int fragmentation_size;
+
+static const char *convert_cgroup_type_to_str(int type)
+{
+       static const char *type_table[] =
+       {"/", "Throttling"};
+       if (type >= MEMCG_ROOT && type <= MEMCG_THROTTLING)
+               return type_table[type];
+       else
+               return "Error";
+}
+
+static const char *convert_status_to_str(int status)
+{
+       static const char *status_table[] =
+       {"none", "done", "drop", "cont", "retry", "next_type"};
+       if(status >= LOWMEM_RECLAIM_NONE && status <= LOWMEM_RECLAIM_NEXT_TYPE)
+               return status_table[status];
+       return "error status";
+}
+
+static const char *convert_memstate_to_str(int mem_state)
+{
+       static const char *state_table[] = {"mem high", "mem medium",
+               "mem low", "mem critical", "mem oom",};
+       if (mem_state >= 0 && mem_state < MEM_LEVEL_MAX)
+               return state_table[mem_state];
+       return "";
+}
+
+static int lowmem_launch_oompopup(void)
+{
+       GVariantBuilder *const gv_builder = g_variant_builder_new(G_VARIANT_TYPE("a{ss}"));
+       g_variant_builder_add(gv_builder, "{ss}", "_SYSPOPUP_CONTENT_", "lowmemory_oom");
+
+       GVariant *const params = g_variant_new("(a{ss})", gv_builder);
+       g_variant_builder_unref(gv_builder);
+
+       int ret = d_bus_call_method_sync_gvariant(SYSTEM_POPUP_BUS_NAME,
+               SYSTEM_POPUP_PATH_SYSTEM, SYSTEM_POPUP_IFACE_SYSTEM,
+               "PopupLaunch", params);
+
+       g_variant_unref(params);
+
+       return ret;
+}
+
+static inline void get_total_memory(void)
+{
+       struct sysinfo si;
+       if (totalram_bytes)
+               return;
+
+       if (!sysinfo(&si)) {
+               totalram_bytes = (unsigned long long)si.totalram * si.mem_unit;
+               totalram_kb = BYTE_TO_KBYTE(totalram_bytes);
+
+               register_totalram_bytes(totalram_bytes);
+       }
+       else {
+               _E("Failed to get total ramsize from the kernel");
+       }
+}
+
+unsigned int lowmem_get_task_mem_usage_rss(const struct task_info *tsk)
+{
+       unsigned int size_kb = 0, total_size_kb = 0;
+       int index, ret;
+       pid_t pid;
+
+       /*
+        * pids are allocated only when there are multiple processes with
+        * the same pgid, e.g., browser and web process. Mostly, a single
+        * process is used.
+        */
+       if (tsk->pids == NULL) {
+               ret = proc_get_ram_usage(tsk->pid, &size_kb);
+
+               /* If there is no proc entry for given pid the process
+                * should be abandoned during further processing
+                */
+               if (ret < 0)
+                       _D("failed to get rss memory usage of %d", tsk->pid);
+
+               return size_kb;
+       }
+
+       for (index = 0; index < tsk->pids->len; index++) {
+               pid = g_array_index(tsk->pids, pid_t, index);
+               ret = proc_get_ram_usage(pid, &size_kb);
+               if (ret != RESOURCED_ERROR_NONE)
+                       continue;
+               total_size_kb += size_kb;
+       }
+
+       return total_size_kb;
+}
+
+static int lowmem_kill_victim(const struct task_info *tsk,
+               int flags, int memps_log, unsigned int *victim_size)
+{
+       pid_t pid;
+       int ret;
+       char appname[PATH_MAX];
+       int sigterm = 0;
+       struct proc_app_info *pai;
+
+       pid = tsk->pid;
+
+       if (pid <= 0 || pid == getpid())
+               return RESOURCED_ERROR_FAIL;
+
+       ret = proc_get_cmdline(pid, appname, sizeof appname);
+       if (ret == RESOURCED_ERROR_FAIL)
+               return RESOURCED_ERROR_FAIL;
+
+       if (!strcmp("memps", appname) ||
+           !strcmp("crash-worker", appname) ||
+           !strcmp("system-syspopup", appname)) {
+               _E("%s(%d) was selected, skip it", appname, pid);
+               return RESOURCED_ERROR_FAIL;
+       }
+
+       pai = tsk->pai;
+       if (pai) {
+               resourced_proc_status_change(PROC_CGROUP_SET_TERMINATE_REQUEST,
+                       pid, NULL, NULL, PROC_TYPE_NONE);
+
+               if (tsk->oom_score_lru <= OOMADJ_BACKGRD_LOCKED) {
+                       sigterm = 1;
+               } else if (tsk->oom_score_lru > OOMADJ_BACKGRD_LOCKED && tsk->oom_score_lru < OOMADJ_BACKGRD_UNLOCKED) {
+                       int app_flag = pai->flags;
+                       sigterm = app_flag & PROC_SIGTERM;
+               }
+
+               if (pai->memory.oom_killed)
+                       sigterm = 0;
+
+               pai->memory.oom_killed = true;
+       }
+
+       if (sigterm)
+               safe_kill(pid, SIGTERM);
+       else
+               safe_kill(pid, SIGKILL);
+
+       _D("[LMK] we killed, force(%d), %d (%s) score = %d, size: rss = %u KB, sigterm = %d\n",
+          flags & OOM_FORCE, pid, appname, tsk->oom_score_adj,
+          tsk->size, sigterm);
+       *victim_size = tsk->size;
+
+       if (tsk->oom_score_lru > OOMADJ_FOREGRD_UNLOCKED)
+               return RESOURCED_ERROR_NONE;
+
+       if (oom_popup_enable && !oom_popup) {
+               lowmem_launch_oompopup();
+               oom_popup = true;
+       }
+
+       return RESOURCED_ERROR_NONE;
+}
+
+/* return LOWMEM_RECLAIM_CONT when killing should be continued */
+static int lowmem_check_kill_continued(struct task_info *tsk, int flags)
+{
+       unsigned int available_mb;
+
+       /*
+        * Processes with the priority higher than perceptible are killed
+        * only when the available memory is less than dynamic oom threshold.
+        */
+       if (tsk->oom_score_lru > OOMADJ_BACKGRD_PERCEPTIBLE)
+               return LOWMEM_RECLAIM_CONT;
+
+       if (flags & (OOM_FORCE|OOM_SINGLE_SHOT)) {
+               _I("[LMK] %d is dropped during force kill, flag=%d",
+                       tsk->pid, flags);
+               return LOWMEM_RECLAIM_DROP;
+       }
+       available_mb = proc_get_mem_available();
+       if (available_mb > lmk_start_threshold_mb) {
+               _I("[LMK] available=%d MB, larger than %u MB, do not kill foreground",
+                       available_mb, lmk_start_threshold_mb);
+               return LOWMEM_RECLAIM_RETRY;
+       }
+       return LOWMEM_RECLAIM_CONT;
+}
+
+static int compare_victims(const struct task_info *ta, const struct task_info *tb)
+{
+        unsigned int pa, pb;
+
+       assert(ta != NULL);
+       assert(tb != NULL);
+       /*
+        * This follows the kernel badness point calculation heuristic.
+        * oom_score_adj is normalized by its unit, which varies from -1000 to 1000.
+        */
+       pa = ta->oom_score_lru * (totalram_kb / 2000) + ta->size;
+       pb = tb->oom_score_lru * (totalram_kb / 2000) + tb->size;
+
+       return pb - pa;
+}
+
+static void lowmem_free_task_info_array(GArray *array)
+{
+       int i;
+
+       for (i = 0; i < array->len; i++) {
+               struct task_info *tsk;
+
+               tsk = &g_array_index(array, struct task_info, i);
+               if (tsk->pids)
+                       g_array_free(tsk->pids, true);
+       }
+
+       g_array_free(array, true);
+}
+
+static inline int is_dynamic_process_killer(int flags)
+{
+       return (flags & OOM_FORCE) && !(flags & OOM_NOMEMORY_CHECK);
+}
+
+static unsigned int is_memory_recovered(unsigned int *avail, unsigned int thres)
+{
+       unsigned int available = proc_get_mem_available();
+       unsigned int should_be_freed_mb = 0;
+
+       if (available < thres)
+               should_be_freed_mb = thres - available;
+       /*
+        * free THRESHOLD_MARGIN more than what really should be freed,
+        * because the launching app keeps consuming memory.
+        */
+       if (should_be_freed_mb > 0)
+               should_be_freed_mb += THRESHOLD_MARGIN;
+
+       *avail = available;
+
+       return should_be_freed_mb;
+}
+
+static int lowmem_get_pids_proc(GArray *pids)
+{
+       DIR *dp;
+       struct dirent *dentry;
+
+       dp = opendir("/proc");
+       if (!dp) {
+               _E("fail to open /proc");
+               return RESOURCED_ERROR_FAIL;
+       }
+       while ((dentry = readdir(dp)) != NULL) {
+               struct task_info tsk;
+               pid_t pid = 0, pgid = 0;
+               int oom = 0;
+
+               if (!isdigit(dentry->d_name[0]))
+                       continue;
+
+               pid = (pid_t)atoi(dentry->d_name);
+               if (pid < 1)
+                       /* skip invalid pids or kernel processes */
+                       continue;
+
+               pgid = getpgid(pid);
+               if (pgid < 1)
+                       continue;
+
+               if(is_app(pid) != 1)
+                       continue;
+
+               if (proc_get_oom_score_adj(pid, &oom) < 0) {
+                       _D("pid(%d) was already terminated", pid);
+                       continue;
+               }
+
+               /*
+                * Check whether this array should include applications or not.
+                * If applications are not required here and the pid has
+                * already been included via pai, skip appending it.
+                */
+               if (oom > OOMADJ_SU && oom <= OOMADJ_APP_MAX)
+                       continue;
+
+               /*
+                * Currently, for tasks in the memory cgroup,
+                * do not consider multiple tasks with one pgid.
+                */
+               tsk.pid = pid;
+               tsk.pgid = pgid;
+               tsk.oom_score_adj = oom;
+               tsk.oom_score_lru = oom;
+               tsk.pids = NULL;
+               tsk.size = lowmem_get_task_mem_usage_rss(&tsk);
+               tsk.pai = NULL;
+
+               g_array_append_val(pids, tsk);
+       }
+
+       closedir(dp);
+       return RESOURCED_ERROR_NONE;
+}
+
+/**
+ * @brief Terminate up to max_victims processes after finding them from pai.
+ *     It depends on the proc_app_info list and also references the
+ *     system service cgroup, because some processes in that group do not
+ *     have proc_app_info.
+ *
+ * @max_victims:     max number of processes to be terminated
+ * @start_oom:       lowest oom_score_adj value of victim candidates
+ * @end_oom:         highest oom_score_adj value of victim candidates
+ * @should_be_freed: amount of memory to be reclaimed (in MB)
+ * @total_size[out]: total size of possibly reclaimed memory (required)
+ * @completed:     final outcome (optional)
+ * @threshold:         desired value of memory available
+ */
+static int lowmem_kill_victims(int max_victims,
+       int start_oom, int end_oom, unsigned should_be_freed, int flags,
+       unsigned int *total_size, int *completed, unsigned int threshold)
+{
+       int total_count = 0;
+       GSList *proc_app_list = NULL;
+       int i, ret, victim = 0;
+       unsigned int victim_size = 0;
+       unsigned int total_victim_size = 0;
+       int status = LOWMEM_RECLAIM_NONE;
+       GArray *candidates = NULL;
+       GSList *iter, *iterchild;
+       struct proc_app_info *pai = NULL;
+       int oom_score_adj;
+       int should_be_freed_kb = MBYTE_TO_KBYTE(should_be_freed);
+
+       candidates = g_array_new(false, false, sizeof(struct task_info));
+
+       proc_app_list = proc_app_list_open();
+       gslist_for_each_item(iter, proc_app_list) {
+               struct task_info ti;
+
+               total_count++;
+               pai = (struct proc_app_info *)iter->data;
+               if (!pai->main_pid)
+                       continue;
+
+               oom_score_adj = pai->memory.oom_score_adj;
+               if (oom_score_adj > end_oom || oom_score_adj < start_oom)
+                       continue;
+
+               if ((flags & OOM_REVISE) && pai->memory.oom_killed)
+                       continue;
+
+               ti.pid = pai->main_pid;
+               ti.pgid = getpgid(ti.pid);
+               ti.oom_score_adj = oom_score_adj;
+               ti.pai = pai;
+
+               /*
+                * Previously, the oom_score_adj of favourite (oom_score = 270)
+                * applications was independent of lru_state; now lru_state is
+                * considered when killing a favourite process.
+                */
+
+               if (oom_score_adj == OOMADJ_FAVORITE && pai->lru_state >= PROC_BACKGROUND)
+                       ti.oom_score_lru = OOMADJ_FAVORITE + OOMADJ_FAVORITE_APP_INCREASE * pai->lru_state;
+               else
+                       ti.oom_score_lru = oom_score_adj;
+
+               if (pai->childs) {
+                       ti.pids = g_array_new(false, false, sizeof(pid_t));
+                       g_array_append_val(ti.pids, ti.pid);
+                       gslist_for_each_item(iterchild, pai->childs) {
+                               pid_t child = GPOINTER_TO_PID(iterchild->data);
+                               g_array_append_val(ti.pids, child);
+                       }
+               } else
+                       ti.pids = NULL;
+
+               g_array_append_val(candidates, ti);
+       }
+
+       proc_app_list_close();
+
+       if (!candidates->len) {
+               status = LOWMEM_RECLAIM_NEXT_TYPE;
+               goto leave;
+       }
+       else {
+               _D("[LMK] candidate ratio=%d/%d", candidates->len, total_count);
+       }
+
+       for (i = 0; i < candidates->len; i++) {
+               struct task_info *tsk;
+
+               tsk = &g_array_index(candidates, struct task_info, i);
+               tsk->size = lowmem_get_task_mem_usage_rss(tsk);                 /* KB */
+       }
+
+       /*
+        * In case of start_oom == OOMADJ_SU,
+        * we're going to try to kill some of the processes in /proc
+        * to handle the low memory situation.
+        * This can find a malicious system process even though it has a low oom score.
+        */
+       if (start_oom == OOMADJ_SU)
+               lowmem_get_pids_proc(candidates);
+
+       g_array_sort(candidates, (GCompareFunc)compare_victims);
+
+       for (i = 0; i < candidates->len; i++) {
+               struct task_info *tsk;
+
+               if (i >= max_victims) {
+                       status = LOWMEM_RECLAIM_NEXT_TYPE;
+                       break;
+               }
+
+               /*
+                * Available memory is checked only once every
+                * num_vict_between_check processes to reduce the burden.
+                */
+               if (!(i % num_vict_between_check)) {
+                       if (proc_get_mem_available() > threshold) {
+                               status = LOWMEM_RECLAIM_DONE;
+                               break;
+                       }
+               }
+
+               if (!(flags & OOM_NOMEMORY_CHECK) &&
+                   total_victim_size >= should_be_freed_kb) {
+                       _D("[LMK] victim=%d, max_victims=%d, total_size=%uKB",
+                               victim, max_victims, total_victim_size);
+                       status = LOWMEM_RECLAIM_DONE;
+                       break;
+               }
+
+               tsk = &g_array_index(candidates, struct task_info, i);
+
+               status = lowmem_check_kill_continued(tsk, flags);
+               if (status != LOWMEM_RECLAIM_CONT)
+                       break;
+
+               _I("[LMK] select victims from proc_app_list pid(%d) with oom_score_adj(%d)\n", tsk->pid, tsk->oom_score_adj);
+
+               ret = lowmem_kill_victim(tsk, flags, i, &victim_size);
+               if (ret != RESOURCED_ERROR_NONE)
+                       continue;
+               victim++;
+               total_victim_size += victim_size;
+       }
+
+leave:
+       lowmem_free_task_info_array(candidates);
+       *total_size = total_victim_size;
+       if(*completed != LOWMEM_RECLAIM_CONT)
+               *completed = status;
+       else
+               *completed = LOWMEM_RECLAIM_NEXT_TYPE;
+       return victim;
+}
+
+static int calculate_range_of_oom(enum oom_score score, int *min, int *max)
+{
+       if (score > OOM_SCORE_MAX || score < OOM_SCORE_HIGH) {
+               _E("[LMK] oom score (%d) is out of scope", score);
+               return RESOURCED_ERROR_FAIL;
+       }
+
+       *max = cgroup_get_highest_oom_score_adj(score);
+       *min = cgroup_get_lowest_oom_score_adj(score);
+
+       return RESOURCED_ERROR_NONE;
+}
+
+static void lowmem_handle_request(struct lowmem_control *ctl)
+{
+       int start_oom, end_oom;
+       int count = 0, victim_cnt = 0;
+       int max_victim_cnt = ctl->count;
+       int status = LOWMEM_RECLAIM_NONE;
+       unsigned int available_mb = 0;
+       unsigned int total_size_mb = 0;
+       unsigned int current_size = 0;
+       unsigned int reclaim_size_mb, shortfall_mb = 0;
+       enum oom_score oom_score = ctl->score;
+
+       available_mb = proc_get_mem_available();
+       reclaim_size_mb = ctl->size_mb  > available_mb                  /* MB */
+                    ? ctl->size_mb - available_mb : 0;
+
+       if (!reclaim_size_mb) {
+               status = LOWMEM_RECLAIM_DONE;
+               goto done;
+       }
+
+retry:
+       /* Prepare LMK to start doing it's job. Check preconditions. */
+       if (calculate_range_of_oom(oom_score, &start_oom, &end_oom))
+               goto done;
+
+       lmk_start_threshold_mb = get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM];
+       shortfall_mb = is_memory_recovered(&available_mb, ctl->size_mb);
+
+       if (!shortfall_mb || !reclaim_size_mb) {
+               status = LOWMEM_RECLAIM_DONE;
+               goto done;
+       }
+
+       /* precaution */
+       current_size = 0;
+       victim_cnt = lowmem_kill_victims(max_victim_cnt, start_oom, end_oom,
+                           reclaim_size_mb, ctl->flags, &current_size, &status, ctl->size_mb);
+
+       if (victim_cnt) {
+               current_size = KBYTE_TO_MBYTE(current_size);
+               reclaim_size_mb -= reclaim_size_mb > current_size
+                       ? current_size : reclaim_size_mb;
+               total_size_mb += current_size;
+               count += victim_cnt;
+               _I("[LMK] current: kill %d victims,  reclaim_size=%uMB from %d to %d status=%s",
+                               victim_cnt, current_size,
+                               start_oom, end_oom, convert_status_to_str(status));
+       }
+
+       if ((status == LOWMEM_RECLAIM_DONE) ||
+           (status == LOWMEM_RECLAIM_DROP) ||
+           (status == LOWMEM_RECLAIM_RETRY))
+               goto done;
+
+       /*
+        * If reclaiming memory does not finish in the first pass:
+        *      - if flags has OOM_IN_DEPTH,
+        *        try to find victims again in the active cgroup;
+        *        otherwise, just return because there are no more victims in the desired cgroup.
+        *      - if flags has OOM_REVISE,
+        *        it means that resourced could not find victims in proc_app_list,
+        *        so it should search for victims or a malicious process in /proc.
+        *        But searching /proc leads to abnormal behaviour
+        *        (sluggishness, or killing the same victims repeatedly);
+        *        thus, otherwise, just return after the first pass and wait for some period.
+        */
+       if (oom_score == OOM_SCORE_LOW) {
+               oom_score = OOM_SCORE_MEDIUM;
+               goto retry;
+       } else if ((oom_score == OOM_SCORE_MEDIUM) && (ctl->flags & OOM_IN_DEPTH)) {
+               oom_score = OOM_SCORE_HIGH;
+               if(ctl->flags & OOM_FORCE)
+                       max_victim_cnt = FOREGROUND_VICTIMS;
+               goto retry;
+       } else if ((oom_score == OOM_SCORE_HIGH) && (ctl->flags & OOM_IN_DEPTH)) {
+               status = LOWMEM_RECLAIM_RETRY;
+               ctl->score = OOM_SCORE_MAX;
+       }
+       else if (oom_score == OOM_SCORE_MAX) {
+               status = LOWMEM_RECLAIM_RETRY;
+       }
+done:
+       _I("[LMK] Done: killed %d processes reclaimed=%uMB remaining=%uMB shortfall=%uMB status=%s",
+               count, total_size_mb, reclaim_size_mb, shortfall_mb, convert_status_to_str(status));
+
+       /* After we finish reclaiming it's worth to remove oldest memps logs */
+       ctl->status = status;
+}
+
+static void *lowmem_reclaim_worker(void *arg)
+{
+       struct lowmem_worker *lmw = (struct lowmem_worker *)arg;
+
+       setpriority(PRIO_PROCESS, 0, OOM_KILLER_PRIORITY);
+
+       g_async_queue_ref(lmw->queue);
+
+       while (1) {
+               int try_count = 0;
+               struct lowmem_control *ctl;
+
+               LOWMEM_WORKER_IDLE(lmw);
+               /* Wait on any wake-up call */
+               ctl = g_async_queue_pop(lmw->queue);
+
+               if (!ctl) {
+                       _W("[LMK] ctl structure is NULL");
+                       continue;
+               }
+
+               if ((ctl->flags & OOM_DROP) || !LOWMEM_WORKER_IS_ACTIVE(lmw)) {
+                       LOWMEM_DESTROY_REQUEST(ctl);
+                       break;
+               }
+
+               LOWMEM_WORKER_RUN(lmw);
+process_again:
+               _D("[LMK] %d tries", ++try_count);
+               lowmem_handle_request(ctl);
+               /**
+                * In case the process failed to reclaim the requested amount of memory
+                * or is still under memory pressure - try the timeout wait.
+                * There is a chance this will get woken-up in a better reality.
+                */
+               if (ctl->status == LOWMEM_RECLAIM_RETRY &&
+                   !(ctl->flags & OOM_SINGLE_SHOT)) {
+                       unsigned int available_mb = proc_get_mem_available();
+
+                       if (available_mb >= ctl->size_mb) {
+                               _I("[LMK] Memory restored: requested=%uMB available=%uMB\n",
+                                       ctl->size_mb, available_mb);
+                               ctl->status = LOWMEM_RECLAIM_DONE;
+                               if (ctl->callback)
+                                       ctl->callback(ctl);
+                               LOWMEM_DESTROY_REQUEST(ctl);
+                               LOWMEM_WORKER_IDLE(lmw);
+                               continue;
+                       }
+
+                       if (LOWMEM_WORKER_IS_ACTIVE(lmw)) {
+                               g_usleep(LMW_RETRY_WAIT_TIMEOUT_MSEC);
+                               ctl->flags |= OOM_REVISE;
+                               goto process_again;
+                       }
+               }
+
+               /*
+                * The ctl callback will check the available size again,
+                * and this is the last point in the reclaiming worker.
+                * Resourced sent SIGKILL to the victim processes,
+                * so it should wait for some seconds until each process returns its memory.
+                */
+               g_usleep(LMW_LOOP_WAIT_TIMEOUT_MSEC);
+               if (ctl->callback)
+                       ctl->callback(ctl);
+
+               /* The lmk becomes the owner of all queued requests .. */
+               LOWMEM_DESTROY_REQUEST(ctl);
+               LOWMEM_WORKER_IDLE(lmw);
+       }
+       g_async_queue_unref(lmw->queue);
+       pthread_exit(NULL);
+}
+
+static void change_lowmem_state(unsigned int mem_state)
+{
+       cur_mem_state = mem_state;
+       lmk_start_threshold_mb = get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM];
+
+       resourced_notify(RESOURCED_NOTIFIER_MEM_LEVEL_CHANGED,
+               (void *)&cur_mem_state);
+}
+
+/* only app can call this function
+ * that is, service cannot call the function
+ */
+static void lowmem_swap_memory(char *path)
+{
+       unsigned int available_mb;
+
+       if (cur_mem_state == MEM_LEVEL_HIGH)
+               return;
+
+       if (swap_get_state() != SWAP_ON)
+               return;
+
+       available_mb = proc_get_mem_available();
+       if (cur_mem_state != MEM_LEVEL_LOW &&
+           available_mb <= get_root_memcg_info()->threshold_mb[MEM_LEVEL_LOW])
+               swap_activate_act();
+
+       resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
+       memcg_swap_status = true;
+}
+
+void lowmem_trigger_swap(pid_t pid, char *path, bool move)
+{
+       int error;
+       int oom_score_adj;
+       int lowest_oom_score_adj;
+
+       if (!path) {
+               _E("[SWAP] Unknown memory cgroup path to swap");
+               return;
+       }
+
+       /* In this case, the corresponding process will be moved to the MEMCG_THROTTLING memory cgroup.
+        */
+       if (move) {
+               error = proc_get_oom_score_adj(pid, &oom_score_adj);
+               if (error) {
+                       _E("[SWAP] Cannot get oom_score_adj of pid (%d)", pid);
+                       return;
+               }
+
+               lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(OOM_SCORE_LOW);
+
+               if (oom_score_adj < lowest_oom_score_adj) {
+                       oom_score_adj = lowest_oom_score_adj;
+                       /* At the end of this function, the 'lowmem_swap_memory()' function will be called */
+                       proc_set_oom_score_adj(pid, oom_score_adj, find_app_info(pid));
+                       return;
+               }
+       }
+
+       /* The corresponding process is already managed per app or service.
+        * In addition, if the process is already located in MEMCG_THROTTLING, then just do swap.
+        */
+       resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
+}
+
+static void memory_level_send_system_event(int lv)
+{
+       bundle *b;
+       const char *str;
+
+       switch (lv) {
+               case MEM_LEVEL_HIGH:
+               case MEM_LEVEL_MEDIUM:
+               case MEM_LEVEL_LOW:
+                       str = EVT_VAL_MEMORY_NORMAL;
+                       break;
+               case MEM_LEVEL_CRITICAL:
+                       str = EVT_VAL_MEMORY_SOFT_WARNING;
+                       break;
+               case MEM_LEVEL_OOM:
+                       str = EVT_VAL_MEMORY_HARD_WARNING;
+                       break;
+               default:
+                       _E("Invalid state");
+                       return;
+       }
+
+       b = bundle_create();
+       if (!b) {
+               _E("Failed to create bundle");
+               return;
+       }
+
+       bundle_add_str(b, EVT_KEY_LOW_MEMORY, str);
+       eventsystem_send_system_event(SYS_EVENT_LOW_MEMORY, b);
+       bundle_free(b);
+}
+
+static void high_mem_act(void)
+{
+       int ret, status;
+
+       ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
+       if (ret)
+               _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
+       if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
+               vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
+                             VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
+               memory_level_send_system_event(MEM_LEVEL_HIGH);
+       }
+
+       change_lowmem_state(MEM_LEVEL_HIGH);
+
+       if (swap_get_state() == SWAP_ON && memcg_swap_status) {
+               resourced_notify(RESOURCED_NOTIFIER_SWAP_UNSET_LIMIT, get_memcg_info(MEMCG_THROTTLING));
+               memcg_swap_status = false;
+       }
+       if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
+               resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
+                       (void *)CGROUP_FREEZER_ENABLED);
+}
+
+static void swap_activate_act(void)
+{
+       int ret, status;
+
+       ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
+       if (ret)
+               _E("vconf get failed %s", VCONFKEY_SYSMAN_LOW_MEMORY);
+
+       if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
+               vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
+                               VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
+               memory_level_send_system_event(MEM_LEVEL_LOW);
+       }
+       change_lowmem_state(MEM_LEVEL_LOW);
+       if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
+               resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
+                       (void *)CGROUP_FREEZER_ENABLED);
+
+       if (swap_get_state() != SWAP_ON)
+               resourced_notify(RESOURCED_NOTIFIER_SWAP_ACTIVATE, NULL);
+}
+
+static void dedup_act(enum ksm_scan_mode mode)
+{
+       int ret, status;
+       int data;
+
+       if (dedup_get_state() != DEDUP_ONE_SHOT)
+               return;
+
+       if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
+               resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
+                               (void *)CGROUP_FREEZER_ENABLED);
+
+       if (mode == KSM_SCAN_PARTIAL) {
+               ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
+               if (ret)
+                       _E("vconf get failed %s", VCONFKEY_SYSMAN_LOW_MEMORY);
+
+               if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
+                       vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
+                                       VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
+                       memory_level_send_system_event(MEM_LEVEL_MEDIUM);
+               }
+               change_lowmem_state(MEM_LEVEL_MEDIUM);
+
+               data = KSM_SCAN_PARTIAL;
+               resourced_notify(RESOURCED_NOTIFIER_DEDUP_SCAN, &data);
+       } else if (mode == KSM_SCAN_FULL) {
+               data = KSM_SCAN_FULL;
+               resourced_notify(RESOURCED_NOTIFIER_DEDUP_SCAN, &data);
+       }
+}
+
+static void swap_compact_act(void)
+{
+       change_lowmem_state(MEM_LEVEL_CRITICAL);
+       resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_CRITICAL);
+       memory_level_send_system_event(MEM_LEVEL_CRITICAL);
+}
+
+static void medium_cb(struct lowmem_control *ctl)
+{
+       if (ctl->status == LOWMEM_RECLAIM_DONE)
+               oom_popup = false;
+       lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
+}
+
+static void lmk_act(void)
+{
+       unsigned int available_mb;
+       int ret;
+       int status = VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL;
+
+       /*
+        * Don't trigger reclaim worker
+        * if it is already running
+        */
+       if (LOWMEM_WORKER_IS_RUNNING(&lmw))
+               return;
+
+       ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
+       if (ret)
+               _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
+
+       memory_level_send_system_event(MEM_LEVEL_OOM);
+       if (status != VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING) {
+               if (proc_get_freezer_status() == CGROUP_FREEZER_ENABLED)
+                       resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
+                               (void *)CGROUP_FREEZER_PAUSED);
+               vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
+                             VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING);
+       }
+       available_mb = proc_get_mem_available();
+
+       change_lowmem_state(MEM_LEVEL_OOM);
+
+       if (available_mb < get_root_memcg_info()->threshold_leave_mb) {
+               struct lowmem_control *ctl;
+
+               ctl = LOWMEM_NEW_REQUEST();
+               if (ctl) {
+                       LOWMEM_SET_REQUEST(ctl, OOM_IN_DEPTH,
+                               OOM_SCORE_LOW, get_root_memcg_info()->threshold_leave_mb,
+                               num_max_victims, medium_cb);
+                       lowmem_queue_request(&lmw, ctl);
+               }
+       }
+
+       resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_OOM);
+
+       /*
+        * Flush resourced's own memory, as other processes do.
+        * Resourced can use both many fastbins and sqlite3 cache memory.
+        */
+       malloc_trim(0);
+
+       return;
+}
+
+static void lowmem_trigger_memory_state_action(int mem_state)
+{
+       /*
+        * Check if the state we want to set is different from the current one,
+        * but make an exception when mem_state is MEM_LEVEL_OOM (medium);
+        * otherwise, the reclaim worker couldn't run any more.
+        */
+       if (mem_state != MEM_LEVEL_OOM && cur_mem_state == mem_state)
+               return;
+
+       switch (mem_state) {
+       case MEM_LEVEL_HIGH:
+               high_mem_act();
+               break;
+       case MEM_LEVEL_MEDIUM:
+               dedup_act(KSM_SCAN_PARTIAL);
+               break;
+       case MEM_LEVEL_LOW:
+               swap_activate_act();
+               break;
+       case MEM_LEVEL_CRITICAL:
+               dedup_act(KSM_SCAN_FULL);
+               swap_compact_act();
+               break;
+       case MEM_LEVEL_OOM:
+               lmk_act();
+               break;
+       default:
+               assert(0);
+       }
+}
+
+static unsigned int check_mem_state(unsigned int available_mb)
+{
+       int mem_state;
+       for (mem_state = MEM_LEVEL_MAX - 1; mem_state > MEM_LEVEL_HIGH; mem_state--) {
+               if (mem_state != MEM_LEVEL_OOM &&
+                               available_mb <= get_root_memcg_info()->threshold_mb[mem_state])
+                       break;
+               else if (mem_state == MEM_LEVEL_OOM && available_mb <= lmk_start_threshold_mb)
+                       break;
+       }
+
+       return mem_state;
+}
+
+/* setup memcg parameters depending on total ram size. */
+static void setup_memcg_params(void)
+{
+       unsigned long total_ramsize_mb;
+
+       get_total_memory();
+       total_ramsize_mb = BYTE_TO_MBYTE(totalram_bytes);
+
+       _D("Total: %lu MB", total_ramsize_mb);
+       if (total_ramsize_mb <= MEM_SIZE_64) {
+               /* set thresholds for ram size 64M */
+               proactive_threshold_mb = PROACTIVE_64_THRES;
+               proactive_leave_mb = PROACTIVE_64_LEAVE;
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_64_THRES_DEDUP);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_64_THRES_SWAP);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_64_THRES_LOW);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_64_THRES_MEDIUM);
+               memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_64_THRES_LEAVE);
+               num_max_victims = CGROUP_ROOT_64_NUM_VICTIMS;
+       } else if (total_ramsize_mb <= MEM_SIZE_256) {
+               /* set thresholds for ram size 256M */
+               proactive_threshold_mb = PROACTIVE_256_THRES;
+               proactive_leave_mb = PROACTIVE_256_LEAVE;
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_256_THRES_DEDUP);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_256_THRES_SWAP);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_256_THRES_LOW);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_256_THRES_MEDIUM);
+               memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_256_THRES_LEAVE);
+               num_max_victims = CGROUP_ROOT_256_NUM_VICTIMS;
+       } else if (total_ramsize_mb <= MEM_SIZE_448) {
+               /* set thresholds for ram size 448M */
+               proactive_threshold_mb = PROACTIVE_448_THRES;
+               proactive_leave_mb = PROACTIVE_448_LEAVE;
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_448_THRES_DEDUP);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_448_THRES_SWAP);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_448_THRES_LOW);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_448_THRES_MEDIUM);
+               memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_448_THRES_LEAVE);
+               num_max_victims = CGROUP_ROOT_448_NUM_VICTIMS;
+       } else if (total_ramsize_mb <= MEM_SIZE_512) {
+               /* set thresholds for ram size 512M */
+               proactive_threshold_mb = PROACTIVE_512_THRES;
+               proactive_leave_mb = PROACTIVE_512_LEAVE;
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_512_THRES_DEDUP);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_512_THRES_SWAP);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_512_THRES_LOW);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_512_THRES_MEDIUM);
+               memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_512_THRES_LEAVE);
+               num_max_victims = CGROUP_ROOT_512_NUM_VICTIMS;
+       }  else if (total_ramsize_mb <= MEM_SIZE_768) {
+               /* set thresholds for ram size 768M */
+               proactive_threshold_mb = PROACTIVE_768_THRES;
+               proactive_leave_mb = PROACTIVE_768_LEAVE;
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_768_THRES_DEDUP);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_768_THRES_SWAP);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_768_THRES_LOW);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_768_THRES_MEDIUM);
+               memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_768_THRES_LEAVE);
+               num_max_victims = CGROUP_ROOT_768_NUM_VICTIMS;
+       } else if (total_ramsize_mb <= MEM_SIZE_1024) {
+               /* set thresholds for ram size 1024M */
+               proactive_threshold_mb = PROACTIVE_1024_THRES;
+               proactive_leave_mb = PROACTIVE_1024_LEAVE;
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_1024_THRES_DEDUP);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_1024_THRES_SWAP);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_1024_THRES_LOW);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_1024_THRES_MEDIUM);
+               memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_1024_THRES_LEAVE);
+               num_max_victims = CGROUP_ROOT_1024_NUM_VICTIMS;
+       } else if (total_ramsize_mb <= MEM_SIZE_2048) {
+               proactive_threshold_mb = PROACTIVE_2048_THRES;
+               proactive_leave_mb = PROACTIVE_2048_LEAVE;
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_2048_THRES_DEDUP);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_2048_THRES_SWAP);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_2048_THRES_LOW);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_2048_THRES_MEDIUM);
+               memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_2048_THRES_LEAVE);
+               num_max_victims = CGROUP_ROOT_2048_NUM_VICTIMS;
+       } else {
+               proactive_threshold_mb = PROACTIVE_3072_THRES;
+               proactive_leave_mb = PROACTIVE_3072_LEAVE;
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_3072_THRES_DEDUP);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_3072_THRES_SWAP);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_3072_THRES_LOW);
+               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_3072_THRES_MEDIUM);
+               memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_3072_THRES_LEAVE);
+               num_max_victims = CGROUP_ROOT_3072_NUM_VICTIMS;
+       }
+}
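Note (illustration only, not part of the change): the branches above differ only in their constants, so the same setup could also be expressed as a lookup table. The sketch below assumes the MEM_SIZE_*, PROACTIVE_* and CGROUP_ROOT_* macros defined earlier in this file plus the memcg_set_threshold()/memcg_set_leave_threshold() setters used above; struct memcg_preset and apply_memcg_preset() are hypothetical names.

/* Illustrative sketch: one table row per supported RAM size. */
struct memcg_preset {
	unsigned long max_ram_mb;		/* row applies when total RAM (MB) <= this value */
	unsigned int proactive_thres_mb;
	unsigned int proactive_leave_mb;
	unsigned int thres_mb[MEM_LEVEL_MAX];	/* indexed by MEM_LEVEL_* */
	unsigned int thres_leave_mb;
	int num_victims;
};

static void apply_memcg_preset(const struct memcg_preset *p)
{
	proactive_threshold_mb = p->proactive_thres_mb;
	proactive_leave_mb = p->proactive_leave_mb;
	memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, p->thres_mb[MEM_LEVEL_MEDIUM]);
	memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, p->thres_mb[MEM_LEVEL_LOW]);
	memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, p->thres_mb[MEM_LEVEL_CRITICAL]);
	memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, p->thres_mb[MEM_LEVEL_OOM]);
	memcg_set_leave_threshold(MEMCG_ROOT, p->thres_leave_mb);
	num_max_victims = p->num_victims;
}

setup_memcg_params() would then only have to pick the first row whose max_ram_mb is not smaller than total_ramsize_mb and pass it to apply_memcg_preset().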
+
+static void lowmem_move_memcgroup(int pid, int next_oom_score_adj, struct proc_app_info *pai)
+{
+       int cur_oom_score_adj;
+       int cur_memcg_idx;
+       struct memcg_info *mi;
+       int next_memcg_idx = cgroup_get_type(next_oom_score_adj);
+
+       mi = get_memcg_info(next_memcg_idx);
+
+       if (!mi) {
+               return;
+       }
+
+       if (!pai) {
+               cgroup_write_pid_fullpath(mi->name, pid);
+               return;
+       }
+
+       /* parent pid */
+       if (pai->main_pid == pid) {
+               cur_oom_score_adj = pai->memory.oom_score_adj;
+               cur_memcg_idx = cgroup_get_type(cur_oom_score_adj);
+
+               if (cur_oom_score_adj == next_oom_score_adj) {
+                       _D("next oom_score_adj (%d) is same with current one", next_oom_score_adj);
+                       return;
+               }
+
+               proc_set_process_memory_state(pai, next_memcg_idx, mi, next_oom_score_adj);
+
+               if (!lowmem_limit_move_cgroup(pai))
+                       return;
+
+               if (cur_memcg_idx == next_memcg_idx)
+                       return;
+
+               _I("app (%s) memory cgroup move from %s to %s", pai->appid, convert_cgroup_type_to_str(cur_memcg_idx), convert_cgroup_type_to_str(next_memcg_idx));
+               cgroup_write_pid_fullpath(mi->name, pid);
+               if (next_memcg_idx == MEMCG_THROTTLING)
+                       lowmem_swap_memory(get_memcg_info(MEMCG_THROTTLING)->name);
+       }
+       /* child pid */
+       else {
+               if (pai->memory.use_mem_limit)
+                       return;
+
+               cgroup_write_pid_fullpath(mi->name, pid);
+       }
+}
+
+static int lowmem_activate_worker(void)
+{
+       int ret = RESOURCED_ERROR_NONE;
+
+       if (LOWMEM_WORKER_IS_ACTIVE(&lmw)) {
+               return ret;
+       }
+
+       lmw.queue = g_async_queue_new_full(lowmem_request_destroy);
+       if (!lmw.queue) {
+               _E("Failed to create request queue\n");
+               return RESOURCED_ERROR_FAIL;
+       }
+       LOWMEM_WORKER_ACTIVATE(&lmw);
+       ret = pthread_create(&lmw.worker_thread, NULL,
+               (void *)lowmem_reclaim_worker, (void *)&lmw);
+       if (ret) {
+               LOWMEM_WORKER_DEACTIVATE(&lmw);
+               _E("Failed to create LMK thread: %d\n", ret);
+       } else {
+               pthread_detach(lmw.worker_thread);
+               ret = RESOURCED_ERROR_NONE;
+       }
+       return ret;
+}
+
+static void lowmem_deactivate_worker(void)
+{
+       struct lowmem_control *ctl;
+
+       if (!LOWMEM_WORKER_IS_ACTIVE(&lmw))
+               return;
+
+       LOWMEM_WORKER_DEACTIVATE(&lmw);
+       lowmem_drain_queue(&lmw);
+
+       ctl = LOWMEM_NEW_REQUEST();
+       if (!ctl) {
+               _E("Critical - g_slice alloc failed - Lowmem cannot be deactivated");
+               return;
+       }
+       ctl->flags = OOM_DROP;
+       g_async_queue_push(lmw.queue, ctl);
+       g_async_queue_unref(lmw.queue);
+}
+
+static int lowmem_press_eventfd_read(int fd)
+{
+       unsigned long long dummy_state;
+
+       return read(fd, &dummy_state, sizeof(dummy_state));
+}
+
+static void lowmem_press_root_cgroup_handler(void)
+{
+       static unsigned int prev_available_mb;
+       unsigned int available_mb;
+       int mem_state;
+
+       available_mb = proc_get_mem_available();
+       if (prev_available_mb == available_mb)
+               return;
+
+       mem_state = check_mem_state(available_mb);
+       lowmem_trigger_memory_state_action(mem_state);
+       prev_available_mb = available_mb;
+}
+
+static bool lowmem_press_eventfd_handler(int fd, void *data)
+{
+       struct memcg_info *mi;
+       enum cgroup_type type = MEMCG_ROOT;
+
+       // FIXME: probably shouldn't get ignored
+       if (lowmem_press_eventfd_read(fd) < 0)
+               _E("Failed to read lowmem press event, %m\n");
+
+       for (type = MEMCG_ROOT; type < MEMCG_END; type++) {
+               if (!get_cgroup_tree(type) || !get_memcg_info(type))
+                       continue;
+               mi = get_memcg_info(type);
+               if (fd == mi->evfd) {
+                       /* call low memory handler for this memcg */
+                       if (type == MEMCG_ROOT) {
+                               lowmem_press_root_cgroup_handler();
+                               return true;
+                       } else {
+                               _E("Wrong event fd for cgroup %s", convert_cgroup_type_to_str(type));
+                               return false;
+                       }
+               }
+       }
+
+       return false;
+}
+
+static int lowmem_press_register_eventfd(struct memcg_info *mi)
+{
+       int evfd;
+       const char *name = mi->name;
+       static fd_handler_h handler;
+
+       if (mi->threshold_mb[MEM_LEVEL_OOM] == LOWMEM_THRES_INIT)
+               return 0;
+
+       evfd = memcg_set_eventfd(name, MEMCG_EVENTFD_MEMORY_PRESSURE,
+                       event_level);
+
+       if (evfd < 0) {
+               int saved_errno = errno;
+               _E("Failed to register event press fd %s cgroup", name);
+               return -saved_errno;
+       }
+
+       mi->evfd = evfd;
+
+       add_fd_read_handler(NULL, evfd, lowmem_press_eventfd_handler, NULL, NULL, &handler);
+       return 0;
+}
+
+static int lowmem_press_setup_eventfd(void)
+{
+       unsigned int i;
+
+       for (i = MEMCG_ROOT; i < MEMCG_END; i++) {
+               if (!get_use_hierarchy(i))
+                       continue;
+
+               lowmem_press_register_eventfd(get_memcg_info(i));
+       }
+       return RESOURCED_ERROR_NONE;
+}
+
+static void lowmem_force_reclaim_cb(struct lowmem_control *ctl)
+{
+       lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
+}
+
+int lowmem_trigger_reclaim(int flags, int victims, enum oom_score score, int threshold_mb)
+{
+       struct lowmem_control *ctl = LOWMEM_NEW_REQUEST();
+
+       if (!ctl)
+               return -ENOMEM;
+
+       flags |= OOM_FORCE | OOM_IN_DEPTH | OOM_SINGLE_SHOT;
+       victims = victims > 0 ? victims : MAX_MEMORY_CGROUP_VICTIMS;
+       score = score > 0 ? score : OOM_SCORE_LOW;
+       threshold_mb = threshold_mb > 0 ? threshold_mb : get_root_memcg_info()->threshold_leave_mb;
+
+       lowmem_change_memory_state(MEM_LEVEL_CRITICAL, 1);
+       LOWMEM_SET_REQUEST(ctl, flags,
+               score, threshold_mb, victims,
+               lowmem_force_reclaim_cb);
+       lowmem_queue_request(&lmw, ctl);
+
+       return 0;
+}
+
+void lowmem_trigger_swap_reclaim(enum oom_score score, unsigned long long swap_size_bytes)
+{
+       int size_mb, victims;
+
+       victims = num_max_victims  > MAX_PROACTIVE_HIGH_VICTIMS
+                                ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
+
+       size_mb = get_root_memcg_info()->threshold_leave_mb + BYTE_TO_MBYTE(swap_size_bytes);
+       lowmem_trigger_reclaim(0, victims, score, size_mb);
+}
+
+bool lowmem_fragmentated(void)
+{
+       struct buddyinfo bi;
+       int ret;
+
+       ret = proc_get_buddyinfo("Normal", &bi);
+       if (ret < 0)
+               return false;
+
+       /*
+        * fragmentation_size is the minimum required number of high-order buddy
+        * pages (weighted in 32KB-page equivalents) in the "Normal" zone.
+        * If the weighted total of buddy pages is smaller than fragmentation_size,
+        * resourced treats kernel memory as fragmented.
+        * The default value is zero on low-memory devices.
+        */
+       if (bi.page[PAGE_32K] + (bi.page[PAGE_64K] << 1) + (bi.page[PAGE_128K] << 2) +
+               (bi.page[PAGE_256K] << 3) < fragmentation_size) {
+               _I("fragmentation detected, need to execute proactive oom killer");
+               return true;
+       }
+       return false;
+}
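A worked example with made-up numbers: if /proc/buddyinfo reports 4 free 32K blocks, 2 free 64K blocks, 1 free 128K block and no 256K blocks in the "Normal" zone, the weighted sum is 4 + 2*2 + 1*4 + 0*8 = 12; with fragmentation_size configured to 16, lowmem_fragmentated() returns true and the proactive OOM killer is triggered.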
+
+static void lowmem_proactive_oom_killer(int flags, char *appid)
+{
+       unsigned int before_mb;
+       int victims;
+
+       before_mb = proc_get_mem_available();
+
+       /*
+        * If available memory is already below the oom threshold or above the
+        * proactive leave level, return and let the regular oom killer handle it.
+        */
+       if (before_mb < get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM] ||
+                       before_mb > proactive_leave_mb)
+               return;
+
+       victims = num_max_victims  > MAX_PROACTIVE_HIGH_VICTIMS
+                                ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
+
+#ifdef HEART_SUPPORT
+       /*
+        * This branch is used only when the HEART module is compiled in and
+        * its MEMORY module is enabled. Otherwise this is skipped.
+        */
+       struct heart_memory_data *md = heart_memory_get_memdata(appid, DATA_LATEST);
+       if (md) {
+               unsigned int rss_mb, after_mb, size_mb;
+
+               rss_mb = KBYTE_TO_MBYTE(md->avg_rss);
+
+               free(md);
+
+               after_mb = before_mb - rss_mb;
+               /*
+                * after launching the app, ensure that available memory stays
+                * above the oom threshold
+                */
+               if (after_mb >= get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM])
+                       return;
+
+               if (proactive_threshold_mb - rss_mb >= get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM])
+                       size_mb = proactive_threshold_mb;
+               else
+                       size_mb = rss_mb + get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM] + THRESHOLD_MARGIN;
+
+               _D("history based proactive LMK: avg rss %u MB, available %u MB, required %u MB",
+                       rss_mb, before_mb, size_mb);
+               lowmem_trigger_reclaim(0, victims, OOM_SCORE_LOW, size_mb);
+
+               return;
+       }
+#endif
+
+       /*
+        * When there is no history data for the launching app,
+        * check the current fragmentation state and the application manifest.
+        * If resourced decides that proactive LMK is required, it runs the oom
+        * killer based on the dynamic threshold.
+        */
+       if (lowmem_fragmentated())
+               goto reclaim;
+
+       /*
+        * run the proactive oom killer only when available memory is smaller
+        * than the dynamic process threshold
+        */
+       if (!proactive_threshold_mb || before_mb >= proactive_threshold_mb)
+               return;
+
+       if (!(flags & PROC_LARGEMEMORY))
+               return;
+
+reclaim:
+       /*
+        * free THRESHOLD_MARGIN MB more than is strictly required,
+        * because the launching app keeps consuming memory.
+        */
+       _D("Run threshold based proactive LMK: memory level to reach: %u MB\n",
+               proactive_leave_mb + THRESHOLD_MARGIN);
+       lowmem_trigger_reclaim(0, victims, OOM_SCORE_LOW, proactive_leave_mb + THRESHOLD_MARGIN);
+}
+
+unsigned int lowmem_get_proactive_thres(void)
+{
+       return proactive_threshold_mb;
+}
+
+static int lowmem_prelaunch_handler(void *data)
+{
+       struct proc_status *ps = (struct proc_status *)data;
+       struct proc_app_info *pai = ps->pai;
+
+       if (!pai || CHECK_BIT(pai->flags, PROC_SERVICEAPP))
+               return RESOURCED_ERROR_NONE;
+
+       lowmem_proactive_oom_killer(ps->pai->flags, ps->pai->appid);
+       return RESOURCED_ERROR_NONE;
+}
+
+int lowmem_control_handler(void *data)
+{
+       struct lowmem_control_data *lowmem_data;
+
+       lowmem_data = (struct lowmem_control_data *)data;
+       switch (lowmem_data->control_type) {
+       case LOWMEM_MOVE_CGROUP:
+               lowmem_move_memcgroup((pid_t)lowmem_data->pid,
+                                       lowmem_data->oom_score_adj, lowmem_data->pai);
+               break;
+       default:
+               break;
+       }
+       return RESOURCED_ERROR_NONE;
+}
+
+static inline int calculate_threshold_size(double ratio)
+{
+       unsigned long long size_bytes = (double)totalram_bytes * ratio / 100.0;
+       return BYTE_TO_MBYTE(size_bytes);
+}
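For example (illustrative numbers, and assuming BYTE_TO_MBYTE divides by 1024*1024): with totalram_bytes equal to 1 GiB and a configured ratio of 10 percent, the intermediate value is 1073741824 * 10 / 100 = 107374182 bytes, which calculate_threshold_size() converts to roughly 102 MB.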
+
+static void load_configs(void)
+{
+       struct memcg_conf *memcg_conf = get_memcg_conf();
+
+       /* set MemoryGroupLimit section */
+       for (int cgroup = MEMCG_THROTTLING; cgroup < MEMCG_END; cgroup++) {
+               if (memcg_conf->cgroup_limit[cgroup] > 0.0)
+                       memcg_info_set_limit(get_memcg_info(cgroup),
+                                       memcg_conf->cgroup_limit[cgroup]/100.0, totalram_bytes);
+       }
+
+       /* set MemoryLevelThreshold section */
+       for (int lvl = MEM_LEVEL_MEDIUM; lvl < MEM_LEVEL_MAX; lvl++) {
+               if (memcg_conf->threshold[lvl].percent &&
+                       memcg_conf->threshold[lvl].threshold > 0) {
+                       memcg_set_threshold(MEMCG_ROOT, lvl,
+                                       calculate_threshold_size(memcg_conf->threshold[lvl].threshold));
+
+                       if (lvl == MEM_LEVEL_OOM) {
+                               memcg_set_leave_threshold(MEMCG_ROOT,
+                                               get_memcg_info(MEMCG_ROOT)->threshold_mb[lvl] * 1.5);
+                               proactive_threshold_mb = get_memcg_info(MEMCG_ROOT)->threshold_leave_mb;
+                               proactive_leave_mb = proactive_threshold_mb * 1.5;
+                       }
+               } else if (memcg_conf->threshold[lvl].threshold > 0) {
+                       memcg_set_threshold(MEMCG_ROOT, lvl,
+                                       memcg_conf->threshold[lvl].threshold);
+
+                       if (lvl == MEM_LEVEL_OOM) {
+                               memcg_set_leave_threshold(MEMCG_ROOT,
+                                               get_memcg_info(MEMCG_ROOT)->threshold_mb[lvl] * 1.5);
+                               proactive_threshold_mb = get_memcg_info(MEMCG_ROOT)->threshold_leave_mb;
+                               proactive_leave_mb = proactive_threshold_mb * 1.5;
+                       }
+
+               }
+       }
+       oom_popup_enable = memcg_conf->oom_popup;
+
+       /* set MemoryAppTypeLimit and MemoryAppStatusLimit section */
+       lowmem_memory_init(memcg_conf->service.memory_bytes, memcg_conf->widget.memory_bytes,
+                       memcg_conf->guiapp.memory_bytes, memcg_conf->background.memory_bytes);
+       lowmem_action_init(memcg_conf->service.action, memcg_conf->widget.action,
+                       memcg_conf->guiapp.action, memcg_conf->background.action);
+
+       free_memcg_conf();
+}
+
+static void print_mem_configs(void)
+{
+       /* print info of Memory section */
+       for (int cgroup = MEMCG_THROTTLING; cgroup < MEMCG_END; cgroup++) {
+               _I("[MEMORY-CGROUP] set memory limit for cgroup '%s' to %llu bytes",
+                               convert_cgroup_type_to_str(cgroup), get_memcg_info(cgroup)->limit_bytes);
+       }
+
+       for (int cgroup = MEMCG_ROOT; cgroup < MEMCG_END; cgroup++) {
+               for (int mem_lvl = 0; mem_lvl < MEM_LEVEL_MAX; mem_lvl++) {
+                       _I("[MEMORY-LEVEL] set threshold of %s for memory level '%s' to %u MB", convert_cgroup_type_to_str(cgroup),
+                                       convert_memstate_to_str(mem_lvl), get_memcg_info(cgroup)->threshold_mb[mem_lvl]);
+               }
+       }
+
+       _I("[LMK] set number of max victims as %d", num_max_victims);
+       _I("[LMK] set threshold leave to %u MB", get_root_memcg_info()->threshold_leave_mb);
+       _I("[LMK] set proactive threshold to %u MB", proactive_threshold_mb);
+       _I("[LMK] set proactive low memory killer leave to %u MB", proactive_leave_mb);
+
+       /* print info of POPUP section */
+       _I("[POPUP] oom popup is %s", oom_popup_enable == true ? "enabled" : "disabled");
+}
+
+#include "file-helper.h"
+
+/* TODO: do we need lowmem_fd_start and lowmem_fd_stop? */
+static int lowmem_init(void)
+{
+       int ret = RESOURCED_ERROR_NONE;
+
+       _D("resourced memory init start");
+
+       /* init memcg */
+       ret = memcg_make_full_subdir(MEMCG_PATH);
+       ret_value_msg_if(ret < 0, ret, "memory cgroup init failed\n");
+       memcg_params_init();
+
+       setup_memcg_params();
+
+       /* default configuration */
+       load_configs();
+
+       /* this function should be called after parsing configurations */
+       memcg_write_limiter_params();
+       print_mem_configs();
+
+       /* make a worker thread called low memory killer */
+       ret = lowmem_activate_worker();
+       if (ret) {
+               _E("[LMK] oom thread create failed\n");
+               return ret;
+       }
+
+       /* register threshold and event fd */
+       ret = lowmem_press_setup_eventfd();
+       if (ret) {
+               _E("[MEMORY-LIMIT] eventfd setup failed");
+               return ret;
+       }
+
+       lowmem_dbus_init();
+       lowmem_limit_init();
+       lowmem_system_init();
+
+       register_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
+       register_notifier(RESOURCED_NOTIFIER_MEM_CONTROL, lowmem_control_handler);
+
+       return ret;
+}
+
+static int lowmem_exit(void)
+{
+       lowmem_deactivate_worker();
+       lowmem_limit_exit();
+       lowmem_system_exit();
+
+       unregister_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
+       unregister_notifier(RESOURCED_NOTIFIER_MEM_CONTROL, lowmem_control_handler);
+
+       return RESOURCED_ERROR_NONE;
+}
+
+static int resourced_memory_init(void *data)
+{
+       return lowmem_init();
+}
+
+static int resourced_memory_finalize(void *data)
+{
+       return lowmem_exit();
+}
+
+void lowmem_change_memory_state(int state, int force)
+{
+       int mem_state;
+
+       if (force) {
+               mem_state = state;
+       } else {
+               unsigned int available_mb = proc_get_mem_available();
+               mem_state = check_mem_state(available_mb);
+       }
+
+       lowmem_trigger_memory_state_action(mem_state);
+}
+
+unsigned long lowmem_get_ktotalram(void)
+{
+       return totalram_kb;
+}
+
+unsigned long long lowmem_get_totalram(void)
+{
+       return totalram_bytes;
+}
+
+void lowmem_restore_memcg(struct proc_app_info *pai)
+{
+       char *cgpath;
+       int index, ret;
+       struct cgroup *cgroup = NULL;
+       struct memcg_info *mi = NULL;
+       pid_t pid = pai->main_pid;
+
+       ret = cgroup_pid_get_path("memory", pid, &cgpath);
+       if (ret < 0)
+               return;
+
+       for (index = MEMCG_END-1; index >= MEMCG_ROOT; index--) {
+               cgroup = get_cgroup_tree(index);
+               if (!cgroup)
+                       continue;
+
+               mi = cgroup->memcg_info;
+               if (!mi)
+                       continue;
+
+               if (!strcmp(cgroup->hashname, ""))
+                       continue;
+               if (strstr(cgpath, cgroup->hashname))
+                       break;
+       }
+       pai->memory.memcg_idx = index;
+       pai->memory.memcg_info = mi;
+       if (strstr(cgpath, pai->appid))
+               pai->memory.use_mem_limit = true;
+
+       free(cgpath);
+}
+
+static struct module_ops memory_modules_ops = {
+       .priority       = MODULE_PRIORITY_EARLY,
+       .name           = "lowmem",
+       .init           = resourced_memory_init,
+       .exit           = resourced_memory_finalize,
+};
+
+MODULE_REGISTER(&memory_modules_ops)
diff --git a/src/resource-limiter/memory/lowmem.h b/src/resource-limiter/memory/lowmem.h
new file mode 100644 (file)
index 0000000..154ff12
--- /dev/null
@@ -0,0 +1,147 @@
+/*
+ * resourced
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/**
+ * @file lowmem.h
+ * @desc handler function for setting memcgroup memory controller and
+ *     receiving event fd.
+ **/
+
+#ifndef __LOWMEM_HANDLER_H__
+#define __LOWMEM_HANDLER_H__
+
+#include <proc-common.h>
+#include <memory-cgroup.h>
+#include "fd-handler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#define MAX_MEMORY_CGROUP_VICTIMS      10
+
+
+struct task_info {
+       /*
+        * Mostly, there are not multiple processes with the same pgid.
+        * So, for the frequent case, we use pid variable to avoid
+        * allocating arrays.
+        */
+       pid_t pid;
+       GArray *pids;
+       pid_t pgid;
+       /* oom_score_adj is same as /proc/<pid>/oom_score_adj */
+       int oom_score_adj;
+       /*
+        * oom_score_lru is same as oom_score_adj or adjusted by
+        * proc_app_info lru_state for apps that are marked as favourite.
+        *
+        * oom_score_lru is the main value used in comparison for LMK.
+        */
+       int oom_score_lru;
+       int size;
+       struct proc_app_info *pai;
+};
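For reference (taken from the selection logic in the removed vmpressure-lowmem-handler.c further below): for an application marked as favourite that has dropped to the background, the handler computes oom_score_lru = OOMADJ_FAVORITE + OOMADJ_FAVORITE_APP_INCREASE * lru_state while oom_score_adj keeps its original value; for every other task the two fields are equal.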
+
+struct memory_limit_event {
+       int fd;
+       unsigned long long threshold_bytes;             /* byte */
+       char *path;
+       enum proc_action action;
+};
+
+/**
+ * @desc execute /usr/bin/memps and make log file with pid and process name
+ */
+//void make_memps_log(enum mem_log path, pid_t pid, char *victim_name);
+
+void lowmem_memory_init(unsigned long long service_limit_bytes, unsigned long long widget_limit_bytes,
+               unsigned long long guiapp_limit_bytes, unsigned long long bgapp_limit_bytes);
+void lowmem_action_init(int service_action, int widget_action,
+               int guiapp_action, int bgapp_action);
+int lowmem_limit_set_app(unsigned long long limit_bytes, struct proc_app_info *pai,
+               enum proc_action action);
+int lowmem_limit_set_system_service(pid_t pid, unsigned long long limit_bytes,
+               const char *name, enum proc_action action);
+void lowmem_dbus_init(void);
+int lowmem_trigger_reclaim(int flags, int victims, enum oom_score score, int threshold);
+void lowmem_trigger_swap_reclaim(enum oom_score score, unsigned long long swap_size_bytes);
+void lowmem_change_memory_state(int state, int force);
+unsigned long lowmem_get_ktotalram(void);
+unsigned long long lowmem_get_totalram(void);
+void lowmem_trigger_swap(pid_t pid, char *path, bool move);
+void lowmem_limit_init(void);
+void lowmem_limit_exit(void);
+int lowmem_limit_move_cgroup(struct proc_app_info *pai);
+int lowmem_reassign_limit(const char *dir,
+               unsigned long long limit_bytes, enum proc_action action);
+unsigned int lowmem_get_task_mem_usage_rss(const struct task_info *tsk);
+bool lowmem_fragmentated(void);
+unsigned int lowmem_get_proactive_thres(void);
+void lowmem_system_init();
+void lowmem_system_exit();
+
+/**
+ * @desc restore memory cgroup from pid when resourced is restarted
+ */
+void lowmem_restore_memcg(struct proc_app_info *pai);
+
+/*
+ * Return memcg pointer to selected cgroup.
+ */
+
+enum oom_killer_cb_flags {
+       OOM_NONE                = 0x0,          /* for main oom killer thread */
+       OOM_FORCE               = (0x1 << 0),   /* for forced kill */
+       OOM_NOMEMORY_CHECK      = (0x1 << 1),   /* check victims' memory */
+       /*------------------------------------------------------------------*/
+       OOM_IN_DEPTH            = (0x1 << 2),   /* consider all possible cgroups */
+       OOM_SINGLE_SHOT         = (0x1 << 3),   /* do not retry if failed to reclaim memory */
+       OOM_REVISE              = (0x1 << 4),   /* run another attempt in case the first one failed */
+       OOM_DROP                = (0x1 << 5),   /* deactivate the worker */
+};
+
+/**
+ * @brief  Low memory killer status
+ *
+ * LOWMEM_RECLAIM_NONE: no potential candidates for memory reclaim
+ * LOWMEM_RECLAIM_DONE: the requested amount of memory has been successfully
+ *                     reclaimed by terminating a number of processes
+ * LOWMEM_RECLAIM_DROP: the whole reclaim procedure should be dropped
+ * LOWMEM_RECLAIM_CONT: the selected process is a valid candidate for memory
+ *                     reclaim - green light for terminating the process
+ * LOWMEM_RECLAIM_RETRY: check again after a few seconds,
+ *                     because killing processes takes some time
+ * LOWMEM_RECLAIM_NEXT_TYPE: no potential candidates for memory reclaim in the current type
+ */
+enum {
+       LOWMEM_RECLAIM_NONE,
+       LOWMEM_RECLAIM_DONE,
+       LOWMEM_RECLAIM_DROP,
+       LOWMEM_RECLAIM_CONT,
+       LOWMEM_RECLAIM_RETRY,
+       LOWMEM_RECLAIM_NEXT_TYPE
+};
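As a usage illustration only (not part of this change; example_force_reclaim() is a hypothetical caller, while OOM_SCORE_MEDIUM and _E() come from resourced's existing headers):

/* Ask the LMK worker to bring available memory back to roughly 150 MB,
 * killing at most 5 victims and starting from the OOM_SCORE_MEDIUM group.
 * lowmem_trigger_reclaim() ORs in OOM_FORCE, OOM_IN_DEPTH and
 * OOM_SINGLE_SHOT itself and returns -ENOMEM if the request cannot be
 * allocated. */
static void example_force_reclaim(void)
{
	int ret = lowmem_trigger_reclaim(0, 5, OOM_SCORE_MEDIUM, 150);

	if (ret < 0)
		_E("reclaim request failed: %d", ret);
}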
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /*__LOWMEM_HANDLER_H__*/
diff --git a/src/resource-limiter/memory/vmpressure-lowmem-handler.c b/src/resource-limiter/memory/vmpressure-lowmem-handler.c
deleted file mode 100644 (file)
index 25d46e4..0000000
+++ /dev/null
@@ -1,1938 +0,0 @@
-/*
- * resourced
- *
- * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * @file vmpressure-lowmem-handler.c
- *
- * @desc lowmem handler using memcgroup
- *
- * Copyright (c) 2014 Samsung Electronics Co., Ltd. All rights reserved.
- *
- */
-
-#include <stdio.h>
-#include <fcntl.h>
-#include <assert.h>
-#include <limits.h>
-#include <vconf.h>
-#include <unistd.h>
-#include <time.h>
-#include <limits.h>
-#include <dirent.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/shm.h>
-#include <sys/sysinfo.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <ctype.h>
-#include <bundle.h>
-#include <eventsystem.h>
-#include <malloc.h>
-
-#include "trace.h"
-#include "cgroup.h"
-#include "lowmem-handler.h"
-#include "proc-common.h"
-#include "procfs.h"
-#include "freezer.h"
-#include "resourced.h"
-#include "macro.h"
-#include "notifier.h"
-#include "config-parser.h"
-#include "module.h"
-#include "swap-common.h"
-#include "cgroup.h"
-#include "memory-cgroup.h"
-#include "heart-common.h"
-#include "proc-main.h"
-#include "dbus-handler.h"
-#include "util.h"
-#include "fd-handler.h"
-#include "resourced-helper-worker.h"
-#include "safe-kill.h"
-#include "dedup-common.h"
-
-#define LOWMEM_THRES_INIT                   0
-
-#define MAX_VICTIMS_BETWEEN_CHECK           3
-#define MAX_PROACTIVE_HIGH_VICTIMS          4
-#define FOREGROUND_VICTIMS                  1
-#define OOM_KILLER_PRIORITY                 -20
-#define THRESHOLD_MARGIN                    10   /* MB */
-
-#define MEM_SIZE_64                         64   /* MB */
-#define MEM_SIZE_256                        256  /* MB */
-#define MEM_SIZE_448                        448  /* MB */
-#define MEM_SIZE_512                        512  /* MB */
-#define MEM_SIZE_768                        768  /* MB */
-#define MEM_SIZE_1024                       1024 /* MB */
-#define MEM_SIZE_2048                       2048 /* MB */
-
-/* thresholds for 64M RAM*/
-#define PROACTIVE_64_THRES                  10 /* MB */
-#define PROACTIVE_64_LEAVE                  30 /* MB */
-#define CGROUP_ROOT_64_THRES_DEDUP          16 /* MB */
-#define CGROUP_ROOT_64_THRES_SWAP           15 /* MB */
-#define CGROUP_ROOT_64_THRES_LOW            8  /* MB */
-#define CGROUP_ROOT_64_THRES_MEDIUM         5  /* MB */
-#define CGROUP_ROOT_64_THRES_LEAVE          8  /* MB */
-#define CGROUP_ROOT_64_NUM_VICTIMS          1
-
-/* thresholds for 256M RAM */
-#define PROACTIVE_256_THRES                 50 /* MB */
-#define PROACTIVE_256_LEAVE                 80 /* MB */
-#define CGROUP_ROOT_256_THRES_DEDUP         60 /* MB */
-#define CGROUP_ROOT_256_THRES_SWAP          40 /* MB */
-#define CGROUP_ROOT_256_THRES_LOW           20 /* MB */
-#define CGROUP_ROOT_256_THRES_MEDIUM        10 /* MB */
-#define CGROUP_ROOT_256_THRES_LEAVE         20 /* MB */
-#define CGROUP_ROOT_256_NUM_VICTIMS         2
-
-/* threshold for 448M RAM */
-#define PROACTIVE_448_THRES                 80  /* MB */
-#define PROACTIVE_448_LEAVE                 100 /* MB */
-#define CGROUP_ROOT_448_THRES_DEDUP         120 /* MB */
-#define CGROUP_ROOT_448_THRES_SWAP          100 /* MB */
-#define CGROUP_ROOT_448_THRES_LOW           60  /* MB */
-#define CGROUP_ROOT_448_THRES_MEDIUM        50  /* MB */
-#define CGROUP_ROOT_448_THRES_LEAVE         70  /* MB */
-#define CGROUP_ROOT_448_NUM_VICTIMS         5
-
-/* threshold for 512M RAM */
-#define PROACTIVE_512_THRES                 80  /* MB */
-#define PROACTIVE_512_LEAVE                 100 /* MB */
-#define CGROUP_ROOT_512_THRES_DEDUP         140 /* MB */
-#define CGROUP_ROOT_512_THRES_SWAP          100 /* MB */
-#define CGROUP_ROOT_512_THRES_LOW           70  /* MB */
-#define CGROUP_ROOT_512_THRES_MEDIUM        60  /* MB */
-#define CGROUP_ROOT_512_THRES_LEAVE         80  /* MB */
-#define CGROUP_ROOT_512_NUM_VICTIMS         5
-
-/* threshold for 768 RAM */
-#define PROACTIVE_768_THRES                 100 /* MB */
-#define PROACTIVE_768_LEAVE                 130 /* MB */
-#define CGROUP_ROOT_768_THRES_DEDUP         180 /* MB */
-#define CGROUP_ROOT_768_THRES_SWAP          150 /* MB */
-#define CGROUP_ROOT_768_THRES_LOW           90  /* MB */
-#define CGROUP_ROOT_768_THRES_MEDIUM        80  /* MB */
-#define CGROUP_ROOT_768_THRES_LEAVE         100 /* MB */
-#define CGROUP_ROOT_768_NUM_VICTIMS         5
-
-/* threshold for more than 1024M RAM */
-#define PROACTIVE_1024_THRES                150 /* MB */
-#define PROACTIVE_1024_LEAVE                230 /* MB */
-#define CGROUP_ROOT_1024_THRES_DEDUP        400 /* MB */
-#define CGROUP_ROOT_1024_THRES_SWAP         300 /* MB */
-#define CGROUP_ROOT_1024_THRES_LOW          120 /* MB */
-#define CGROUP_ROOT_1024_THRES_MEDIUM       100 /* MB */
-#define CGROUP_ROOT_1024_THRES_LEAVE        150 /* MB */
-#define CGROUP_ROOT_1024_NUM_VICTIMS        5
-
-/* threshold for more than 2048M RAM */
-#define PROACTIVE_2048_THRES                200 /* MB */
-#define PROACTIVE_2048_LEAVE                500 /* MB */
-#define CGROUP_ROOT_2048_THRES_DEDUP        400 /* MB */
-#define CGROUP_ROOT_2048_THRES_SWAP         300 /* MB */
-#define CGROUP_ROOT_2048_THRES_LOW          200 /* MB */
-#define CGROUP_ROOT_2048_THRES_MEDIUM       160 /* MB */
-#define CGROUP_ROOT_2048_THRES_LEAVE        300 /* MB */
-#define CGROUP_ROOT_2048_NUM_VICTIMS        10
-
-/* threshold for more than 3072M RAM */
-#define PROACTIVE_3072_THRES                300 /* MB */
-#define PROACTIVE_3072_LEAVE                700 /* MB */
-#define CGROUP_ROOT_3072_THRES_DEDUP        600 /* MB */
-#define CGROUP_ROOT_3072_THRES_SWAP         500 /* MB */
-#define CGROUP_ROOT_3072_THRES_LOW          400 /* MB */
-#define CGROUP_ROOT_3072_THRES_MEDIUM       250 /* MB */
-#define CGROUP_ROOT_3072_THRES_LEAVE        500 /* MB */
-#define CGROUP_ROOT_3072_NUM_VICTIMS        10
-
-static unsigned proactive_threshold_mb;
-static unsigned proactive_leave_mb;
-static unsigned lmk_start_threshold_mb;
-
-static char *event_level = MEMCG_DEFAULT_EVENT_LEVEL;
-
-/**
- * Resourced Low Memory Killer
- * NOTE: planned to be moved to a separate file.
- */
-/*-------------------------------------------------*/
-#define OOM_TIMER_INTERVAL_SEC 2
-#define LMW_LOOP_WAIT_TIMEOUT_MSEC     OOM_TIMER_INTERVAL_SEC*(G_USEC_PER_SEC)
-#define LMW_RETRY_WAIT_TIMEOUT_MSEC    (G_USEC_PER_SEC)
-
-struct lowmem_control {
-       /*
-        * For each queued request the following properties
-        * are required with two exceptions:
-        *  - status is being set by LMK
-        *  - callback is optional
-        */
-       /* Processing flags*/
-       unsigned int flags;
-       /* Indictator for OOM score of targeted processes */
-       enum oom_score score;
-
-       /* Desired size to be restored - level to be reached (MB)*/
-       unsigned int size_mb;
-       /* Max number of processes to be considered */
-       unsigned int count;
-       /* Memory reclaim status */
-       int status;
-       /*
-        * Optional - if set, will be triggered by LMK once the request
-        * is handled.
-        */
-       void (*callback) (struct lowmem_control *);
-};
-
-struct lowmem_worker {
-       pthread_t       worker_thread;
-       GAsyncQueue     *queue;
-       int             active;
-       int             running;
-};
-
-static struct lowmem_worker lmw;
-
-//static int memlog_enabled;
-//static int memlog_nr_max = DEFAULT_MEMLOG_NR_MAX;
-/* remove logfiles to reduce to this threshold.
- * it is about five-sixths of the memlog_nr_max. */
-//static int memlog_remove_batch_thres = (DEFAULT_MEMLOG_NR_MAX * 5) / 6;
-//static char *memlog_path = DEFAULT_MEMLOG_PATH;
-//static char *memlog_prefix[MEMLOG_MAX];
-
-#define LOWMEM_WORKER_IS_ACTIVE(_lmw)  g_atomic_int_get(&(_lmw)->active)
-#define LOWMEM_WORKER_ACTIVATE(_lmw)   g_atomic_int_set(&(_lmw)->active, 1)
-#define LOWMEM_WORKER_DEACTIVATE(_lmw) g_atomic_int_set(&(_lmw)->active, 0)
-
-#define LOWMEM_WORKER_IS_RUNNING(_lmw) g_atomic_int_get(&(_lmw)->running)
-#define LOWMEM_WORKER_RUN(_lmw)        g_atomic_int_set(&(_lmw)->running, 1)
-#define LOWMEM_WORKER_IDLE(_lmw)       g_atomic_int_set(&(_lmw)->running, 0)
-
-#define LOWMEM_NEW_REQUEST() g_slice_new0(struct lowmem_control)
-
-#define LOWMEM_DESTROY_REQUEST(_ctl)           \
-       g_slice_free(typeof(*(_ctl)), _ctl);    \
-
-#define LOWMEM_SET_REQUEST(c, __flags, __score, __size, __count, __cb) \
-{                                                                      \
-       (c)->flags      = __flags; (c)->score   = __score;              \
-       (c)->size_mb= __size;  (c)->count       = __count;              \
-       (c)->callback   = __cb;                                         \
-}
-
-#define BUFF_MAX        255
-#define APP_ATTR_PATH "/proc/%d/attr/current"
-
-static int get_privilege(pid_t pid, char *name, size_t len)
-{
-       char path[PATH_MAX];
-       char attr[BUFF_MAX];
-       size_t attr_len;
-       FILE *fp;
-
-       snprintf(path, sizeof(path), APP_ATTR_PATH, pid);
-
-       fp = fopen(path, "r");
-       if (!fp)
-               return -errno;
-
-       attr_len = fread(attr, 1, sizeof(attr) - 1, fp);
-       fclose(fp);
-       if (attr_len <= 0)
-               return -ENOENT;
-
-       attr[attr_len] = '\0';
-
-       snprintf(name, len, "%s", attr);
-       return 0;
-}
-
-static int is_app(pid_t pid)
-{
-       char attr[BUFF_MAX];
-       size_t len;
-       int ret;
-
-       ret = get_privilege(pid, attr, sizeof(attr));
-       if (ret < 0) {
-               _E("Failed to get privilege of PID(%d).", pid);
-               return -1;
-       }
-
-       len = strlen(attr) + 1;
-
-       if (!strncmp("System", attr, len))
-               return 0;
-
-       if (!strncmp("User", attr, len))
-               return 0;
-
-       if (!strncmp("System::Privileged", attr, len))
-               return 0;
-
-       return 1;
-}
-
-
-static void lowmem_queue_request(struct lowmem_worker *lmw,
-                               struct lowmem_control *ctl)
-{
-       if (LOWMEM_WORKER_IS_ACTIVE(lmw))
-               g_async_queue_push(lmw->queue, ctl);
-}
-
-/* internal */
-static void lowmem_drain_queue(struct lowmem_worker *lmw)
-{
-       struct lowmem_control *ctl;
-
-       g_async_queue_lock(lmw->queue);
-       while ((ctl = g_async_queue_try_pop_unlocked(lmw->queue))) {
-               if (ctl->callback)
-                       ctl->callback(ctl);
-               LOWMEM_DESTROY_REQUEST(ctl);
-       }
-       g_async_queue_unlock(lmw->queue);
-}
-
-static void lowmem_request_destroy(gpointer data)
-{
-       struct lowmem_control *ctl = (struct lowmem_control*) data;
-
-       if (ctl->callback)
-               ctl->callback(ctl);
-       LOWMEM_DESTROY_REQUEST(ctl);
-}
-
-/*-------------------------------------------------*/
-
-/* low memory action function for cgroup */
-/* low memory action function */
-static void high_mem_act(void);
-static void swap_activate_act(void);
-static void swap_compact_act(void);
-static void lmk_act(void);
-
-
-static size_t cur_mem_state = MEM_LEVEL_HIGH;
-static int num_max_victims = MAX_MEMORY_CGROUP_VICTIMS;
-static int num_vict_between_check = MAX_VICTIMS_BETWEEN_CHECK;
-
-static unsigned long long totalram_bytes;
-static unsigned long totalram_kb;
-
-static bool oom_popup_enable;
-static bool oom_popup;
-static bool memcg_swap_status;
-static int fragmentation_size;
-
-static const char *convert_cgroup_type_to_str(int type)
-{
-       static const char *type_table[] =
-       {"/", "Throttling"};
-       if (type >= MEMCG_ROOT && type <= MEMCG_THROTTLING)
-               return type_table[type];
-       else
-               return "Error";
-}
-
-static const char *convert_status_to_str(int status)
-{
-       static const char *status_table[] =
-       {"none", "done", "drop", "cont", "retry", "next_type"};
-       if(status >= LOWMEM_RECLAIM_NONE && status <= LOWMEM_RECLAIM_NEXT_TYPE)
-               return status_table[status];
-       return "error status";
-}
-
-static const char *convert_memstate_to_str(int mem_state)
-{
-       static const char *state_table[] = {"mem high", "mem medium",
-               "mem low", "mem critical", "mem oom",};
-       if (mem_state >= 0 && mem_state < MEM_LEVEL_MAX)
-               return state_table[mem_state];
-       return "";
-}
-
-static int lowmem_launch_oompopup(void)
-{
-       GVariantBuilder *const gv_builder = g_variant_builder_new(G_VARIANT_TYPE("a{ss}"));
-       g_variant_builder_add(gv_builder, "{ss}", "_SYSPOPUP_CONTENT_", "lowmemory_oom");
-
-       GVariant *const params = g_variant_new("(a{ss})", gv_builder);
-       g_variant_builder_unref(gv_builder);
-
-       int ret = d_bus_call_method_sync_gvariant(SYSTEM_POPUP_BUS_NAME,
-               SYSTEM_POPUP_PATH_SYSTEM, SYSTEM_POPUP_IFACE_SYSTEM,
-               "PopupLaunch", params);
-
-       g_variant_unref(params);
-
-       return ret;
-}
-
-static inline void get_total_memory(void)
-{
-       struct sysinfo si;
-       if (totalram_bytes)
-               return;
-
-       if (!sysinfo(&si)) {
-               totalram_bytes = (unsigned long long)si.totalram * si.mem_unit;
-               totalram_kb = BYTE_TO_KBYTE(totalram_bytes);
-
-               register_totalram_bytes(totalram_bytes);
-       }
-       else {
-               _E("Failed to get total ramsize from the kernel");
-       }
-}
-
-unsigned int lowmem_get_task_mem_usage_rss(const struct task_info *tsk)
-{
-       unsigned int size_kb = 0, total_size_kb = 0;
-       int index, ret;
-       pid_t pid;
-
-       /*
-        * If pids are allocated only when there are multiple processes with
-        * the same pgid e.g., browser and web process. Mostly, single process
-        * is used.
-        */
-       if (tsk->pids == NULL) {
-               ret = proc_get_ram_usage(tsk->pid, &size_kb);
-
-               /* If there is no proc entry for given pid the process
-                * should be abandoned during further processing
-                */
-               if (ret < 0)
-                       _D("failed to get rss memory usage of %d", tsk->pid);
-
-               return size_kb;
-       }
-
-       for (index = 0; index < tsk->pids->len; index++) {
-               pid = g_array_index(tsk->pids, pid_t, index);
-               ret = proc_get_ram_usage(pid, &size_kb);
-               if (ret != RESOURCED_ERROR_NONE)
-                       continue;
-               total_size_kb += size_kb;
-       }
-
-       return total_size_kb;
-}
-
-static int lowmem_kill_victim(const struct task_info *tsk,
-               int flags, int memps_log, unsigned int *victim_size)
-{
-       pid_t pid;
-       int ret;
-       char appname[PATH_MAX];
-       int sigterm = 0;
-       struct proc_app_info *pai;
-
-       pid = tsk->pid;
-
-       if (pid <= 0 || pid == getpid())
-               return RESOURCED_ERROR_FAIL;
-
-       ret = proc_get_cmdline(pid, appname, sizeof appname);
-       if (ret == RESOURCED_ERROR_FAIL)
-               return RESOURCED_ERROR_FAIL;
-
-       if (!strcmp("memps", appname) ||
-           !strcmp("crash-worker", appname) ||
-           !strcmp("system-syspopup", appname)) {
-               _E("%s(%d) was selected, skip it", appname, pid);
-               return RESOURCED_ERROR_FAIL;
-       }
-
-       pai = tsk->pai;
-       if (pai) {
-               resourced_proc_status_change(PROC_CGROUP_SET_TERMINATE_REQUEST,
-                       pid, NULL, NULL, PROC_TYPE_NONE);
-
-               if (tsk->oom_score_lru <= OOMADJ_BACKGRD_LOCKED) {
-                       sigterm = 1;
-               } else if (tsk->oom_score_lru > OOMADJ_BACKGRD_LOCKED && tsk->oom_score_lru < OOMADJ_BACKGRD_UNLOCKED) {
-                       int app_flag = pai->flags;
-                       sigterm = app_flag & PROC_SIGTERM;
-               }
-
-               if (pai->memory.oom_killed)
-                       sigterm = 0;
-
-               pai->memory.oom_killed = true;
-       }
-
-       if (sigterm)
-               safe_kill(pid, SIGTERM);
-       else
-               safe_kill(pid, SIGKILL);
-
-       _D("[LMK] we killed, force(%d), %d (%s) score = %d, size: rss = %u KB, sigterm = %d\n",
-          flags & OOM_FORCE, pid, appname, tsk->oom_score_adj,
-          tsk->size, sigterm);
-       *victim_size = tsk->size;
-
-       if (tsk->oom_score_lru > OOMADJ_FOREGRD_UNLOCKED)
-               return RESOURCED_ERROR_NONE;
-
-       if (oom_popup_enable && !oom_popup) {
-               lowmem_launch_oompopup();
-               oom_popup = true;
-       }
-
-       return RESOURCED_ERROR_NONE;
-}
-
-/* return LOWMEM_RECLAIM_CONT when killing should be continued */
-static int lowmem_check_kill_continued(struct task_info *tsk, int flags)
-{
-       unsigned int available_mb;
-
-       /*
-        * Processes with the priority higher than perceptible are killed
-        * only when the available memory is less than dynamic oom threshold.
-        */
-       if (tsk->oom_score_lru > OOMADJ_BACKGRD_PERCEPTIBLE)
-               return LOWMEM_RECLAIM_CONT;
-
-       if (flags & (OOM_FORCE|OOM_SINGLE_SHOT)) {
-               _I("[LMK] %d is dropped during force kill, flag=%d",
-                       tsk->pid, flags);
-               return LOWMEM_RECLAIM_DROP;
-       }
-       available_mb = proc_get_mem_available();
-       if (available_mb > lmk_start_threshold_mb) {
-               _I("[LMK] available=%d MB, larger than %u MB, do not kill foreground",
-                       available_mb, lmk_start_threshold_mb);
-               return LOWMEM_RECLAIM_RETRY;
-       }
-       return LOWMEM_RECLAIM_CONT;
-}
-
-static int compare_victims(const struct task_info *ta, const struct task_info *tb)
-{
-        unsigned int pa, pb;
-
-       assert(ta != NULL);
-       assert(tb != NULL);
-       /*
-        * followed by kernel badness point calculation using heuristic.
-        * oom_score_adj is normalized by its unit, which varies -1000 ~ 1000.
-        */
-       pa = ta->oom_score_lru * (totalram_kb / 2000) + ta->size;
-       pb = tb->oom_score_lru * (totalram_kb / 2000) + tb->size;
-
-       return pb - pa;
-}
-
-static void lowmem_free_task_info_array(GArray *array)
-{
-       int i;
-
-       for (i = 0; i < array->len; i++) {
-               struct task_info *tsk;
-
-               tsk = &g_array_index(array, struct task_info, i);
-               if (tsk->pids)
-                       g_array_free(tsk->pids, true);
-       }
-
-       g_array_free(array, true);
-}
-
-static inline int is_dynamic_process_killer(int flags)
-{
-       return (flags & OOM_FORCE) && !(flags & OOM_NOMEMORY_CHECK);
-}
-
-static unsigned int is_memory_recovered(unsigned int *avail, unsigned int thres)
-{
-       unsigned int available = proc_get_mem_available();
-       unsigned int should_be_freed_mb = 0;
-
-       if (available < thres)
-               should_be_freed_mb = thres - available;
-       /*
-        * free THRESHOLD_MARGIN more than real should be freed,
-        * because launching app is consuming up the memory.
-        */
-       if (should_be_freed_mb > 0)
-               should_be_freed_mb += THRESHOLD_MARGIN;
-
-       *avail = available;
-
-       return should_be_freed_mb;
-}
-
-static int lowmem_get_pids_proc(GArray *pids)
-{
-       DIR *dp;
-       struct dirent *dentry;
-
-       dp = opendir("/proc");
-       if (!dp) {
-               _E("fail to open /proc");
-               return RESOURCED_ERROR_FAIL;
-       }
-       while ((dentry = readdir(dp)) != NULL) {
-               struct task_info tsk;
-               pid_t pid = 0, pgid = 0;
-               int oom = 0;
-
-               if (!isdigit(dentry->d_name[0]))
-                       continue;
-
-               pid = (pid_t)atoi(dentry->d_name);
-               if (pid < 1)
-                       /* skip invalid pids or kernel processes */
-                       continue;
-
-               pgid = getpgid(pid);
-               if (pgid < 1)
-                       continue;
-
-               if(is_app(pid) != 1)
-                       continue;
-
-               if (proc_get_oom_score_adj(pid, &oom) < 0) {
-                       _D("pid(%d) was already terminated", pid);
-                       continue;
-               }
-
-               /*
-                * Check whether this array includes applications or not.
-                * If it doesn't require to get applications
-                * and pid has been already included in pai,
-                * skip to append.
-                */
-               if (oom > OOMADJ_SU && oom <= OOMADJ_APP_MAX)
-                       continue;
-
-               /*
-                * Currently, for tasks in the memory cgroup,
-                * do not consider multiple tasks with one pgid.
-                */
-               tsk.pid = pid;
-               tsk.pgid = pgid;
-               tsk.oom_score_adj = oom;
-               tsk.oom_score_lru = oom;
-               tsk.pids = NULL;
-               tsk.size = lowmem_get_task_mem_usage_rss(&tsk);
-               tsk.pai = NULL;
-
-               g_array_append_val(pids, tsk);
-       }
-
-       closedir(dp);
-       return RESOURCED_ERROR_NONE;
-}
-
-/**
- * @brief Terminate up to max_victims processes after finding them from pai.
-       It depends on proc_app_info lists
-       and it also reference systemservice cgroup
-       because some processes in this group don't have proc_app_info.
- *
- * @max_victims:           max number of processes to be terminated
- * @start_oom:     find victims from start oom adj score value
- * @end_oom: find victims to end oom adj score value
- * @should_be_freed: amount of memory to be reclaimed (in MB)
- * @total_size[out]: total size of possibly reclaimed memory (required)
- * @completed:     final outcome (optional)
- * @threshold:         desired value of memory available
- */
-static int lowmem_kill_victims(int max_victims,
-       int start_oom, int end_oom, unsigned should_be_freed, int flags,
-       unsigned int *total_size, int *completed, unsigned int threshold)
-{
-       int total_count = 0;
-       GSList *proc_app_list = NULL;
-       int i, ret, victim = 0;
-       unsigned int victim_size = 0;
-       unsigned int total_victim_size = 0;
-       int status = LOWMEM_RECLAIM_NONE;
-       GArray *candidates = NULL;
-       GSList *iter, *iterchild;
-       struct proc_app_info *pai = NULL;
-       int oom_score_adj;
-       int should_be_freed_kb = MBYTE_TO_KBYTE(should_be_freed);
-
-       candidates = g_array_new(false, false, sizeof(struct task_info));
-
-       proc_app_list = proc_app_list_open();
-       gslist_for_each_item(iter, proc_app_list) {
-               struct task_info ti;
-
-               total_count++;
-               pai = (struct proc_app_info *)iter->data;
-               if (!pai->main_pid)
-                       continue;
-
-               oom_score_adj = pai->memory.oom_score_adj;
-               if (oom_score_adj > end_oom || oom_score_adj < start_oom)
-                       continue;
-
-               if ((flags & OOM_REVISE) && pai->memory.oom_killed)
-                       continue;
-
-               ti.pid = pai->main_pid;
-               ti.pgid = getpgid(ti.pid);
-               ti.oom_score_adj = oom_score_adj;
-               ti.pai = pai;
-
-               /*
-                * Before oom_score_adj of favourite (oom_score = 270) applications is
-                * independent of lru_state, now we consider lru_state, while
-                * killing favourite process.
-                */
-
-               if (oom_score_adj == OOMADJ_FAVORITE && pai->lru_state >= PROC_BACKGROUND)
-                       ti.oom_score_lru = OOMADJ_FAVORITE + OOMADJ_FAVORITE_APP_INCREASE * pai->lru_state;
-               else
-                       ti.oom_score_lru = oom_score_adj;
-
-               if (pai->childs) {
-                       ti.pids = g_array_new(false, false, sizeof(pid_t));
-                       g_array_append_val(ti.pids, ti.pid);
-                       gslist_for_each_item(iterchild, pai->childs) {
-                               pid_t child = GPOINTER_TO_PID(iterchild->data);
-                               g_array_append_val(ti.pids, child);
-                       }
-               } else
-                       ti.pids = NULL;
-
-               g_array_append_val(candidates, ti);
-       }
-
-       proc_app_list_close();
-
-       if (!candidates->len) {
-               status = LOWMEM_RECLAIM_NEXT_TYPE;
-               goto leave;
-       }
-       else {
-               _D("[LMK] candidate ratio=%d/%d", candidates->len, total_count);
-       }
-
-       for (i = 0; i < candidates->len; i++) {
-               struct task_info *tsk;
-
-               tsk = &g_array_index(candidates, struct task_info, i);
-               tsk->size = lowmem_get_task_mem_usage_rss(tsk);                 /* KB */
-       }
-
-       /*
-        * In case of start_oom == OOMADJ_SU,
-        * we're going to try to kill some of processes in /proc
-        * to handle low memory situation.
-        * It can find malicious system process even though it has low oom score.
-        */
-       if (start_oom == OOMADJ_SU)
-               lowmem_get_pids_proc(candidates);
-
-       g_array_sort(candidates, (GCompareFunc)compare_victims);
-
-       for (i = 0; i < candidates->len; i++) {
-               struct task_info *tsk;
-
-               if (i >= max_victims) {
-                       status = LOWMEM_RECLAIM_NEXT_TYPE;
-                       break;
-               }
-
-               /*
-                * Available memory is checking only every
-                * num_vict_between_check process for reducing burden.
-                */
-               if (!(i % num_vict_between_check)) {
-                       if (proc_get_mem_available() > threshold) {
-                               status = LOWMEM_RECLAIM_DONE;
-                               break;
-                       }
-               }
-
-               if (!(flags & OOM_NOMEMORY_CHECK) &&
-                   total_victim_size >= should_be_freed_kb) {
-                       _D("[LMK] victim=%d, max_victims=%d, total_size=%uKB",
-                               victim, max_victims, total_victim_size);
-                       status = LOWMEM_RECLAIM_DONE;
-                       break;
-               }
-
-               tsk = &g_array_index(candidates, struct task_info, i);
-
-               status = lowmem_check_kill_continued(tsk, flags);
-               if (status != LOWMEM_RECLAIM_CONT)
-                       break;
-
-               _I("[LMK] select victims from proc_app_list pid(%d) with oom_score_adj(%d)\n", tsk->pid, tsk->oom_score_adj);
-
-               ret = lowmem_kill_victim(tsk, flags, i, &victim_size);
-               if (ret != RESOURCED_ERROR_NONE)
-                       continue;
-               victim++;
-               total_victim_size += victim_size;
-       }
-
-leave:
-       lowmem_free_task_info_array(candidates);
-       *total_size = total_victim_size;
-       if(*completed != LOWMEM_RECLAIM_CONT)
-               *completed = status;
-       else
-               *completed = LOWMEM_RECLAIM_NEXT_TYPE;
-       return victim;
-}
-
-static int calculate_range_of_oom(enum oom_score score, int *min, int *max)
-{
-       if (score > OOM_SCORE_MAX || score < OOM_SCORE_HIGH) {
-               _E("[LMK] oom score (%d) is out of scope", score);
-               return RESOURCED_ERROR_FAIL;
-       }
-
-       *max = cgroup_get_highest_oom_score_adj(score);
-       *min = cgroup_get_lowest_oom_score_adj(score);
-
-       return RESOURCED_ERROR_NONE;
-}
-
-static void lowmem_handle_request(struct lowmem_control *ctl)
-{
-       int start_oom, end_oom;
-       int count = 0, victim_cnt = 0;
-       int max_victim_cnt = ctl->count;
-       int status = LOWMEM_RECLAIM_NONE;
-       unsigned int available_mb = 0;
-       unsigned int total_size_mb = 0;
-       unsigned int current_size = 0;
-       unsigned int reclaim_size_mb, shortfall_mb = 0;
-       enum oom_score oom_score = ctl->score;
-
-       available_mb = proc_get_mem_available();
-       reclaim_size_mb = ctl->size_mb  > available_mb                  /* MB */
-                    ? ctl->size_mb - available_mb : 0;
-
-       if (!reclaim_size_mb) {
-               status = LOWMEM_RECLAIM_DONE;
-               goto done;
-       }
-
-retry:
-       /* Prepare LMK to start doing its job. Check preconditions. */
-       if (calculate_range_of_oom(oom_score, &start_oom, &end_oom))
-               goto done;
-
-       lmk_start_threshold_mb = get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM];
-       shortfall_mb = is_memory_recovered(&available_mb, ctl->size_mb);
-
-       if (!shortfall_mb || !reclaim_size_mb) {
-               status = LOWMEM_RECLAIM_DONE;
-               goto done;
-       }
-
-       /* precaution */
-       current_size = 0;
-       victim_cnt = lowmem_kill_victims(max_victim_cnt, start_oom, end_oom,
-                           reclaim_size_mb, ctl->flags, &current_size, &status, ctl->size_mb);
-
-       if (victim_cnt) {
-               current_size = KBYTE_TO_MBYTE(current_size);
-               reclaim_size_mb -= reclaim_size_mb > current_size
-                       ? current_size : reclaim_size_mb;
-               total_size_mb += current_size;
-               count += victim_cnt;
-               _I("[LMK] current: killed %d victims, reclaimed=%uMB from %d to %d status=%s",
-                               victim_cnt, current_size,
-                               start_oom, end_oom, convert_status_to_str(status));
-       }
-
-       if ((status == LOWMEM_RECLAIM_DONE) ||
-           (status == LOWMEM_RECLAIM_DROP) ||
-           (status == LOWMEM_RECLAIM_RETRY))
-               goto done;
-
-       /*
-        * If the first pass does not reclaim enough memory:
-        * - if flags has OOM_IN_DEPTH, try to find victims again in the next,
-        *   more active cgroup; otherwise just return, because there are no
-        *   more victims in the desired cgroup.
-        * - if flags has OOM_REVISE, resourced could not find victims in
-        *   proc_app_list, so it should look for victims or a malicious
-        *   process in /proc. However, scanning /proc can cause abnormal
-        *   behaviour (sluggishness, or killing the same victims repeatedly),
-        *   so on the first pass just return and wait for a while.
-        */
-       if (oom_score == OOM_SCORE_LOW) {
-               oom_score = OOM_SCORE_MEDIUM;
-               goto retry;
-       } else if ((oom_score == OOM_SCORE_MEDIUM) && (ctl->flags & OOM_IN_DEPTH)) {
-               oom_score = OOM_SCORE_HIGH;
-               if (ctl->flags & OOM_FORCE)
-                       max_victim_cnt = FOREGROUND_VICTIMS;
-               goto retry;
-       } else if ((oom_score == OOM_SCORE_HIGH) && (ctl->flags & OOM_IN_DEPTH)) {
-               status = LOWMEM_RECLAIM_RETRY;
-               ctl->score = OOM_SCORE_MAX;
-       } else if (oom_score == OOM_SCORE_MAX) {
-               status = LOWMEM_RECLAIM_RETRY;
-       }
-done:
-       _I("[LMK] Done: killed %d processes reclaimed=%uMB remaining=%uMB shortfall=%uMB status=%s",
-               count, total_size_mb, reclaim_size_mb, shortfall_mb, convert_status_to_str(status));
-
-       /* After we finish reclaiming, it is worth removing the oldest memps logs */
-       ctl->status = status;
-}
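
(Editorial note, not part of the original source: the retry ladder above can be summarised as a tiny helper. The helper name is hypothetical; the enum values and flags are the ones used in this file.)

	static enum oom_score next_oom_score_for_retry(enum oom_score score, int flags)
	{
		/* Sketch of the escalation performed by lowmem_handle_request() above. */
		if (score == OOM_SCORE_LOW)
			return OOM_SCORE_MEDIUM;        /* always escalate from LOW */
		if (score == OOM_SCORE_MEDIUM && (flags & OOM_IN_DEPTH))
			return OOM_SCORE_HIGH;          /* go deeper only when requested */
		return score;   /* HIGH and MAX end the attempt with LOWMEM_RECLAIM_RETRY */
	}
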
-
-static void *lowmem_reclaim_worker(void *arg)
-{
-       struct lowmem_worker *lmw = (struct lowmem_worker *)arg;
-
-       setpriority(PRIO_PROCESS, 0, OOM_KILLER_PRIORITY);
-
-       g_async_queue_ref(lmw->queue);
-
-       while (1) {
-               int try_count = 0;
-               struct lowmem_control *ctl;
-
-               LOWMEM_WORKER_IDLE(lmw);
-               /* Wait on any wake-up call */
-               ctl = g_async_queue_pop(lmw->queue);
-
-               if (!ctl) {
-                       _W("[LMK] ctl structure is NULL");
-                       continue;
-               }
-
-               if ((ctl->flags & OOM_DROP) || !LOWMEM_WORKER_IS_ACTIVE(lmw)) {
-                       LOWMEM_DESTROY_REQUEST(ctl);
-                       break;
-               }
-
-               LOWMEM_WORKER_RUN(lmw);
-process_again:
-               _D("[LMK] %d tries", ++try_count);
-               lowmem_handle_request(ctl);
-               /**
-                * In case the request failed to reclaim the requested amount of
-                * memory, or the system is still under memory pressure, wait for
-                * the timeout. There is a chance the situation will have improved
-                * by the time we retry.
-                */
-               if (ctl->status == LOWMEM_RECLAIM_RETRY &&
-                   !(ctl->flags & OOM_SINGLE_SHOT)) {
-                       unsigned int available_mb = proc_get_mem_available();
-
-                       if (available_mb >= ctl->size_mb) {
-                               _I("[LMK] Memory restored: requested=%uMB available=%uMB\n",
-                                       ctl->size_mb, available_mb);
-                               ctl->status = LOWMEM_RECLAIM_DONE;
-                               if (ctl->callback)
-                                       ctl->callback(ctl);
-                               LOWMEM_DESTROY_REQUEST(ctl);
-                               LOWMEM_WORKER_IDLE(lmw);
-                               continue;
-                       }
-
-                       if (LOWMEM_WORKER_IS_ACTIVE(lmw)) {
-                               g_usleep(LMW_RETRY_WAIT_TIMEOUT_MSEC);
-                               ctl->flags |= OOM_REVISE;
-                               goto process_again;
-                       }
-               }
-
-               /*
-                * The ctl callback checks the available size again, and it is the
-                * last point in the reclaim worker. Resourced has sent SIGKILL to
-                * the victim processes, so it should wait a few seconds until each
-                * process returns its memory.
-                */
-               g_usleep(LMW_LOOP_WAIT_TIMEOUT_MSEC);
-               if (ctl->callback)
-                       ctl->callback(ctl);
-
-               /* The LMK worker becomes the owner of all queued requests. */
-               LOWMEM_DESTROY_REQUEST(ctl);
-               LOWMEM_WORKER_IDLE(lmw);
-       }
-       g_async_queue_unref(lmw->queue);
-       pthread_exit(NULL);
-}
-
-static void change_lowmem_state(unsigned int mem_state)
-{
-       cur_mem_state = mem_state;
-       lmk_start_threshold_mb = get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM];
-
-       resourced_notify(RESOURCED_NOTIFIER_MEM_LEVEL_CHANGED,
-               (void *)&cur_mem_state);
-}
-
-/* Only an app can call this function;
- * that is, a service cannot call it.
- */
-static void lowmem_swap_memory(char *path)
-{
-       unsigned int available_mb;
-
-       if (cur_mem_state == MEM_LEVEL_HIGH)
-               return;
-
-       if (swap_get_state() != SWAP_ON)
-               return;
-
-       available_mb = proc_get_mem_available();
-       if (cur_mem_state != MEM_LEVEL_LOW &&
-           available_mb <= get_root_memcg_info()->threshold_mb[MEM_LEVEL_LOW])
-               swap_activate_act();
-
-       resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
-       memcg_swap_status = true;
-}
-
-void lowmem_trigger_swap(pid_t pid, char *path, bool move)
-{
-       int error;
-       int oom_score_adj;
-       int lowest_oom_score_adj;
-
-       if (!path) {
-               _E("[SWAP] Unknown memory cgroup path to swap");
-               return;
-       }
-
-       /* In this case, the corresponding process will be moved to the MEMCG_THROTTLING memory cgroup.
-        */
-       if (move) {
-               error = proc_get_oom_score_adj(pid, &oom_score_adj);
-               if (error) {
-                       _E("[SWAP] Cannot get oom_score_adj of pid (%d)", pid);
-                       return;
-               }
-
-               lowest_oom_score_adj = cgroup_get_lowest_oom_score_adj(OOM_SCORE_LOW);
-
-               if (oom_score_adj < lowest_oom_score_adj) {
-                       oom_score_adj = lowest_oom_score_adj;
-                       /* At the end of this function, the 'lowmem_swap_memory()' function will be called */
-                       proc_set_oom_score_adj(pid, oom_score_adj, find_app_info(pid));
-                       return;
-               }
-       }
-
-       /* The corresponding process is already managed per app or service.
-        * In addition, if the process is already located in MEMCG_THROTTLING, just start swapping.
-        */
-       resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
-}
-
-static void memory_level_send_system_event(int lv)
-{
-       bundle *b;
-       const char *str;
-
-       switch (lv) {
-               case MEM_LEVEL_HIGH:
-               case MEM_LEVEL_MEDIUM:
-               case MEM_LEVEL_LOW:
-                       str = EVT_VAL_MEMORY_NORMAL;
-                       break;
-               case MEM_LEVEL_CRITICAL:
-                       str = EVT_VAL_MEMORY_SOFT_WARNING;
-                       break;
-               case MEM_LEVEL_OOM:
-                       str = EVT_VAL_MEMORY_HARD_WARNING;
-                       break;
-               default:
-                       _E("Invalid state");
-                       return;
-       }
-
-       b = bundle_create();
-       if (!b) {
-               _E("Failed to create bundle");
-               return;
-       }
-
-       bundle_add_str(b, EVT_KEY_LOW_MEMORY, str);
-       eventsystem_send_system_event(SYS_EVENT_LOW_MEMORY, b);
-       bundle_free(b);
-}
-
-static void high_mem_act(void)
-{
-       int ret, status;
-
-       ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
-       if (ret)
-               _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
-       if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
-               vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
-                             VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
-               memory_level_send_system_event(MEM_LEVEL_HIGH);
-       }
-
-       change_lowmem_state(MEM_LEVEL_HIGH);
-
-       if (swap_get_state() == SWAP_ON && memcg_swap_status) {
-               resourced_notify(RESOURCED_NOTIFIER_SWAP_UNSET_LIMIT, get_memcg_info(MEMCG_THROTTLING));
-               memcg_swap_status = false;
-       }
-       if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
-               resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
-                       (void *)CGROUP_FREEZER_ENABLED);
-}
-
-static void swap_activate_act(void)
-{
-       int ret, status;
-
-       ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
-       if (ret)
-               _E("vconf get failed %s", VCONFKEY_SYSMAN_LOW_MEMORY);
-
-       if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
-               vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
-                               VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
-               memory_level_send_system_event(MEM_LEVEL_LOW);
-       }
-       change_lowmem_state(MEM_LEVEL_LOW);
-       if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
-               resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
-                       (void *)CGROUP_FREEZER_ENABLED);
-
-       if (swap_get_state() != SWAP_ON)
-               resourced_notify(RESOURCED_NOTIFIER_SWAP_ACTIVATE, NULL);
-}
-
-static void dedup_act(enum ksm_scan_mode mode)
-{
-       int ret, status;
-       int data;
-
-       if (dedup_get_state() != DEDUP_ONE_SHOT)
-               return;
-
-       if (proc_get_freezer_status() == CGROUP_FREEZER_PAUSED)
-               resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
-                               (void *)CGROUP_FREEZER_ENABLED);
-
-       if (mode == KSM_SCAN_PARTIAL) {
-               ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
-               if (ret)
-                       _E("vconf get failed %s", VCONFKEY_SYSMAN_LOW_MEMORY);
-
-               if (status != VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL) {
-                       vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
-                                       VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL);
-                       memory_level_send_system_event(MEM_LEVEL_MEDIUM);
-               }
-               change_lowmem_state(MEM_LEVEL_MEDIUM);
-
-               data = KSM_SCAN_PARTIAL;
-               resourced_notify(RESOURCED_NOTIFIER_DEDUP_SCAN, &data);
-       } else if (mode == KSM_SCAN_FULL) {
-               data = KSM_SCAN_FULL;
-               resourced_notify(RESOURCED_NOTIFIER_DEDUP_SCAN, &data);
-       }
-}
-
-static void swap_compact_act(void)
-{
-       change_lowmem_state(MEM_LEVEL_CRITICAL);
-       resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_CRITICAL);
-       memory_level_send_system_event(MEM_LEVEL_CRITICAL);
-}
-
-static void medium_cb(struct lowmem_control *ctl)
-{
-       if (ctl->status == LOWMEM_RECLAIM_DONE)
-               oom_popup = false;
-       lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
-}
-
-static void lmk_act(void)
-{
-       unsigned int available_mb;
-       int ret;
-       int status = VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL;
-
-       /*
-        * Don't trigger reclaim worker
-        * if it is already running
-        */
-       if (LOWMEM_WORKER_IS_RUNNING(&lmw))
-               return;
-
-       ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
-       if (ret)
-               _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
-
-       memory_level_send_system_event(MEM_LEVEL_OOM);
-       if (status != VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING) {
-               if (proc_get_freezer_status() == CGROUP_FREEZER_ENABLED)
-                       resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
-                               (void *)CGROUP_FREEZER_PAUSED);
-               vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
-                             VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING);
-       }
-       available_mb = proc_get_mem_available();
-
-       change_lowmem_state(MEM_LEVEL_OOM);
-
-       if (available_mb < get_root_memcg_info()->threshold_leave_mb) {
-               struct lowmem_control *ctl;
-
-               ctl = LOWMEM_NEW_REQUEST();
-               if (ctl) {
-                       LOWMEM_SET_REQUEST(ctl, OOM_IN_DEPTH,
-                               OOM_SCORE_LOW, get_root_memcg_info()->threshold_leave_mb,
-                               num_max_victims, medium_cb);
-                       lowmem_queue_request(&lmw, ctl);
-               }
-       }
-
-       resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_OOM);
-
-       /*
-        * Flush resourced's own memory, just like other processes.
-        * Resourced can hold many malloc fast bins as well as sqlite3 cache memory.
-        */
-       malloc_trim(0);
-
-       return;
-}
-
-static void lowmem_trigger_memory_state_action(int mem_state)
-{
-       /*
-        * Check whether the state we want to set differs from the current one.
-        * This check is skipped when mem_state is MEM_LEVEL_OOM; otherwise the
-        * reclaim worker could not run again.
-        */
-       if (mem_state != MEM_LEVEL_OOM && cur_mem_state == mem_state)
-               return;
-
-       switch (mem_state) {
-       case MEM_LEVEL_HIGH:
-               high_mem_act();
-               break;
-       case MEM_LEVEL_MEDIUM:
-               dedup_act(KSM_SCAN_PARTIAL);
-               break;
-       case MEM_LEVEL_LOW:
-               swap_activate_act();
-               break;
-       case MEM_LEVEL_CRITICAL:
-               dedup_act(KSM_SCAN_FULL);
-               swap_compact_act();
-               break;
-       case MEM_LEVEL_OOM:
-               lmk_act();
-               break;
-       default:
-               assert(0);
-       }
-}
-
-static unsigned int check_mem_state(unsigned int available_mb)
-{
-       int mem_state;
-       for (mem_state = MEM_LEVEL_MAX - 1; mem_state > MEM_LEVEL_HIGH; mem_state--) {
-               if (mem_state != MEM_LEVEL_OOM &&
-                               available_mb <= get_root_memcg_info()->threshold_mb[mem_state])
-                       break;
-               else if (mem_state == MEM_LEVEL_OOM && available_mb <= lmk_start_threshold_mb)
-                       break;
-       }
-
-       return mem_state;
-}
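
(Editorial note: a worked example of the walk above, with purely illustrative thresholds; the real values come from setup_memcg_params() or load_configs() below, and it assumes the level enum is ordered HIGH < MEDIUM < LOW < CRITICAL < OOM < MAX.)

	/* Assumed, illustrative values only:
	 *   threshold_mb[MEM_LEVEL_MEDIUM] = 400, [MEM_LEVEL_LOW] = 300,
	 *   [MEM_LEVEL_CRITICAL] = 200, lmk_start_threshold_mb = 100
	 *
	 * check_mem_state(80)  -> MEM_LEVEL_OOM   (80 <= 100 on the first iteration)
	 * check_mem_state(250) -> MEM_LEVEL_LOW   (skips OOM and CRITICAL, 250 <= 300)
	 * check_mem_state(500) -> MEM_LEVEL_HIGH  (no threshold matches, loop falls through)
	 */
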
-
-/* setup memcg parameters depending on total ram size. */
-static void setup_memcg_params(void)
-{
-       unsigned long total_ramsize_mb;
-
-       get_total_memory();
-       total_ramsize_mb = BYTE_TO_MBYTE(totalram_bytes);
-
-       _D("Total: %lu MB", total_ramsize_mb);
-       if (total_ramsize_mb <= MEM_SIZE_64) {
-               /* set thresholds for ram size 64M */
-               proactive_threshold_mb = PROACTIVE_64_THRES;
-               proactive_leave_mb = PROACTIVE_64_LEAVE;
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_64_THRES_DEDUP);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_64_THRES_SWAP);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_64_THRES_LOW);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_64_THRES_MEDIUM);
-               memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_64_THRES_LEAVE);
-               num_max_victims = CGROUP_ROOT_64_NUM_VICTIMS;
-       } else if (total_ramsize_mb <= MEM_SIZE_256) {
-               /* set thresholds for ram size 256M */
-               proactive_threshold_mb = PROACTIVE_256_THRES;
-               proactive_leave_mb = PROACTIVE_256_LEAVE;
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_256_THRES_DEDUP);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_256_THRES_SWAP);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_256_THRES_LOW);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_256_THRES_MEDIUM);
-               memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_256_THRES_LEAVE);
-               num_max_victims = CGROUP_ROOT_256_NUM_VICTIMS;
-       } else if (total_ramsize_mb <= MEM_SIZE_448) {
-               /* set thresholds for ram size 448M */
-               proactive_threshold_mb = PROACTIVE_448_THRES;
-               proactive_leave_mb = PROACTIVE_448_LEAVE;
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_448_THRES_DEDUP);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_448_THRES_SWAP);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_448_THRES_LOW);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_448_THRES_MEDIUM);
-               memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_448_THRES_LEAVE);
-               num_max_victims = CGROUP_ROOT_448_NUM_VICTIMS;
-       } else if (total_ramsize_mb <= MEM_SIZE_512) {
-               /* set thresholds for ram size 512M */
-               proactive_threshold_mb = PROACTIVE_512_THRES;
-               proactive_leave_mb = PROACTIVE_512_LEAVE;
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_512_THRES_DEDUP);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_512_THRES_SWAP);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_512_THRES_LOW);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_512_THRES_MEDIUM);
-               memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_512_THRES_LEAVE);
-               num_max_victims = CGROUP_ROOT_512_NUM_VICTIMS;
-       }  else if (total_ramsize_mb <= MEM_SIZE_768) {
-               /* set thresholds for ram size 768M */
-               proactive_threshold_mb = PROACTIVE_768_THRES;
-               proactive_leave_mb = PROACTIVE_768_LEAVE;
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_768_THRES_DEDUP);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_768_THRES_SWAP);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_768_THRES_LOW);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_768_THRES_MEDIUM);
-               memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_768_THRES_LEAVE);
-               num_max_victims = CGROUP_ROOT_768_NUM_VICTIMS;
-       } else if (total_ramsize_mb <= MEM_SIZE_1024) {
-               /* set thresholds for ram size 1024M */
-               proactive_threshold_mb = PROACTIVE_1024_THRES;
-               proactive_leave_mb = PROACTIVE_1024_LEAVE;
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_1024_THRES_DEDUP);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_1024_THRES_SWAP);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_1024_THRES_LOW);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_1024_THRES_MEDIUM);
-               memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_1024_THRES_LEAVE);
-               num_max_victims = CGROUP_ROOT_1024_NUM_VICTIMS;
-       } else if (total_ramsize_mb <= MEM_SIZE_2048) {
-               proactive_threshold_mb = PROACTIVE_2048_THRES;
-               proactive_leave_mb = PROACTIVE_2048_LEAVE;
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_2048_THRES_DEDUP);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_2048_THRES_SWAP);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_2048_THRES_LOW);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_2048_THRES_MEDIUM);
-               memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_2048_THRES_LEAVE);
-               num_max_victims = CGROUP_ROOT_2048_NUM_VICTIMS;
-       } else {
-               proactive_threshold_mb = PROACTIVE_3072_THRES;
-               proactive_leave_mb = PROACTIVE_3072_LEAVE;
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_MEDIUM, CGROUP_ROOT_3072_THRES_DEDUP);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_LOW, CGROUP_ROOT_3072_THRES_SWAP);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_CRITICAL, CGROUP_ROOT_3072_THRES_LOW);
-               memcg_set_threshold(MEMCG_ROOT, MEM_LEVEL_OOM, CGROUP_ROOT_3072_THRES_MEDIUM);
-               memcg_set_leave_threshold(MEMCG_ROOT, CGROUP_ROOT_3072_THRES_LEAVE);
-               num_max_victims = CGROUP_ROOT_3072_NUM_VICTIMS;
-       }
-}
-
-static void lowmem_move_memcgroup(int pid, int next_oom_score_adj, struct proc_app_info *pai)
-{
-       int cur_oom_score_adj;
-       int cur_memcg_idx;
-       struct memcg_info *mi;
-       int next_memcg_idx = cgroup_get_type(next_oom_score_adj);
-
-       mi = get_memcg_info(next_memcg_idx);
-
-       if (!mi) {
-               return;
-       }
-
-       if (!pai) {
-               cgroup_write_pid_fullpath(mi->name, pid);
-               return;
-       }
-
-       /* parent pid */
-       if (pai->main_pid == pid) {
-               cur_oom_score_adj = pai->memory.oom_score_adj;
-               cur_memcg_idx = cgroup_get_type(cur_oom_score_adj);
-
-               if (cur_oom_score_adj == next_oom_score_adj) {
-                       _D("next oom_score_adj (%d) is same with current one", next_oom_score_adj);
-                       return;
-               }
-
-               proc_set_process_memory_state(pai, next_memcg_idx, mi, next_oom_score_adj);
-
-               if (!lowmem_limit_move_cgroup(pai))
-                       return;
-
-               if (cur_memcg_idx == next_memcg_idx)
-                       return;
-
-               _I("app (%s) memory cgroup move from %s to %s", pai->appid, convert_cgroup_type_to_str(cur_memcg_idx), convert_cgroup_type_to_str(next_memcg_idx));
-               cgroup_write_pid_fullpath(mi->name, pid);
-               if (next_memcg_idx == MEMCG_THROTTLING)
-                       lowmem_swap_memory(get_memcg_info(MEMCG_THROTTLING)->name);
-       }
-       /* child pid */
-       else {
-               if (pai->memory.use_mem_limit)
-                       return;
-
-               cgroup_write_pid_fullpath(mi->name, pid);
-       }
-}
-
-static int lowmem_activate_worker(void)
-{
-       int ret = RESOURCED_ERROR_NONE;
-
-       if (LOWMEM_WORKER_IS_ACTIVE(&lmw)) {
-               return ret;
-       }
-
-       lmw.queue = g_async_queue_new_full(lowmem_request_destroy);
-       if (!lmw.queue) {
-               _E("Failed to create request queue\n");
-               return RESOURCED_ERROR_FAIL;
-       }
-       LOWMEM_WORKER_ACTIVATE(&lmw);
-       ret = pthread_create(&lmw.worker_thread, NULL,
-               (void *)lowmem_reclaim_worker, (void *)&lmw);
-       if (ret) {
-               LOWMEM_WORKER_DEACTIVATE(&lmw);
-               _E("Failed to create LMK thread: %d\n", ret);
-       } else {
-               pthread_detach(lmw.worker_thread);
-               ret = RESOURCED_ERROR_NONE;
-       }
-       return ret;
-}
-
-static void lowmem_deactivate_worker(void)
-{
-       struct lowmem_control *ctl;
-
-       if (!LOWMEM_WORKER_IS_ACTIVE(&lmw))
-               return;
-
-       LOWMEM_WORKER_DEACTIVATE(&lmw);
-       lowmem_drain_queue(&lmw);
-
-       ctl = LOWMEM_NEW_REQUEST();
-       if (!ctl) {
-               _E("Critical - g_slice alloc failed - Lowmem cannot be deactivated");
-               return;
-       }
-       ctl->flags = OOM_DROP;
-       g_async_queue_push(lmw.queue, ctl);
-       g_async_queue_unref(lmw.queue);
-}
-
-static int lowmem_press_eventfd_read(int fd)
-{
-       unsigned long long dummy_state;
-
-       return read(fd, &dummy_state, sizeof(dummy_state));
-}
-
-static void lowmem_press_root_cgroup_handler(void)
-{
-       static unsigned int prev_available_mb;
-       unsigned int available_mb;
-       int mem_state;
-
-       available_mb = proc_get_mem_available();
-       if (prev_available_mb == available_mb)
-               return;
-
-       mem_state = check_mem_state(available_mb);
-       lowmem_trigger_memory_state_action(mem_state);
-       prev_available_mb = available_mb;
-}
-
-static bool lowmem_press_eventfd_handler(int fd, void *data)
-{
-       struct memcg_info *mi;
-       enum cgroup_type type = MEMCG_ROOT;
-
-       // FIXME: probably shouldn't get ignored
-       if (lowmem_press_eventfd_read(fd) < 0)
-               _E("Failed to read lowmem press event, %m\n");
-
-       for (type = MEMCG_ROOT; type < MEMCG_END; type++) {
-               if (!get_cgroup_tree(type) || !get_memcg_info(type))
-                       continue;
-               mi = get_memcg_info(type);
-               if (fd == mi->evfd) {
-                       /* call low memory handler for this memcg */
-                       if (type == MEMCG_ROOT) {
-                               lowmem_press_root_cgroup_handler();
-                               return true;
-                       } else {
-                               _E("Wrong event fd for cgroup %s", convert_cgroup_type_to_str(type));
-                               return false;
-                       }
-               }
-       }
-
-       return false;
-}
-
-static int lowmem_press_register_eventfd(struct memcg_info *mi)
-{
-       int evfd;
-       const char *name = mi->name;
-       static fd_handler_h handler;
-
-       if (mi->threshold_mb[MEM_LEVEL_OOM] == LOWMEM_THRES_INIT)
-               return 0;
-
-       evfd = memcg_set_eventfd(name, MEMCG_EVENTFD_MEMORY_PRESSURE,
-                       event_level);
-
-       if (evfd < 0) {
-               int saved_errno = errno;
-               _E("Failed to register event press fd %s cgroup", name);
-               return -saved_errno;
-       }
-
-       mi->evfd = evfd;
-
-       add_fd_read_handler(NULL, evfd, lowmem_press_eventfd_handler, NULL, NULL, &handler);
-       return 0;
-}
-
-static int lowmem_press_setup_eventfd(void)
-{
-       unsigned int i;
-
-       for (i = MEMCG_ROOT; i < MEMCG_END; i++) {
-               if (!get_use_hierarchy(i))
-                       continue;
-
-               lowmem_press_register_eventfd(get_memcg_info(i));
-       }
-       return RESOURCED_ERROR_NONE;
-}
-
-static void lowmem_force_reclaim_cb(struct lowmem_control *ctl)
-{
-       lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
-}
-
-int lowmem_trigger_reclaim(int flags, int victims, enum oom_score score, int threshold_mb)
-{
-       struct lowmem_control *ctl = LOWMEM_NEW_REQUEST();
-
-       if (!ctl)
-               return -ENOMEM;
-
-       flags |= OOM_FORCE | OOM_IN_DEPTH | OOM_SINGLE_SHOT;
-       victims = victims > 0 ? victims : MAX_MEMORY_CGROUP_VICTIMS;
-       score = score > 0 ? score : OOM_SCORE_LOW;
-       threshold_mb = threshold_mb > 0 ? threshold_mb : get_root_memcg_info()->threshold_leave_mb;
-
-       lowmem_change_memory_state(MEM_LEVEL_CRITICAL, 1);
-       LOWMEM_SET_REQUEST(ctl, flags,
-               score, threshold_mb, victims,
-               lowmem_force_reclaim_cb);
-       lowmem_queue_request(&lmw, ctl);
-
-       return 0;
-}
-
-void lowmem_trigger_swap_reclaim(enum oom_score score, unsigned long long swap_size_bytes)
-{
-       int size_mb, victims;
-
-       victims = num_max_victims  > MAX_PROACTIVE_HIGH_VICTIMS
-                                ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
-
-       size_mb = get_root_memcg_info()->threshold_leave_mb + BYTE_TO_MBYTE(swap_size_bytes);
-       lowmem_trigger_reclaim(0, victims, score, size_mb);
-}
-
-bool lowmem_fragmentated(void)
-{
-       struct buddyinfo bi;
-       int ret;
-
-       ret = proc_get_buddyinfo("Normal", &bi);
-       if (ret < 0)
-               return false;
-
-       /*
-        * fragmentation_size is the minimum count of order-2 pages in the "Normal" zone.
-        * If the total number of buddy pages is smaller than fragmentation_size,
-        * resourced treats kernel memory as fragmented.
-        * The default value is zero on low-memory devices.
-        */
-       if (bi.page[PAGE_32K] + (bi.page[PAGE_64K] << 1) + (bi.page[PAGE_128K] << 2) +
-               (bi.page[PAGE_256K] << 3) < fragmentation_size) {
-               _I("fragmentation detected, need to execute proactive oom killer");
-               return true;
-       }
-       return false;
-}
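
(Editorial note: the shift-weighted sum above counts every listed order in 32 KB-block equivalents. A worked example with made-up buddyinfo values; fragmentation_size itself is configured elsewhere.)

	/* Hypothetical "Normal" zone buddyinfo:
	 *   page[PAGE_32K] = 3, page[PAGE_64K] = 1, page[PAGE_128K] = 0, page[PAGE_256K] = 2
	 * weighted count = 3 + (1 << 1) + (0 << 2) + (2 << 3) = 3 + 2 + 0 + 16 = 21
	 * With fragmentation_size = 25 the zone counts as fragmented; with the default
	 * fragmentation_size = 0 this check never triggers.
	 */
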
-
-static void lowmem_proactive_oom_killer(int flags, char *appid)
-{
-       unsigned int before_mb;
-       int victims;
-
-       before_mb = proc_get_mem_available();
-
-       /* If available memory is already below the OOM threshold (the regular
-        * oom killer will handle it) or above the proactive leave level, just return */
-       if (before_mb < get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM] ||
-                       before_mb > proactive_leave_mb)
-               return;
-
-       victims = num_max_victims  > MAX_PROACTIVE_HIGH_VICTIMS
-                                ? MAX_PROACTIVE_HIGH_VICTIMS : num_max_victims;
-
-#ifdef HEART_SUPPORT
-       /*
-        * This branch is used only when the HEART module is compiled in and
-        * its MEMORY module is enabled. Otherwise it is skipped.
-        */
-       struct heart_memory_data *md = heart_memory_get_memdata(appid, DATA_LATEST);
-       if (md) {
-               unsigned int rss_mb, after_mb, size_mb;
-
-               rss_mb = KBYTE_TO_MBYTE(md->avg_rss);
-
-               free(md);
-
-               after_mb = before_mb - rss_mb;
-               /*
-                * After launching the app, ensure that available memory stays
-                * above threshold_leave.
-                */
-               if (after_mb >= get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM])
-                       return;
-
-               if (proactive_threshold_mb - rss_mb >= get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM])
-                       size_mb = proactive_threshold_mb;
-               else
-                       size_mb = rss_mb + get_root_memcg_info()->threshold_mb[MEM_LEVEL_OOM] + THRESHOLD_MARGIN;
-
-               _D("history based proactive LMK : avg rss %u, available %u required = %u MB",
-                       rss_mb, before_mb, size_mb);
-               lowmem_trigger_reclaim(0, victims, OOM_SCORE_LOW, size_mb);
-
-               return;
-       }
-#endif
-
-       /*
-        * When there is no history data for the launching app, it is necessary
-        * to check the current fragmentation state or the application manifest.
-        * If resourced decides proactive LMK is required, it runs the oom killer
-        * based on a dynamic threshold.
-        */
-       if (lowmem_fragmentated())
-               goto reclaim;
-
-       /*
-        * Run the proactive oom killer only when available memory is below the
-        * dynamic process threshold.
-        */
-       if (!proactive_threshold_mb || before_mb >= proactive_threshold_mb)
-               return;
-
-       if (!(flags & PROC_LARGEMEMORY))
-               return;
-
-reclaim:
-       /*
-        * Free THRESHOLD_MARGIN more than what actually needs to be freed,
-        * because the launching app keeps consuming memory.
-        */
-       _D("Run threshold based proactive LMK: memory level to reach: %u MB\n",
-               proactive_leave_mb + THRESHOLD_MARGIN);
-       lowmem_trigger_reclaim(0, victims, OOM_SCORE_LOW, proactive_leave_mb + THRESHOLD_MARGIN);
-}
-
-unsigned int lowmem_get_proactive_thres(void)
-{
-       return proactive_threshold_mb;
-}
-
-static int lowmem_prelaunch_handler(void *data)
-{
-       struct proc_status *ps = (struct proc_status *)data;
-       struct proc_app_info *pai = ps->pai;
-
-       if (!pai || CHECK_BIT(pai->flags, PROC_SERVICEAPP))
-               return RESOURCED_ERROR_NONE;
-
-       lowmem_proactive_oom_killer(ps->pai->flags, ps->pai->appid);
-       return RESOURCED_ERROR_NONE;
-}
-
-int lowmem_control_handler(void *data)
-{
-       struct lowmem_control_data *lowmem_data;
-
-       lowmem_data = (struct lowmem_control_data *)data;
-       switch (lowmem_data->control_type) {
-       case LOWMEM_MOVE_CGROUP:
-               lowmem_move_memcgroup((pid_t)lowmem_data->pid,
-                                       lowmem_data->oom_score_adj, lowmem_data->pai);
-               break;
-       default:
-               break;
-       }
-       return RESOURCED_ERROR_NONE;
-}
-
-static inline int calculate_threshold_size(double ratio)
-{
-       unsigned long long size_bytes = (double)totalram_bytes * ratio / 100.0;
-       return BYTE_TO_MBYTE(size_bytes);
-}
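
(Editorial note: a quick numeric check of the conversion above, assuming BYTE_TO_MBYTE() divides by 2^20; the macro itself is defined outside this file.)

	/* totalram_bytes = 1 GiB, ratio = 5.0 (a "5%" MemoryLevelThreshold entry):
	 *   size_bytes = 1073741824 * 5.0 / 100.0 = 53687091.2
	 *   BYTE_TO_MBYTE(53687091) = 53687091 / 1048576 ~= 51, so the threshold is 51 MB
	 */
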
-
-static void load_configs(void)
-{
-       struct memcg_conf *memcg_conf = get_memcg_conf();
-
-       /* set MemoryGroupLimit section */
-       for (int cgroup = MEMCG_THROTTLING; cgroup < MEMCG_END; cgroup++) {
-               if (memcg_conf->cgroup_limit[cgroup] > 0.0)
-                       memcg_info_set_limit(get_memcg_info(cgroup),
-                                       memcg_conf->cgroup_limit[cgroup]/100.0, totalram_bytes);
-       }
-
-       /* set MemoryLevelThreshold section */
-       for (int lvl = MEM_LEVEL_MEDIUM; lvl < MEM_LEVEL_MAX; lvl++) {
-               if (memcg_conf->threshold[lvl].percent &&
-                       memcg_conf->threshold[lvl].threshold > 0) {
-                       memcg_set_threshold(MEMCG_ROOT, lvl,
-                                       calculate_threshold_size(memcg_conf->threshold[lvl].threshold));
-
-                       if (lvl == MEM_LEVEL_OOM) {
-                               memcg_set_leave_threshold(MEMCG_ROOT,
-                                               get_memcg_info(MEMCG_ROOT)->threshold_mb[lvl] * 1.5);
-                               proactive_threshold_mb = get_memcg_info(MEMCG_ROOT)->threshold_leave_mb;
-                               proactive_leave_mb = proactive_threshold_mb * 1.5;
-                       }
-               }
-               else if (memcg_conf->threshold[lvl].threshold > 0) {
-                       memcg_set_threshold(MEMCG_ROOT, lvl,
-                                       memcg_conf->threshold[lvl].threshold);
-
-                       if (lvl == MEM_LEVEL_OOM) {
-                               memcg_set_leave_threshold(MEMCG_ROOT,
-                                               get_memcg_info(MEMCG_ROOT)->threshold_mb[lvl] * 1.5);
-                               proactive_threshold_mb = get_memcg_info(MEMCG_ROOT)->threshold_leave_mb;
-                               proactive_leave_mb = proactive_threshold_mb * 1.5;
-                       }
-
-               }
-       }
-       oom_popup_enable = memcg_conf->oom_popup;
-
-       /* set MemoryAppTypeLimit and MemoryAppStatusLimit section */
-       lowmem_memory_init(memcg_conf->service.memory_bytes, memcg_conf->widget.memory_bytes,
-                       memcg_conf->guiapp.memory_bytes, memcg_conf->background.memory_bytes);
-       lowmem_action_init(memcg_conf->service.action, memcg_conf->widget.action,
-                       memcg_conf->guiapp.action, memcg_conf->background.action);
-
-       free_memcg_conf();
-}
-
-static void print_mem_configs(void)
-{
-       /* print info of Memory section */
-       for (int cgroup = MEMCG_THROTTLING; cgroup < MEMCG_END; cgroup++) {
-               _I("[MEMORY-CGROUP] set memory for cgroup '%s' to %llu bytes",
-                               convert_cgroup_type_to_str(cgroup), get_memcg_info(cgroup)->limit_bytes);
-       }
-
-       for (int cgroup = MEMCG_ROOT; cgroup < MEMCG_END; cgroup++) {
-               for (int mem_lvl = 0; mem_lvl < MEM_LEVEL_MAX; mem_lvl++) {
-                       _I("[MEMORY-LEVEL] set threshold of %s for memory level '%s' to %u MB", convert_cgroup_type_to_str(cgroup),
-                                       convert_memstate_to_str(mem_lvl), get_memcg_info(cgroup)->threshold_mb[mem_lvl]);
-               }
-       }
-
-       _I("[LMK] set number of max victims as %d", num_max_victims);
-       _I("[LMK] set threshold leave to %u MB", get_root_memcg_info()->threshold_leave_mb);
-       _I("[LMK] set proactive threshold to %u MB", proactive_threshold_mb);
-       _I("[LMK] set proactive low memory killer leave to %u MB", proactive_leave_mb);
-
-       /* print info of POPUP section */
-       _I("[POPUP] oom popup is %s", oom_popup_enable == true ? "enabled" : "disabled");
-}
-
-#include "file-helper.h"
-
-/* TODO: do we need lowmem_fd_start / lowmem_fd_stop? */
-static int lowmem_init(void)
-{
-       int ret = RESOURCED_ERROR_NONE;
-
-       _D("resourced memory init start");
-
-       /* init memcg */
-       ret = memcg_make_full_subdir(MEMCG_PATH);
-       ret_value_msg_if(ret < 0, ret, "memory cgroup init failed\n");
-       memcg_params_init();
-
-       setup_memcg_params();
-
-       /* default configuration */
-       load_configs();
-
-       /* this function should be called after parsing configurations */
-       memcg_write_limiter_params();
-       print_mem_configs();
-
-       /* make a worker thread called low memory killer */
-       ret = lowmem_activate_worker();
-       if (ret) {
-               _E("[LMK] oom thread create failed\n");
-               return ret;
-       }
-
-       /* register threshold and event fd */
-       ret = lowmem_press_setup_eventfd();
-       if (ret) {
-               _E("[MEMORY-LIMIT] eventfd setup failed");
-               return ret;
-       }
-
-       lowmem_dbus_init();
-       lowmem_limit_init();
-       lowmem_system_init();
-
-       register_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
-       register_notifier(RESOURCED_NOTIFIER_MEM_CONTROL, lowmem_control_handler);
-
-       return ret;
-}
-
-static int lowmem_exit(void)
-{
-       lowmem_deactivate_worker();
-       lowmem_limit_exit();
-       lowmem_system_exit();
-
-       unregister_notifier(RESOURCED_NOTIFIER_APP_PRELAUNCH, lowmem_prelaunch_handler);
-       unregister_notifier(RESOURCED_NOTIFIER_MEM_CONTROL, lowmem_control_handler);
-
-       return RESOURCED_ERROR_NONE;
-}
-
-static int resourced_memory_init(void *data)
-{
-       return lowmem_init();
-}
-
-static int resourced_memory_finalize(void *data)
-{
-       return lowmem_exit();
-}
-
-void lowmem_change_memory_state(int state, int force)
-{
-       int mem_state;
-
-       if (force) {
-               mem_state = state;
-       } else {
-               unsigned int available_mb = proc_get_mem_available();
-               mem_state = check_mem_state(available_mb);
-       }
-
-       lowmem_trigger_memory_state_action(mem_state);
-}
-
-unsigned long lowmem_get_ktotalram(void)
-{
-       return totalram_kb;
-}
-
-unsigned long long lowmem_get_totalram(void)
-{
-       return totalram_bytes;
-}
-
-void lowmem_restore_memcg(struct proc_app_info *pai)
-{
-       char *cgpath;
-       int index, ret;
-       struct cgroup *cgroup = NULL;
-       struct memcg_info *mi = NULL;
-       pid_t pid = pai->main_pid;
-
-       ret = cgroup_pid_get_path("memory", pid, &cgpath);
-       if (ret < 0)
-               return;
-
-       for (index = MEMCG_END-1; index >= MEMCG_ROOT; index--) {
-               cgroup = get_cgroup_tree(index);
-               if (!cgroup)
-                       continue;
-
-               mi = cgroup->memcg_info;
-               if (!mi)
-                       continue;
-
-               if (!strcmp(cgroup->hashname, ""))
-                       continue;
-               if (strstr(cgpath, cgroup->hashname))
-                       break;
-       }
-       pai->memory.memcg_idx = index;
-       pai->memory.memcg_info = mi;
-       if (strstr(cgpath, pai->appid))
-               pai->memory.use_mem_limit = true;
-
-       free(cgpath);
-}
-
-static struct module_ops memory_modules_ops = {
-       .priority       = MODULE_PRIORITY_EARLY,
-       .name           = "lowmem",
-       .init           = resourced_memory_init,
-       .exit           = resourced_memory_finalize,
-};
-
-MODULE_REGISTER(&memory_modules_ops)
index e5f2c56fa0876fa3f910da220fecd456afcf1907..e0a103118478323d7812fb128513dadae0caf0a4 100644 (file)
@@ -26,7 +26,7 @@
 #include "swap-common.h"
 #include "memory-cgroup.h"
 #include "config-parser.h"
-#include "lowmem-handler.h"
+#include "lowmem.h"
 #include "losetup.h"
 
 #define FILESWAP_FULLNESS_RATIO        0.8
index 4d10ed42dba40478171e8cb5cfec238c3d508b7f..a2000fcc472eddc71cf3ff39b03e3b6209fb10e7 100644 (file)
@@ -26,7 +26,7 @@
 #include "swap-common.h"
 #include "memory-cgroup.h"
 #include "config-parser.h"
-#include "lowmem-handler.h"
+#include "lowmem.h"
 
 #define SWAP_ZRAM_DISK_SIZE            SWAP_ZRAM_SYSFILE"disksize"
 #define SWAP_ZRAM_MAX_COMP_STREAMS     SWAP_ZRAM_SYSFILE"max_comp_streams"
index fa2230577b5edf9a6c3ade42de35268ef642185d..18dfe87da34b56d73c58977979e91492be3261cf 100644 (file)
@@ -26,7 +26,7 @@
 #include "swap-common.h"
 #include "memory-cgroup.h"
 #include "config-parser.h"
-#include "lowmem-handler.h"
+#include "lowmem.h"
 #include "losetup.h"
 
 #define DEFAULT_ZSWAP_POOL_RATIO (25)
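
(Editorial note: for callers of the renamed header, only the include line changes. The sketch below is hypothetical; which lowmem functions each swap module actually calls is not visible in this diff, and it assumes these symbols are declared in lowmem.h.)

	#include "lowmem.h"   /* was: #include "lowmem-handler.h" */

	static void swap_example(void)
	{
		/* Helpers exported by lowmem.c, as seen in the deleted file above. */
		unsigned long long total_bytes = lowmem_get_totalram();

		if (lowmem_fragmentated())
			lowmem_trigger_reclaim(0, 0, OOM_SCORE_LOW, 0);  /* 0 -> built-in defaults */

		(void)total_bytes;
	}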